queued tcg patches
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJV22RbAAoJEK0ScMxN0CebzgkIAICUc/zbs+jSMa3vBH3nH0QP
v8//Ek73dH14B7BnKGIHA/9VWSpGq8HEHcRQfEOiu5pHZK/9XpwuxLTs2u7O9BVZ
n+XcAazeF7eikMScsEaMpFkPpKOcIv5QkwZaX90/sBMsGw3+nAR0+nBpqX2rIn2J
C+3YMMd4WgCG0SvV4rdxfZtFEh9NsHUddZKznyX4zzDYcGYSq0e6plKEhTt+u5zB
GJrfqkRWYg6bHe0p18u9o8kL3BSyLCWDuj3rXw8vSlkoybPHN/XsEMZ3LzZ6NVrV
Omx5ubAe2EwxOlHXD/N8wc+euQl3g0ZR2nd9j/KGcGDuNfziGgRo0l4sSF0LNx8=
=AI/e
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20150824' into staging

queued tcg patches

# gpg: Signature made Mon 24 Aug 2015 19:37:15 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20150824:
  linux-user: remove useless macros GUEST_BASE and RESERVED_VA
  linux-user: remove --enable-guest-base/--disable-guest-base
  tcg/aarch64: Use softmmu fast path for unaligned accesses
  tcg/s390: Use softmmu fast path for unaligned accesses
  tcg/ppc: Improve unaligned load/store handling on 64-bit backend
  tcg/i386: use softmmu fast path for unaligned accesses
  tcg: Remove tcg_gen_trunc_i64_i32
  tcg: Split trunc_shr_i32 opcode into extr[lh]_i64_i32
  tcg: update README about size changing ops
  tcg/optimize: add optimizations for ext_i32_i64 and extu_i32_i64 ops
  tcg: implement real ext_i32_i64 and extu_i32_i64 ops
  tcg: don't abuse TCG type in tcg_gen_trunc_shr_i64_i32
  tcg: rename trunc_shr_i32 into trunc_shr_i64_i32
  tcg/optimize: allow constant to have copies
  tcg/optimize: track const/copy status separately
  tcg/optimize: add temp_is_const and temp_is_copy functions
  tcg/optimize: optimize temps tracking
  tcg/optimize: fix constant signedness

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 34a4450434
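To summarize the API change that accounts for most of the diff below: a
64-bit value is now split into 32-bit halves with the new extract ops
instead of truncate-plus-shift. A minimal sketch of the before/after
pattern, using a hypothetical helper rather than code from the tree:

/* Hypothetical helper showing the before/after pattern. */
static void gen_split_i64(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 val)
{
    tcg_gen_extrl_i64_i32(lo, val); /* was: tcg_gen_trunc_i64_i32(lo, val); */
    tcg_gen_extrh_i64_i32(hi, val); /* was: tcg_gen_shri_i64(val, val, 32);
                                            tcg_gen_trunc_i64_i32(hi, val); */
}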
@@ -1371,7 +1371,6 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
info->mmap = 0;
elf_entry = (abi_ulong) elf_ex.e_entry;

-#if defined(CONFIG_USE_GUEST_BASE)
/*
 * In case where user has not explicitly set the guest_base, we
 * probe here that should we set it automatically.
@@ -1392,7 +1391,6 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
}
}
}
-#endif /* CONFIG_USE_GUEST_BASE */

/* Do this so that we can load the interpreter, if need be. We will
   change some of these later */

@@ -35,12 +35,10 @@
#include "qemu/envlist.h"

int singlestep;
-#if defined(CONFIG_USE_GUEST_BASE)
unsigned long mmap_min_addr;
unsigned long guest_base;
int have_guest_base;
unsigned long reserved_va;
-#endif

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
@@ -682,9 +680,7 @@ static void usage(void)
"-drop-ld-preload drop LD_PRELOAD for target process\n"
"-E var=value sets/modifies targets environment variable(s)\n"
"-U var unsets targets environment variable(s)\n"
-#if defined(CONFIG_USE_GUEST_BASE)
"-B address set guest_base address to address\n"
-#endif
"-bsd type select emulated BSD type FreeBSD/NetBSD/OpenBSD (default)\n"
"\n"
"Debug options:\n"
@@ -830,11 +826,9 @@ int main(int argc, char **argv)
#endif
exit(1);
}
-#if defined(CONFIG_USE_GUEST_BASE)
} else if (!strcmp(r, "B")) {
guest_base = strtol(argv[optind++], NULL, 0);
have_guest_base = 1;
-#endif
} else if (!strcmp(r, "drop-ld-preload")) {
(void) envlist_unsetenv(envlist, "LD_PRELOAD");
} else if (!strcmp(r, "bsd")) {
@@ -923,7 +917,6 @@ int main(int argc, char **argv)
target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);

-#if defined(CONFIG_USE_GUEST_BASE)
/*
 * Now that page sizes are configured in cpu_init() we can do
 * proper page alignment for guest_base.
@@ -950,7 +943,6 @@ int main(int argc, char **argv)
fclose(fp);
}
}
-#endif /* CONFIG_USE_GUEST_BASE */

if (loader_exec(filename, argv+optind, target_environ, regs, info) != 0) {
printf("Error loading %s\n", filename);
@@ -964,9 +956,7 @@ int main(int argc, char **argv)
free(target_environ);

if (qemu_log_enabled()) {
-#if defined(CONFIG_USE_GUEST_BASE)
qemu_log("guest_base 0x%lx\n", guest_base);
-#endif
log_page_dump();

qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
@@ -986,12 +976,10 @@ int main(int argc, char **argv)
syscall_init();
signal_init();

-#if defined(CONFIG_USE_GUEST_BASE)
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
   generating the prologue until now so that the prologue can take
   the real value of GUEST_BASE into account. */
tcg_prologue_init(&tcg_ctx);
-#endif

/* build Task State */
memset(ts, 0, sizeof(TaskState));

@@ -101,9 +101,7 @@ typedef struct TaskState {

void init_task_state(TaskState *ts);
extern const char *qemu_uname_release;
-#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long mmap_min_addr;
-#endif

/* ??? See if we can avoid exposing so much of the loader internals. */
/*
configure
@@ -293,7 +293,6 @@ cocoa="no"
softmmu="yes"
linux_user="no"
bsd_user="no"
-guest_base="yes"
aix="no"
blobs="yes"
pkgversion=""
@@ -975,10 +974,6 @@ for opt do
;;
--enable-bsd-user) bsd_user="yes"
;;
---enable-guest-base) guest_base="yes"
-;;
---disable-guest-base) guest_base="no"
-;;
--enable-pie) pie="yes"
;;
--disable-pie) pie="no"
@@ -1314,7 +1309,6 @@ disabled with --disable-FEATURE, default is enabled if available:
user supported user emulation targets
linux-user all linux usermode emulation targets
bsd-user all BSD usermode emulation targets
-guest-base GUEST_BASE support for usermode emulation targets
docs build documentation
guest-agent build the QEMU Guest Agent
guest-agent-msi build guest agent Windows MSI installation package
@@ -4544,7 +4538,6 @@ fi
echo "brlapi support $brlapi"
echo "bluez support $bluez"
echo "Documentation $docs"
-echo "GUEST_BASE $guest_base"
echo "PIE $pie"
echo "vde support $vde"
echo "netmap support $netmap"
@@ -5481,9 +5474,6 @@ fi
if test "$target_user_only" = "yes" -a "$bflt" = "yes"; then
echo "TARGET_HAS_BFLT=y" >> $config_target_mak
fi
-if test "$target_user_only" = "yes" -a "$guest_base" = "yes"; then
-echo "CONFIG_USE_GUEST_BASE=y" >> $config_target_mak
-fi
if test "$target_bsd_user" = "yes" ; then
echo "CONFIG_BSD_USER=y" >> $config_target_mak
fi
@@ -160,18 +160,11 @@ static inline void tswap64s(uint64_t *s)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
-#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
-#define GUEST_BASE guest_base
-#define RESERVED_VA reserved_va
-#else
-#define GUEST_BASE 0ul
-#define RESERVED_VA 0ul
-#endif

-#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \
(1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#endif
@@ -49,20 +49,20 @@

#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + guest_base))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
-unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+unsigned long __guest = (unsigned long)(x) - guest_base; \
(__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
-(!RESERVED_VA || (__guest < RESERVED_VA)); \
+(!reserved_va || (__guest < reserved_va)); \
})
#endif

#define h2g_nocheck(x) ({ \
-unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+unsigned long __ret = (unsigned long)(x) - guest_base; \
(abi_ulong)__ret; \
})
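The g2h()/h2g_nocheck() macros above are a plain offset mapping between guest
and host addresses; the same arithmetic as a standalone sketch (hypothetical
function names, for illustration only):

static inline void *guest_to_host(unsigned long base, uint32_t gaddr)
{
    return (void *)((unsigned long)gaddr + base);   /* cf. g2h() */
}

static inline uint32_t host_to_guest(unsigned long base, void *haddr)
{
    return (uint32_t)((unsigned long)haddr - base); /* cf. h2g_nocheck() */
}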
@@ -1756,7 +1756,6 @@ static void probe_guest_base(const char *image_name,
 * it explicitly, and set guest_base appropriately.
 * In case of error we will print a suitable message and exit.
 */
-#if defined(CONFIG_USE_GUEST_BASE)
const char *errmsg;
if (!have_guest_base && !reserved_va) {
unsigned long host_start, real_start, host_size;
@@ -1795,7 +1794,6 @@ static void probe_guest_base(const char *image_name,
exit_errmsg:
fprintf(stderr, "%s: %s\n", image_name, errmsg);
exit(-1);
-#endif
}
@@ -43,7 +43,6 @@ int gdbstub_port;
envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
-#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;
int have_guest_base;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
@@ -63,7 +62,6 @@ unsigned long reserved_va = 0xf7000000;
#else
unsigned long reserved_va;
#endif
-#endif

static void usage(void);

@@ -3584,7 +3582,6 @@ static void handle_arg_cpu(const char *arg)
}
}

-#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
{
guest_base = strtol(arg, NULL, 0);
@@ -3626,7 +3623,6 @@ static void handle_arg_reserved_va(const char *arg)
exit(1);
}
}
-#endif

static void handle_arg_singlestep(const char *arg)
{
@@ -3673,12 +3669,10 @@ static const struct qemu_argument arg_table[] = {
"argv0", "forces target process argv[0] to be 'argv0'"},
{"r", "QEMU_UNAME", true, handle_arg_uname,
"uname", "set qemu uname release string to 'uname'"},
-#if defined(CONFIG_USE_GUEST_BASE)
{"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
"address", "set guest_base address to 'address'"},
{"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
"size", "reserve 'size' bytes for guest virtual address space"},
-#endif
{"d", "QEMU_LOG", true, handle_arg_log,
"item[,...]", "enable logging of specified items "
"(use '-d help' for a list of items)"},
@@ -3954,7 +3948,6 @@ int main(int argc, char **argv, char **envp)
target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);

-#if defined(CONFIG_USE_GUEST_BASE)
/*
 * Now that page sizes are configured in cpu_init() we can do
 * proper page alignment for guest_base.
@@ -3976,7 +3969,6 @@ int main(int argc, char **argv, char **envp)
mmap_next_start = reserved_va;
}
}
-#endif /* CONFIG_USE_GUEST_BASE */

/*
 * Read in mmap_min_addr kernel parameter. This value is used
@@ -4050,9 +4042,7 @@ int main(int argc, char **argv, char **envp)
free(target_environ);

if (qemu_log_enabled()) {
-#if defined(CONFIG_USE_GUEST_BASE)
qemu_log("guest_base 0x%lx\n", guest_base);
-#endif
log_page_dump();

qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
@@ -4072,12 +4062,10 @@ int main(int argc, char **argv, char **envp)
syscall_init();
signal_init();

-#if defined(CONFIG_USE_GUEST_BASE)
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
   generating the prologue until now so that the prologue can take
   the real value of GUEST_BASE into account. */
tcg_prologue_init(&tcg_ctx);
-#endif

#if defined(TARGET_I386)
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
@@ -206,7 +206,6 @@ abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

-#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space. */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
@@ -216,14 +215,14 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
int prot;
int looped = 0;

-if (size > RESERVED_VA) {
+if (size > reserved_va) {
return (abi_ulong)-1;
}

size = HOST_PAGE_ALIGN(size);
end_addr = start + size;
-if (end_addr > RESERVED_VA) {
-end_addr = RESERVED_VA;
+if (end_addr > reserved_va) {
+end_addr = reserved_va;
}
addr = end_addr - qemu_host_page_size;

@@ -232,7 +231,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
if (looped) {
return (abi_ulong)-1;
}
-end_addr = RESERVED_VA;
+end_addr = reserved_va;
addr = end_addr - qemu_host_page_size;
looped = 1;
continue;
@@ -253,7 +252,6 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)

return addr;
}
-#endif

/*
 * Find and reserve a free memory area of size 'size'. The search
@@ -276,11 +274,9 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)

size = HOST_PAGE_ALIGN(size);

-#ifdef CONFIG_USE_GUEST_BASE
-if (RESERVED_VA) {
+if (reserved_va) {
return mmap_find_vma_reserved(start, size);
}
-#endif

addr = start;
wrapped = repeat = 0;
@@ -671,7 +667,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
ret = 0;
/* unmap what we can */
if (real_start < real_end) {
-if (RESERVED_VA) {
+if (reserved_va) {
mmap_reserve(real_start, real_end - real_start);
} else {
ret = munmap(g2h(real_start), real_end - real_start);
@@ -701,7 +697,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags,
g2h(new_addr));

-if (RESERVED_VA && host_addr != MAP_FAILED) {
+if (reserved_va && host_addr != MAP_FAILED) {
/* If new and old addresses overlap then the above mremap will
   already have failed with EINVAL. */
mmap_reserve(old_addr, old_size);
@@ -719,13 +715,13 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
old_size, new_size,
flags | MREMAP_FIXED,
g2h(mmap_start));
-if ( RESERVED_VA ) {
+if (reserved_va) {
mmap_reserve(old_addr, old_size);
}
}
} else {
int prot = 0;
-if (RESERVED_VA && old_size < new_size) {
+if (reserved_va && old_size < new_size) {
abi_ulong addr;
for (addr = old_addr + old_size;
addr < old_addr + new_size;
@@ -735,7 +731,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
if (prot == 0) {
host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
-if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
+if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
mmap_reserve(old_addr + old_size, new_size - old_size);
}
} else {
@@ -2007,7 +2007,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
-tcg_gen_trunc_i64_i32(t32, va);
+tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_s(vc, t32);
tcg_temp_free_i32(t32);
break;
@@ -2027,7 +2027,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
-tcg_gen_trunc_i64_i32(t32, va);
+tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_f(vc, t32);
tcg_temp_free_i32(t32);
break;
@@ -528,9 +528,9 @@ static inline void gen_set_NZ64(TCGv_i64 result)
TCGv_i64 flag = tcg_temp_new_i64();

tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
-tcg_gen_trunc_i64_i32(cpu_ZF, flag);
+tcg_gen_extrl_i64_i32(cpu_ZF, flag);
tcg_gen_shri_i64(flag, result, 32);
-tcg_gen_trunc_i64_i32(cpu_NF, flag);
+tcg_gen_extrl_i64_i32(cpu_NF, flag);
tcg_temp_free_i64(flag);
}

@@ -540,8 +540,8 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
if (sf) {
gen_set_NZ64(result);
} else {
-tcg_gen_trunc_i64_i32(cpu_ZF, result);
-tcg_gen_trunc_i64_i32(cpu_NF, result);
+tcg_gen_extrl_i64_i32(cpu_ZF, result);
+tcg_gen_extrl_i64_i32(cpu_NF, result);
}
tcg_gen_movi_i32(cpu_CF, 0);
tcg_gen_movi_i32(cpu_VF, 0);
@@ -559,7 +559,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_movi_i64(tmp, 0);
tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

-tcg_gen_trunc_i64_i32(cpu_CF, flag);
+tcg_gen_extrl_i64_i32(cpu_CF, flag);

gen_set_NZ64(result);

@@ -568,7 +568,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_andc_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
tcg_gen_shri_i64(flag, flag, 32);
-tcg_gen_trunc_i64_i32(cpu_VF, flag);
+tcg_gen_extrl_i64_i32(cpu_VF, flag);

tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(result);
@@ -580,8 +580,8 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 tmp = tcg_temp_new_i32();

tcg_gen_movi_i32(tmp, 0);
-tcg_gen_trunc_i64_i32(t0_32, t0);
-tcg_gen_trunc_i64_i32(t1_32, t1);
+tcg_gen_extrl_i64_i32(t0_32, t0);
+tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
@@ -609,7 +609,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
gen_set_NZ64(result);

tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
-tcg_gen_trunc_i64_i32(cpu_CF, flag);
+tcg_gen_extrl_i64_i32(cpu_CF, flag);

tcg_gen_xor_i64(flag, result, t0);
tmp = tcg_temp_new_i64();
@@ -617,7 +617,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_and_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
tcg_gen_shri_i64(flag, flag, 32);
-tcg_gen_trunc_i64_i32(cpu_VF, flag);
+tcg_gen_extrl_i64_i32(cpu_VF, flag);
tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(flag);
tcg_temp_free_i64(result);
@@ -627,8 +627,8 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp;

-tcg_gen_trunc_i64_i32(t0_32, t0);
-tcg_gen_trunc_i64_i32(t1_32, t1);
+tcg_gen_extrl_i64_i32(t0_32, t0);
+tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
@@ -670,14 +670,14 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
-tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
+tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);

tcg_gen_xor_i64(vf_64, result, t0);
tcg_gen_xor_i64(tmp, t0, t1);
tcg_gen_andc_i64(vf_64, vf_64, tmp);
tcg_gen_shri_i64(vf_64, vf_64, 32);
-tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
+tcg_gen_extrl_i64_i32(cpu_VF, vf_64);

tcg_gen_mov_i64(dest, result);

@@ -691,8 +691,8 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
t1_32 = tcg_temp_new_i32();
tmp = tcg_const_i32(0);

-tcg_gen_trunc_i64_i32(t0_32, t0);
-tcg_gen_trunc_i64_i32(t1_32, t1);
+tcg_gen_extrl_i64_i32(t0_32, t0);
+tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

@@ -1301,7 +1301,7 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
TCGv_i32 nzcv = tcg_temp_new_i32();

/* take NZCV from R[t] */
-tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
+tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

/* bit 31, N */
tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
@@ -3131,8 +3131,8 @@ static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(t0, src);
-tcg_gen_trunc_i64_i32(t1, shift_amount);
+tcg_gen_extrl_i64_i32(t0, src);
+tcg_gen_extrl_i64_i32(t1, shift_amount);
tcg_gen_rotr_i32(t0, t0, t1);
tcg_gen_extu_i32_i64(dst, t0);
tcg_temp_free_i32(t0);
@@ -3680,7 +3680,7 @@ static void handle_clz(DisasContext *s, unsigned int sf,
gen_helper_clz64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_clz(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -3698,7 +3698,7 @@ static void handle_cls(DisasContext *s, unsigned int sf,
gen_helper_cls64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_cls32(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -3716,7 +3716,7 @@ static void handle_rbit(DisasContext *s, unsigned int sf,
gen_helper_rbit64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_rbit(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -5475,16 +5475,16 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
assert(elements == 4);

read_vec_element(s, tcg_elt, rn, 0, MO_32);
-tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
+tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
read_vec_element(s, tcg_elt, rn, 1, MO_32);
-tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);

do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);

read_vec_element(s, tcg_elt, rn, 2, MO_32);
-tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
read_vec_element(s, tcg_elt, rn, 3, MO_32);
-tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
+tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);

do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);

@@ -7647,7 +7647,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
static NeonGenNarrowFn * const xtnfns[3] = {
gen_helper_neon_narrow_u8,
gen_helper_neon_narrow_u16,
-tcg_gen_trunc_i64_i32,
+tcg_gen_extrl_i64_i32,
};
static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
gen_helper_neon_unarrow_sat8,
@@ -7681,10 +7681,10 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
} else {
TCGv_i32 tcg_lo = tcg_temp_new_i32();
TCGv_i32 tcg_hi = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tcg_lo, tcg_op);
+tcg_gen_extrl_i64_i32(tcg_lo, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
tcg_gen_shri_i64(tcg_op, tcg_op, 32);
-tcg_gen_trunc_i64_i32(tcg_hi, tcg_op);
+tcg_gen_extrl_i64_i32(tcg_hi, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
tcg_temp_free_i32(tcg_lo);
@@ -8593,7 +8593,7 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
static void do_narrow_high_u32(TCGv_i32 res, TCGv_i64 in)
{
tcg_gen_shri_i64(in, in, 32);
-tcg_gen_trunc_i64_i32(res, in);
+tcg_gen_extrl_i64_i32(res, in);
}

static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
@@ -1557,7 +1557,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
} else {
tmp = tcg_temp_new_i32();
iwmmxt_load_reg(cpu_V0, rd);
-tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+tcg_gen_extrl_i64_i32(tmp, cpu_V0);
}
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_mov_i32(dest, tmp);
@@ -1581,9 +1581,9 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
rdhi = (insn >> 16) & 0xf;
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
iwmmxt_load_reg(cpu_V0, wrd);
-tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
} else { /* TMCRR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
iwmmxt_store_reg(cpu_V0, wrd);
@@ -1638,15 +1638,15 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 22)) { /* WSTRD */
gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st32(tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st16(tmp, addr, get_mem_index(s));
} else { /* WSTRB */
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st8(tmp, addr, get_mem_index(s));
}
}
@@ -1946,7 +1946,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
switch ((insn >> 22) & 3) {
case 0:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext8s_i32(tmp, tmp);
} else {
@@ -1955,7 +1955,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
break;
case 1:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext16s_i32(tmp, tmp);
} else {
@@ -1964,7 +1964,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
break;
case 2:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
-tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+tcg_gen_extrl_i64_i32(tmp, cpu_M0);
break;
}
store_reg(s, rd, tmp);
@@ -2627,9 +2627,9 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)

if (insn & ARM_CP_RW_BIT) { /* MRA */
iwmmxt_load_reg(cpu_V0, acc);
-tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
} else { /* MAR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
@@ -2951,7 +2951,7 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
} else {
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
}
-tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
tcg_temp_free_i32(tcg_tmp);
tcg_temp_free_i64(tcg_res);
@@ -4683,7 +4683,7 @@ static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
case 1: gen_helper_neon_narrow_u16(dest, src); break;
-case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+case 2: tcg_gen_extrl_i64_i32(dest, src); break;
default: abort();
}
}
@@ -6254,7 +6254,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
case 2:
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -6269,7 +6269,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 2:
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -7224,11 +7224,11 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
}
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
@@ -7334,11 +7334,11 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
TCGv_i32 tmp;
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, val);
+tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
tmp = tcg_temp_new_i32();
tcg_gen_shri_i64(val, val, 32);
-tcg_gen_trunc_i64_i32(tmp, val);
+tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
}

@@ -8013,7 +8013,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
@@ -8679,7 +8679,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
break;
@@ -9749,7 +9749,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
@@ -9773,7 +9773,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, tmp64);
+tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
case 7: /* Unsigned sum of absolute differences. */
@@ -2604,9 +2604,9 @@ static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
tcg_temp_free(addr);

for (i = 0; i < (nr >> 1); i++) {
-tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
+tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
tcg_gen_shri_i64(tmp[i], tmp[i], 32);
-tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
+tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
tcg_temp_free_i64(tmp[i]);
}
if (nr & 1) {
@@ -2680,7 +2680,7 @@ DISAS_INSN(from_mac)
if (s->env->macsr & MACSR_FI) {
gen_helper_get_macf(rx, cpu_env, acc);
} else if ((s->env->macsr & MACSR_OMC) == 0) {
-tcg_gen_trunc_i64_i32(rx, acc);
+tcg_gen_extrl_i64_i32(rx, acc);
} else if (s->env->macsr & MACSR_SU) {
gen_helper_get_macs(rx, acc);
} else {
@@ -598,9 +598,9 @@ static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
tcg_gen_ext_i32_i64(t1, b);
tcg_gen_mul_i64(t0, t0, t1);

-tcg_gen_trunc_i64_i32(d, t0);
+tcg_gen_extrl_i64_i32(d, t0);
tcg_gen_shri_i64(t0, t0, 32);
-tcg_gen_trunc_i64_i32(d2, t0);
+tcg_gen_extrl_i64_i32(d2, t0);

tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -618,9 +618,9 @@ static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
tcg_gen_extu_i32_i64(t1, b);
tcg_gen_mul_i64(t0, t0, t1);

-tcg_gen_trunc_i64_i32(d, t0);
+tcg_gen_extrl_i64_i32(d, t0);
tcg_gen_shri_i64(t0, t0, 32);
-tcg_gen_trunc_i64_i32(d2, t0);
+tcg_gen_extrl_i64_i32(d2, t0);

tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -1629,7 +1629,7 @@ static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
if (ctx->hflags & MIPS_HFLAG_FRE) {
generate_exception(ctx, EXCP_RI);
}
-tcg_gen_trunc_i64_i32(t, fpu_f64[reg]);
+tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
}

static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
@@ -1649,7 +1649,7 @@ static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
if (ctx->hflags & MIPS_HFLAG_F64) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_shri_i64(t64, fpu_f64[reg], 32);
-tcg_gen_trunc_i64_i32(t, t64);
+tcg_gen_extrl_i64_i32(t, t64);
tcg_temp_free_i64(t64);
} else {
gen_load_fpr32(ctx, t, reg | 1);
@@ -279,7 +279,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
tcg_gen_add_i64(td, ta, tb);
-tcg_gen_trunc_i64_i32(res, td);
+tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 31);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
@@ -324,7 +324,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
tcg_gen_shri_i64(tcy, tcy, 10);
tcg_gen_add_i64(td, ta, tb);
tcg_gen_add_i64(td, td, tcy);
-tcg_gen_trunc_i64_i32(res, td);
+tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
@@ -366,7 +366,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
tcg_gen_sub_i64(td, ta, tb);
-tcg_gen_trunc_i64_i32(res, td);
+tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 31);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
@@ -779,9 +779,9 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
tcg_gen_ext_i32_i64(t1, dst);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_add_i64(t2, t2, t1);
-tcg_gen_trunc_i64_i32(maclo, t2);
+tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
-tcg_gen_trunc_i64_i32(machi, t2);
+tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(dst);
tcg_temp_free(ttmp);
tcg_temp_free_i64(t1);
@@ -898,7 +898,7 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
TCGv_i32 sr_ove = tcg_temp_local_new_i32();
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
-tcg_gen_trunc_i64_i32(res, td);
+tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
@@ -934,7 +934,7 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
tcg_gen_extu_i32_i64(tcy, sr_cy);
tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
tcg_gen_add_i64(td, td, tcy);
-tcg_gen_trunc_i64_i32(res, td);
+tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
@@ -1073,9 +1073,9 @@ static void dec_mac(DisasContext *dc, uint32_t insn)
tcg_gen_ext_i32_i64(t1, t0);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_add_i64(t2, t2, t1);
-tcg_gen_trunc_i64_i32(maclo, t2);
+tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
-tcg_gen_trunc_i64_i32(machi, t2);
+tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(t0);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
@@ -1092,9 +1092,9 @@ static void dec_mac(DisasContext *dc, uint32_t insn)
tcg_gen_ext_i32_i64(t1, t0);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_sub_i64(t2, t2, t1);
-tcg_gen_trunc_i64_i32(maclo, t2);
+tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
-tcg_gen_trunc_i64_i32(machi, t2);
+tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(t0);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
@@ -811,7 +811,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_LTGT0_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
+tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
c->u.s32.b = tcg_const_i32(0);
break;
case CC_OP_LTGT_32:
@@ -819,9 +819,9 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_SUBU_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
+tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
c->u.s32.b = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
+tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
break;

case CC_OP_LTGT0_64:
@@ -851,11 +851,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
+tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
tcg_gen_movi_i32(c->u.s32.b, 0);
} else {
-tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
+tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
}
break;

@@ -1532,7 +1532,7 @@ static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
store_reg32_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_const_i32(0);
-tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);

return help_branch(s, &c, is_imm, imm, o->in2);
@@ -1556,7 +1556,7 @@ static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
store_reg32h_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_const_i32(0);
-tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);

return help_branch(s, &c, 1, imm, o->in2);
@@ -1599,8 +1599,8 @@ static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
tcg_gen_add_i64(t, regs[r1], regs[r3]);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(c.u.s32.a, t);
-tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
+tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);
tcg_temp_free_i64(t);

@@ -1905,7 +1905,7 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(t1, o->in1);
+tcg_gen_extrl_i64_i32(t1, o->in1);
potential_page_fault(s);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
@@ -1977,7 +1977,7 @@ static ExitStatus op_cs(DisasContext *s, DisasOps *o)

/* Store CC back to cc_op. Wait until after the store so that any
   exception gets the old cc_op value. */
-tcg_gen_trunc_i64_i32(cc_op, cc);
+tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
@@ -2027,7 +2027,7 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
/* Save back state now that we've passed all exceptions. */
tcg_gen_mov_i64(regs[r1], outh);
tcg_gen_mov_i64(regs[r1 + 1], outl);
-tcg_gen_trunc_i64_i32(cc_op, cc);
+tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(outh);
tcg_temp_free_i64(outl);
tcg_temp_free_i64(cc);
@@ -2051,7 +2051,7 @@ static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i32 t2 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(t2, o->in1);
+tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
tcg_temp_free_i32(t2);
tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
@@ -3235,8 +3235,8 @@ static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 to = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(t1, o->in1);
-tcg_gen_trunc_i64_i32(t2, o->in2);
+tcg_gen_extrl_i64_i32(t1, o->in1);
+tcg_gen_extrl_i64_i32(t2, o->in2);
tcg_gen_rotl_i32(to, t1, t2);
tcg_gen_extu_i32_i64(o->out, to);
tcg_temp_free_i32(t1);
@@ -288,10 +288,10 @@ static inline void gen_load_fpr64(TCGv_i64 t, int reg)
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
TCGv_i32 tmp = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(tmp, t);
+tcg_gen_extrl_i64_i32(tmp, t);
tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
tcg_gen_shri_i64(t, t, 32);
-tcg_gen_trunc_i64_i32(tmp, t);
+tcg_gen_extrl_i64_i32(tmp, t);
tcg_gen_mov_i32(cpu_fregs[reg], tmp);
tcg_temp_free_i32(tmp);
}
@@ -164,7 +164,7 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
TCGv_i64 t = tcg_temp_new_i64();

tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
-tcg_gen_trunc_i64_i32(ret, t);
+tcg_gen_extrl_i64_i32(ret, t);
tcg_temp_free_i64(t);

return ret;
@@ -379,8 +379,8 @@ static TCGv_i32 gen_add32_carry32(void)
#if TARGET_LONG_BITS == 64
cc_src1_32 = tcg_temp_new_i32();
cc_src2_32 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
-tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
+tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
cc_src1_32 = cpu_cc_dst;
cc_src2_32 = cpu_cc_src;
@@ -405,8 +405,8 @@ static TCGv_i32 gen_sub32_carry32(void)
#if TARGET_LONG_BITS == 64
cc_src1_32 = tcg_temp_new_i32();
cc_src2_32 = tcg_temp_new_i32();
-tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
-tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
+tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
cc_src1_32 = cpu_cc_src;
cc_src2_32 = cpu_cc_src2;
@@ -2254,11 +2254,11 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
   the later. */
c32 = tcg_temp_new_i32();
if (cmp->is_bool) {
-tcg_gen_trunc_i64_i32(c32, cmp->c1);
+tcg_gen_extrl_i64_i32(c32, cmp->c1);
} else {
TCGv_i64 c64 = tcg_temp_new_i64();
tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
-tcg_gen_trunc_i64_i32(c32, c64);
+tcg_gen_extrl_i64_i32(c32, c64);
tcg_temp_free_i64(c64);
}
@@ -457,11 +457,11 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_andc_i64(t1, t1, t0);
-tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
-tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
@@ -540,14 +540,14 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_add_i64(t1, t2, t1);

-tcg_gen_trunc_i64_i32(ret, t1);
+tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
   t1 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* t1 < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -621,7 +621,7 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
/* only the add overflows, if t2 < t1
   calc V bit */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -1110,12 +1110,12 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_sari_i64(t2, t2, up_shift);

tcg_gen_add_i64(t3, t1, t2);
-tcg_gen_trunc_i64_i32(temp3, t3);
+tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* We produce an overflow on the host if the mul before was
   (0x80000000 * 0x80000000) << 1). If this is the
@@ -1273,7 +1273,7 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_andc_i64(t3, t3, t2);
-tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
   (0x80000000 * 0x80000000) << 1). If this is the
   case, we negate the ovf. */
@@ -1356,14 +1356,14 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_sub_i64(t1, t2, t1);

-tcg_gen_trunc_i64_i32(ret, t1);
+tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
   t2 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* result < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);

/* Calc SV bit */
@@ -1445,7 +1445,7 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
/* calc V bit, only the sub can overflow, if t1 > t2 */
tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -1630,11 +1630,11 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_and_i64(t1, t1, t0);
-tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
-tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
@@ -1973,12 +1973,12 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_add_i64(t2, t2, t4);

tcg_gen_sub_i64(t3, t1, t2);
-tcg_gen_trunc_i64_i32(temp3, t3);
+tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
-tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -2126,7 +2126,7 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_and_i64(t3, t3, t2);
-tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
   (0x80000000 * 0x80000000) << 1). If this is the
   case, we negate the ovf. */
@@ -1544,7 +1544,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
TCGv_i64 tmp = tcg_temp_new_i64(); \
tcg_gen_extu_i32_i64(tmp, reg); \
tcg_gen_##cmd##_i64(v, v, tmp); \
-tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
+tcg_gen_extrl_i64_i32(cpu_R[RRR_R], v); \
tcg_temp_free_i64(v); \
tcg_temp_free_i64(tmp); \
} while (0)
tcg/README
@@ -314,11 +314,17 @@ This operation would be equivalent to

dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00)

-* trunc_shr_i32 t0, t1, pos
+* extrl_i64_i32 t0, t1

-For 64-bit hosts only, right shift the 64-bit input T1 by POS and
-truncate to 32-bit output T0. Depending on the host, this may be
-a simple mov/shift, or may require additional canonicalization.
+For 64-bit hosts only, extract the low 32-bits of input T1 and place it
+into 32-bit output T0. Depending on the host, this may be a simple move,
+or may require additional canonicalization.
+
+* extrh_i64_i32 t0, t1
+
+For 64-bit hosts only, extract the high 32-bits of input T1 and place it
+into 32-bit output T0. Depending on the host, this may be a simple shift,
+or may require additional canonicalization.

********* Conditional moves

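In plain C terms, the semantics of the two extract ops documented above are
roughly (illustrative sketch only):

uint32_t extrl_i64_i32(uint64_t t1) { return (uint32_t)t1; }         /* low half */
uint32_t extrh_i64_i32(uint64_t t1) { return (uint32_t)(t1 >> 32); } /* high half */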
@@ -466,13 +472,25 @@ On a 32 bit target, all 64 bit operations are converted to 32 bits. A
few specific operations must be implemented to allow it (see add2_i32,
sub2_i32, brcond2_i32).

On a 64 bit target, the values are transfered between 32 and 64-bit
registers using the following ops:
- trunc_shr_i64_i32
- ext_i32_i64
- extu_i32_i64

They ensure that the values are correctly truncated or extended when
moved from a 32-bit to a 64-bit register or vice-versa. Note that the
trunc_shr_i64_i32 is an optional op. It is not necessary to implement
it if all the following conditions are met:
- 64-bit registers can hold 32-bit values
- 32-bit values in a 64-bit register do not need to stay zero or
  sign extended
- all 32-bit TCG ops ignore the high part of 64-bit registers

Floating point operations are not supported in this version. A
previous incarnation of the code generator had full support of them,
but it is better to concentrate on integer operations first.

On a 64 bit target, no assumption is made in TCG about the storage of
the 32 bit values in 64 bit registers.

4.2) Constraints

GCC like constraints are used to define the constraints of every
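For comparison, the extension ops named in the list above behave like the
following C casts (illustrative sketch only):

int64_t  ext_i32_i64(int32_t t1)   { return (int64_t)t1; }  /* sign-extend */
uint64_t extu_i32_i64(uint32_t t1) { return (uint64_t)t1; } /* zero-extend */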
tcg/aarch64/tcg-target.c

@@ -30,7 +30,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
     TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
-    TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */
+    TCG_REG_X28, /* we will reserve this for guest_base if configured */
 
     TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
     TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,

@@ -56,11 +56,7 @@ static const int tcg_target_call_oarg_regs[1] = {
 #define TCG_REG_TMP TCG_REG_X30
 
 #ifndef CONFIG_SOFTMMU
-# ifdef CONFIG_USE_GUEST_BASE
-# define TCG_REG_GUEST_BASE TCG_REG_X28
-# else
-# define TCG_REG_GUEST_BASE TCG_REG_XZR
-# endif
+#define TCG_REG_GUEST_BASE TCG_REG_X28
 #endif
 
 static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)

@@ -1051,14 +1047,29 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
    slow path for the failure case, which will be patched later when finalizing
    the slow path. Generated code returns the host addend in X1,
    clobbers X0,X2,X3,TMP. */
-static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
                              tcg_insn_unit **label_ptr, int mem_index,
                              bool is_read)
 {
-    TCGReg base = TCG_AREG0;
     int tlb_offset = is_read ?
         offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
+    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    TCGReg base = TCG_AREG0, x3;
+    uint64_t tlb_mask;
+
+    /* For aligned accesses, we check the first byte and include the alignment
+       bits within the address.  For unaligned access, we check that we don't
+       cross pages using the address of the last byte of the access.  */
+    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+        tlb_mask = TARGET_PAGE_MASK | s_mask;
+        x3 = addr_reg;
+    } else {
+        tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
+                     TCG_REG_X3, addr_reg, s_mask);
+        tlb_mask = TARGET_PAGE_MASK;
+        x3 = TCG_REG_X3;
+    }
 
     /* Extract the TLB index from the address into X0.
        X0<CPU_TLB_BITS:0> =

@@ -1066,11 +1077,9 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
     tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg,
                  TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS);
 
-    /* Store the page mask part of the address and the low s_bits into X3.
-       Later this allows checking for equality and alignment at the same time.
-       X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */
-    tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3,
-                     addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+    /* Store the page mask part of the address into X3.  */
+    tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64,
+                     TCG_REG_X3, x3, tlb_mask);
 
     /* Add any "high bits" from the tlb offset to the env address into X2,
        to take advantage of the LSL12 form of the ADDI instruction.

@@ -1207,17 +1216,16 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
-    TCGMemOp s_bits = memop & MO_SIZE;
     tcg_insn_unit *label_ptr;
 
-    tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
+    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                            TCG_REG_X1, otype, addr_reg);
     add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
+                           guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
                            otype, addr_reg);
 #endif /* CONFIG_SOFTMMU */
 }

@@ -1229,17 +1237,16 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
-    TCGMemOp s_bits = memop & MO_SIZE;
     tcg_insn_unit *label_ptr;
 
-    tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
+    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg,
                            TCG_REG_X1, otype, addr_reg);
-    add_qemu_ldst_label(s, false, oi, s_bits == MO_64, data_reg, addr_reg,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
+                        data_reg, addr_reg, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_st_direct(s, memop, data_reg,
-                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
+                           guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
                            otype, addr_reg);
 #endif /* CONFIG_SOFTMMU */
 }

@@ -1556,6 +1563,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16s_i32:
         tcg_out_sxt(s, ext, MO_16, a0, a1);
         break;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;

@@ -1567,6 +1575,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
         tcg_out_uxt(s, MO_16, a0, a1);
         break;
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
         break;

@@ -1712,6 +1721,8 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_ext8u_i64, { "r", "r" } },
     { INDEX_op_ext16u_i64, { "r", "r" } },
     { INDEX_op_ext32u_i64, { "r", "r" } },
+    { INDEX_op_ext_i32_i64, { "r", "r" } },
+    { INDEX_op_extu_i32_i64, { "r", "r" } },
 
     { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
     { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

@@ -1794,9 +1805,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 
-#if defined(CONFIG_USE_GUEST_BASE)
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE);
+#if !defined(CONFIG_SOFTMMU)
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
     }
 #endif
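The aligned/unaligned split above is the heart of the new softmmu fast path: for a possibly unaligned access, the TLB tag is computed from the address of the last byte, so a page-crossing access mismatches the TLB compare and falls through to the slow path. A standalone sketch of the predicate being implemented (page size and names are illustrative, not QEMU API):

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_PAGE_MASK (~(uint64_t)0xfff)   /* assume 4 KiB pages */

    /* True if an access of 'size' bytes at 'addr' crosses a page boundary,
       i.e. the first and last byte live on different pages. */
    static bool crosses_page(uint64_t addr, unsigned size)
    {
        return ((addr ^ (addr + size - 1)) & DEMO_PAGE_MASK) != 0;
    }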
tcg/aarch64/tcg-target.h

@@ -70,7 +70,8 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
 
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
tcg/arm/tcg-target.c

@@ -1493,8 +1493,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
         tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
     } else {
         tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);

@@ -1623,8 +1623,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
         tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                               datahi, addrlo, TCG_REG_TMP);
     } else {
tcg/i386/tcg-target.c

@@ -1172,7 +1172,7 @@ static void * const qemu_st_helpers[16] = {
    First argument register is clobbered.  */
 
 static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
-                                    int mem_index, TCGMemOp s_bits,
+                                    int mem_index, TCGMemOp opc,
                                     tcg_insn_unit **label_ptr, int which)
 {
     const TCGReg r0 = TCG_REG_L0;

@@ -1180,6 +1180,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     TCGType ttype = TCG_TYPE_I32;
     TCGType htype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0;
+    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
 
     if (TCG_TARGET_REG_BITS == 64) {
         if (TARGET_LONG_BITS == 64) {

@@ -1193,13 +1195,19 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     }
 
     tcg_out_mov(s, htype, r0, addrlo);
-    tcg_out_mov(s, ttype, r1, addrlo);
+    if (aligned) {
+        tcg_out_mov(s, ttype, r1, addrlo);
+    } else {
+        /* For unaligned access check that we don't cross pages using
+           the page address of the last byte.  */
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+    }
 
     tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     tgen_arithi(s, ARITH_AND + trexw, r1,
-                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
+                TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
     tgen_arithi(s, ARITH_AND + hrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
 
@@ -1424,7 +1432,7 @@ int arch_prctl(int code, unsigned long addr);
 static int guest_base_flags;
 static inline void setup_guest_base_seg(void)
 {
-    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
+    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
         guest_base_flags = P_GS;
     }
 }

@@ -1545,7 +1553,6 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     int mem_index;
-    TCGMemOp s_bits;
     tcg_insn_unit *label_ptr[2];
 #endif
 
@@ -1558,9 +1565,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 
 #if defined(CONFIG_SOFTMMU)
     mem_index = get_mmuidx(oi);
-    s_bits = opc & MO_SIZE;
 
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
+    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_read));
 
     /* TLB Hit.  */

@@ -1571,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
                         s->code_ptr, label_ptr);
 #else
     {
-        int32_t offset = GUEST_BASE;
+        int32_t offset = guest_base;
         TCGReg base = addrlo;
         int index = -1;
         int seg = 0;

@@ -1580,7 +1586,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
            We can do this with the ADDR32 prefix if we're not using
           a guest base, or when using segmentation.  Otherwise we
           need to zero-extend manually.  */
-        if (GUEST_BASE == 0 || guest_base_flags) {
+        if (guest_base == 0 || guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
            if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {

@@ -1591,8 +1597,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
                 tcg_out_ext32u(s, TCG_REG_L0, base);
                 base = TCG_REG_L0;
             }
-            if (offset != GUEST_BASE) {
-                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+            if (offset != guest_base) {
+                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                 index = TCG_REG_L1;
                 offset = 0;
             }

@@ -1687,7 +1693,6 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     int mem_index;
-    TCGMemOp s_bits;
     tcg_insn_unit *label_ptr[2];
 #endif
 
@@ -1700,9 +1705,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 
 #if defined(CONFIG_SOFTMMU)
     mem_index = get_mmuidx(oi);
-    s_bits = opc & MO_SIZE;
 
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
+    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_write));
 
     /* TLB Hit.  */

@@ -1713,12 +1717,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
                         s->code_ptr, label_ptr);
 #else
     {
-        int32_t offset = GUEST_BASE;
+        int32_t offset = guest_base;
         TCGReg base = addrlo;
         int seg = 0;
 
         /* See comment in tcg_out_qemu_ld re zero-extension of addrlo.  */
-        if (GUEST_BASE == 0 || guest_base_flags) {
+        if (guest_base == 0 || guest_base_flags) {
             seg = guest_base_flags;
             offset = 0;
             if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {

@@ -1727,12 +1731,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
         } else if (TCG_TARGET_REG_BITS == 64) {
             /* ??? Note that we can't use the same SIB addressing scheme
                as for loads, since we require L0 free for bswap.  */
-            if (offset != GUEST_BASE) {
+            if (offset != guest_base) {
                 if (TARGET_LONG_BITS == 32) {
                     tcg_out_ext32u(s, TCG_REG_L0, base);
                     base = TCG_REG_L0;
                 }
-                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
                 base = TCG_REG_L1;
                 offset = 0;

@@ -2064,9 +2068,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_bswap64_i64:
         tcg_out_bswap64(s, args[0]);
         break;
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tcg_out_ext32u(s, args[0], args[1]);
         break;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_ext32s(s, args[0], args[1]);
         break;

@@ -2201,6 +2207,9 @@ static const TCGTargetOpDef x86_op_defs[] = {
     { INDEX_op_ext16u_i64, { "r", "r" } },
     { INDEX_op_ext32u_i64, { "r", "r" } },
 
+    { INDEX_op_ext_i32_i64, { "r", "r" } },
+    { INDEX_op_extu_i32_i64, { "r", "r" } },
+
     { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
     { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
 
@@ -2306,8 +2315,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc(s, OPC_RET, 0, 0, 0);
 
 #if !defined(CONFIG_SOFTMMU)
-    /* Try to set up a segment register to point to GUEST_BASE.  */
-    if (GUEST_BASE) {
+    /* Try to set up a segment register to point to guest_base.  */
+    if (guest_base) {
         setup_guest_base_seg();
     }
 #endif
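The ARCH_SET_GS path above lets 64-bit user-mode reach guest memory through a %gs-prefixed access instead of adding guest_base explicitly. A minimal host-side demo of the same mechanism, assuming Linux on x86-64; this is illustrative code, not QEMU's:

    /* Point %gs at a buffer, then read through it (Linux x86-64 only). */
    #include <asm/prctl.h>      /* ARCH_SET_GS */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        static uint8_t guest_ram[4096];
        guest_ram[16] = 0x5a;

        /* arch_prctl has no glibc wrapper; go through syscall(2). */
        if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)guest_ram)) {
            perror("arch_prctl");
            return 1;
        }

        unsigned int v;
        /* offset 16 is now relative to the %gs base, i.e. guest_ram */
        asm volatile("movzbl %%gs:16, %0" : "=r"(v));
        printf("0x%02x\n", v);  /* prints 0x5a */
        return 0;
    }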
tcg/i386/tcg-target.h

@@ -102,7 +102,8 @@ extern bool have_bmi1;
 #define TCG_TARGET_HAS_mulsh_i32 0
 
 #if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_ext8s_i64 1
tcg/ia64/tcg-target.c

@@ -40,13 +40,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_R55
-#else
-#define TCG_GUEST_BASE_REG TCG_REG_R0
-#endif
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
 #endif
 
 /* Branch registers */

@@ -1765,7 +1760,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
     bswap = opc & MO_BSWAP;
 
 #if TARGET_LONG_BITS == 32
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mII,
                        INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,

@@ -1829,7 +1824,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
         }
     }
 #else
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, MmI,
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                    TCG_GUEST_BASE_REG, addr_reg),

@@ -1889,7 +1884,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     bswap = opc & MO_BSWAP;
 
 #if TARGET_LONG_BITS == 32
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mII,
                        INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,

@@ -1935,7 +1930,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
                        INSN_NOP_M,
                        INSN_NOP_I);
 #else
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                      TCG_GUEST_BASE_REG, addr_reg);
         addr_reg = TCG_REG_R2;

@@ -1944,7 +1939,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     }
 
     if (!bswap) {
-        tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI),
+        tcg_out_bundle(s, (guest_base ? MmI : mmI),
                        add_guest_base,
                        tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                    data_reg, addr_reg),

@@ -2148,9 +2143,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
         tcg_out_ext(s, OPC_ZXT2_I29, args[0], args[1]);
         break;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]);
         break;
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]);
         break;

@@ -2301,6 +2298,8 @@ static const TCGTargetOpDef ia64_op_defs[] = {
     { INDEX_op_ext16u_i64, { "r", "rZ"} },
     { INDEX_op_ext32s_i64, { "r", "rZ"} },
     { INDEX_op_ext32u_i64, { "r", "rZ"} },
+    { INDEX_op_ext_i32_i64, { "r", "rZ" } },
+    { INDEX_op_extu_i32_i64, { "r", "rZ" } },
 
     { INDEX_op_bswap16_i64, { "r", "rZ" } },
     { INDEX_op_bswap32_i64, { "r", "rZ" } },

@@ -2349,14 +2348,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                     tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                                 TCG_REG_B6, TCG_REG_R33, 0));
 
-    /* ??? If GUEST_BASE < 0x200000, we could load the register via
+    /* ??? If guest_base < 0x200000, we could load the register via
        an ADDL in the M slot of the next bundle.  */
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mlx,
                        INSN_NOP_M,
-                       tcg_opc_l2 (GUEST_BASE),
+                       tcg_opc_l2(guest_base),
                        tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
-                                   TCG_GUEST_BASE_REG, GUEST_BASE));
+                                   TCG_GUEST_BASE_REG, guest_base));
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 
tcg/ia64/tcg-target.h

@@ -160,7 +160,8 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
 
 #define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
 #define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
tcg/mips/tcg-target.c

@@ -1180,12 +1180,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);
 #else
-    if (GUEST_BASE == 0 && data_regl != addr_regl) {
+    if (guest_base == 0 && data_regl != addr_regl) {
         base = addr_regl;
-    } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
-        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
+    } else if (guest_base == (int16_t)guest_base) {
+        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
     } else {
-        tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
+        tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
         tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
     }
     tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);

@@ -1314,14 +1314,14 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);
 #else
-    if (GUEST_BASE == 0) {
+    if (guest_base == 0) {
         base = addr_regl;
     } else {
         base = TCG_REG_A0;
-        if (GUEST_BASE == (int16_t)GUEST_BASE) {
-            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
+        if (guest_base == (int16_t)guest_base) {
+            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
         } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
+            tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
             tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
         }
     }
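The guest_base == (int16_t)guest_base test above is the usual idiom for "does this constant fit a signed 16-bit immediate" (which MIPS ADDIU takes); otherwise the backend materializes the constant and adds it with ADDU. A self-contained check of the idiom:

    #include <assert.h>
    #include <stdint.h>

    /* A value fits ADDIU's signed 16-bit immediate iff the round-trip
       through int16_t does not change it. */
    static int fits_simm16(long v) { return v == (int16_t)v; }

    int main(void)
    {
        assert(fits_simm16(0x7fff));    /*  32767: fits */
        assert(!fits_simm16(0x8000));   /*  32768: does not */
        assert(fits_simm16(-0x8000));   /* -32768: fits */
        return 0;
    }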
tcg/optimize.c | 253

@@ -35,14 +35,8 @@
 glue(glue(case INDEX_op_, x), _i32): \
 glue(glue(case INDEX_op_, x), _i64)
 
-typedef enum {
-    TCG_TEMP_UNDEF = 0,
-    TCG_TEMP_CONST,
-    TCG_TEMP_COPY,
-} tcg_temp_state;
-
 struct tcg_temp_info {
-    tcg_temp_state state;
+    bool is_const;
     uint16_t prev_copy;
     uint16_t next_copy;
     tcg_target_ulong val;

@@ -50,23 +44,47 @@ struct tcg_temp_info {
 };
 
 static struct tcg_temp_info temps[TCG_MAX_TEMPS];
+static TCGTempSet temps_used;
 
-/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
-   the copy flag from the left temp.  */
+static inline bool temp_is_const(TCGArg arg)
+{
+    return temps[arg].is_const;
+}
+
+static inline bool temp_is_copy(TCGArg arg)
+{
+    return temps[arg].next_copy != arg;
+}
+
+/* Reset TEMP's state, possibly removing the temp for the list of copies.  */
 static void reset_temp(TCGArg temp)
 {
-    if (temps[temp].state == TCG_TEMP_COPY) {
-        if (temps[temp].prev_copy == temps[temp].next_copy) {
-            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
-        } else {
-            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
-            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
-        }
-    }
-    temps[temp].state = TCG_TEMP_UNDEF;
+    temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
+    temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
+    temps[temp].next_copy = temp;
+    temps[temp].prev_copy = temp;
+    temps[temp].is_const = false;
     temps[temp].mask = -1;
 }
 
+/* Reset all temporaries, given that there are NB_TEMPS of them.  */
+static void reset_all_temps(int nb_temps)
+{
+    bitmap_zero(temps_used.l, nb_temps);
+}
+
+/* Initialize and activate a temporary.  */
+static void init_temp_info(TCGArg temp)
+{
+    if (!test_bit(temp, temps_used.l)) {
+        temps[temp].next_copy = temp;
+        temps[temp].prev_copy = temp;
+        temps[temp].is_const = false;
+        temps[temp].mask = -1;
+        set_bit(temp, temps_used.l);
+    }
+}
+
 static TCGOp *insert_op_before(TCGContext *s, TCGOp *old_op,
                                TCGOpcode opc, int nargs)
 {

@@ -98,16 +116,6 @@ static TCGOp *insert_op_before(TCGContext *s, TCGOp *old_op,
     return new_op;
 }
 
-/* Reset all temporaries, given that there are NB_TEMPS of them.  */
-static void reset_all_temps(int nb_temps)
-{
-    int i;
-    for (i = 0; i < nb_temps; i++) {
-        temps[i].state = TCG_TEMP_UNDEF;
-        temps[i].mask = -1;
-    }
-}
-
 static int op_bits(TCGOpcode op)
 {
     const TCGOpDef *def = &tcg_op_defs[op];

@@ -179,8 +187,7 @@ static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
         return true;
     }
 
-    if (temps[arg1].state != TCG_TEMP_COPY
-        || temps[arg2].state != TCG_TEMP_COPY) {
+    if (!temp_is_copy(arg1) || !temp_is_copy(arg2)) {
         return false;
     }
 
@@ -202,7 +209,7 @@ static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args,
     op->opc = new_op;
 
     reset_temp(dst);
-    temps[dst].state = TCG_TEMP_CONST;
+    temps[dst].is_const = true;
     temps[dst].val = val;
     mask = val;
     if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {

@@ -223,11 +230,6 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args,
         return;
     }
 
-    if (temps[src].state == TCG_TEMP_CONST) {
-        tcg_opt_gen_movi(s, op, args, dst, temps[src].val);
-        return;
-    }
-
     TCGOpcode new_op = op_to_mov(op->opc);
     tcg_target_ulong mask;
 
@@ -241,19 +243,13 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args,
     }
     temps[dst].mask = mask;
 
-    assert(temps[src].state != TCG_TEMP_CONST);
-
     if (s->temps[src].type == s->temps[dst].type) {
-        if (temps[src].state != TCG_TEMP_COPY) {
-            temps[src].state = TCG_TEMP_COPY;
-            temps[src].next_copy = src;
-            temps[src].prev_copy = src;
-        }
-        temps[dst].state = TCG_TEMP_COPY;
         temps[dst].next_copy = temps[src].next_copy;
         temps[dst].prev_copy = src;
         temps[temps[dst].next_copy].prev_copy = dst;
         temps[src].next_copy = dst;
+        temps[dst].is_const = temps[src].is_const;
+        temps[dst].val = temps[src].val;
     }
 
     args[0] = dst;

@@ -292,7 +288,6 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     case INDEX_op_shr_i32:
         return (uint32_t)x >> (y & 31);
 
-    case INDEX_op_trunc_shr_i32:
     case INDEX_op_shr_i64:
         return (uint64_t)x >> (y & 63);
 
@@ -347,12 +342,18 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     CASE_OP_32_64(ext16u):
         return (uint16_t)x;
 
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         return (int32_t)x;
 
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_extrl_i64_i32:
     case INDEX_op_ext32u_i64:
         return (uint32_t)x;
 
+    case INDEX_op_extrh_i64_i32:
+        return (uint64_t)x >> 32;
+
     case INDEX_op_muluh_i32:
         return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
     case INDEX_op_mulsh_i32:
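The new folding cases are exactly C's conversion rules. A standalone check of what each opcode must compute (plain C, outside of TCG; values chosen for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x8765432180000000ull;

        /* ext_i32_i64 / ext32s_i64: sign-extend the low 32 bits */
        assert((uint64_t)(int32_t)x == 0xffffffff80000000ull);
        /* extu_i32_i64 / extrl_i64_i32 / ext32u_i64: zero-extend/truncate */
        assert((uint64_t)(uint32_t)x == 0x0000000080000000ull);
        /* extrh_i64_i32: the high 32 bits */
        assert((x >> 32) == 0x87654321ull);
        return 0;
    }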
@@ -395,7 +396,7 @@ static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
 {
     TCGArg res = do_constant_folding_2(op, x, y);
     if (op_bits(op) == 32) {
-        res &= 0xffffffff;
+        res = (int32_t)res;
     }
     return res;
 }

@@ -481,7 +482,7 @@ static bool do_constant_folding_cond_eq(TCGCond c)
 static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                        TCGArg y, TCGCond c)
 {
-    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
+    if (temp_is_const(x) && temp_is_const(y)) {
         switch (op_bits(op)) {
         case 32:
             return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);

@@ -492,7 +493,7 @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
         }
     } else if (temps_are_copies(x, y)) {
         return do_constant_folding_cond_eq(c);
-    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
+    } else if (temp_is_const(y) && temps[y].val == 0) {
         switch (c) {
         case TCG_COND_LTU:
             return 0;

@@ -513,12 +514,10 @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
     TCGArg al = p1[0], ah = p1[1];
     TCGArg bl = p2[0], bh = p2[1];
 
-    if (temps[bl].state == TCG_TEMP_CONST
-        && temps[bh].state == TCG_TEMP_CONST) {
+    if (temp_is_const(bl) && temp_is_const(bh)) {
         uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;
 
-        if (temps[al].state == TCG_TEMP_CONST
-            && temps[ah].state == TCG_TEMP_CONST) {
+        if (temp_is_const(al) && temp_is_const(ah)) {
             uint64_t a;
             a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
             return do_constant_folding_cond_64(a, b, c);

@@ -544,8 +543,8 @@ static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
 {
     TCGArg a1 = *p1, a2 = *p2;
     int sum = 0;
-    sum += temps[a1].state == TCG_TEMP_CONST;
-    sum -= temps[a2].state == TCG_TEMP_CONST;
+    sum += temp_is_const(a1);
+    sum -= temp_is_const(a2);
 
     /* Prefer the constant in second argument, and then the form
        op a, a, b, which is better handled on non-RISC hosts. */

@@ -560,10 +559,10 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 {
     int sum = 0;
-    sum += temps[p1[0]].state == TCG_TEMP_CONST;
-    sum += temps[p1[1]].state == TCG_TEMP_CONST;
-    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
-    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
+    sum += temp_is_const(p1[0]);
+    sum += temp_is_const(p1[1]);
+    sum -= temp_is_const(p2[0]);
+    sum -= temp_is_const(p2[1]);
     if (sum > 0) {
         TCGArg t;
         t = p1[0], p1[0] = p2[0], p2[0] = t;

@@ -598,17 +597,29 @@ void tcg_optimize(TCGContext *s)
         const TCGOpDef *def = &tcg_op_defs[opc];
 
         oi_next = op->next;
+
+        /* Count the arguments, and initialize the temps that are
+           going to be used */
         if (opc == INDEX_op_call) {
             nb_oargs = op->callo;
             nb_iargs = op->calli;
+            for (i = 0; i < nb_oargs + nb_iargs; i++) {
+                tmp = args[i];
+                if (tmp != TCG_CALL_DUMMY_ARG) {
+                    init_temp_info(tmp);
+                }
+            }
         } else {
             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
+            for (i = 0; i < nb_oargs + nb_iargs; i++) {
+                init_temp_info(args[i]);
+            }
         }
 
         /* Do copy propagation */
         for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-            if (temps[args[i]].state == TCG_TEMP_COPY) {
+            if (temp_is_copy(args[i])) {
                 args[i] = find_better_copy(s, args[i]);
             }
         }

@@ -678,8 +689,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(sar):
         CASE_OP_32_64(rotl):
         CASE_OP_32_64(rotr):
-            if (temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[1]].val == 0) {
+            if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                 tcg_opt_gen_movi(s, op, args, args[0], 0);
                 continue;
             }

@@ -689,7 +699,7 @@ void tcg_optimize(TCGContext *s)
                 TCGOpcode neg_op;
                 bool have_neg;
 
-                if (temps[args[2]].state == TCG_TEMP_CONST) {
+                if (temp_is_const(args[2])) {
                     /* Proceed with possible constant folding. */
                     break;
                 }

@@ -703,8 +713,7 @@ void tcg_optimize(TCGContext *s)
                 if (!have_neg) {
                     break;
                 }
-                if (temps[args[1]].state == TCG_TEMP_CONST
-                    && temps[args[1]].val == 0) {
+                if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                     op->opc = neg_op;
                     reset_temp(args[0]);
                     args[1] = args[2];

@@ -714,34 +723,30 @@ void tcg_optimize(TCGContext *s)
             break;
         CASE_OP_32_64(xor):
         CASE_OP_32_64(nand):
-            if (temps[args[1]].state != TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[2]].val == -1) {
+            if (!temp_is_const(args[1])
+                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                 i = 1;
                 goto try_not;
             }
             break;
         CASE_OP_32_64(nor):
-            if (temps[args[1]].state != TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[2]].val == 0) {
+            if (!temp_is_const(args[1])
+                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                 i = 1;
                 goto try_not;
             }
             break;
         CASE_OP_32_64(andc):
-            if (temps[args[2]].state != TCG_TEMP_CONST
-                && temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[1]].val == -1) {
+            if (!temp_is_const(args[2])
+                && temp_is_const(args[1]) && temps[args[1]].val == -1) {
                 i = 2;
                 goto try_not;
             }
             break;
         CASE_OP_32_64(orc):
         CASE_OP_32_64(eqv):
-            if (temps[args[2]].state != TCG_TEMP_CONST
-                && temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[1]].val == 0) {
+            if (!temp_is_const(args[2])
+                && temp_is_const(args[1]) && temps[args[1]].val == 0) {
                 i = 2;
                 goto try_not;
             }

@@ -782,9 +787,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(or):
         CASE_OP_32_64(xor):
         CASE_OP_32_64(andc):
-            if (temps[args[1]].state != TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[2]].val == 0) {
+            if (!temp_is_const(args[1])
+                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                 tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                 continue;
             }

@@ -792,9 +796,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(and):
         CASE_OP_32_64(orc):
         CASE_OP_32_64(eqv):
-            if (temps[args[1]].state != TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[2]].val == -1) {
+            if (!temp_is_const(args[1])
+                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                 tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                 continue;
             }

@@ -832,17 +835,26 @@ void tcg_optimize(TCGContext *s)
 
         CASE_OP_32_64(and):
             mask = temps[args[2]].mask;
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
         and_const:
                 affected = temps[args[1]].mask & ~mask;
             }
             mask = temps[args[1]].mask & mask;
             break;
 
+        case INDEX_op_ext_i32_i64:
+            if ((temps[args[1]].mask & 0x80000000) != 0) {
+                break;
+            }
+        case INDEX_op_extu_i32_i64:
+            /* We do not compute affected as it is a size changing op.  */
+            mask = (uint32_t)temps[args[1]].mask;
+            break;
+
         CASE_OP_32_64(andc):
             /* Known-zeros does not imply known-ones.  Therefore unless
                args[2] is constant, we can't infer anything from it.  */
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 mask = ~temps[args[2]].mask;
                 goto and_const;
             }

@@ -851,37 +863,40 @@ void tcg_optimize(TCGContext *s)
             break;
 
         case INDEX_op_sar_i32:
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 tmp = temps[args[2]].val & 31;
                 mask = (int32_t)temps[args[1]].mask >> tmp;
             }
             break;
         case INDEX_op_sar_i64:
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 tmp = temps[args[2]].val & 63;
                 mask = (int64_t)temps[args[1]].mask >> tmp;
             }
             break;
 
         case INDEX_op_shr_i32:
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 tmp = temps[args[2]].val & 31;
                 mask = (uint32_t)temps[args[1]].mask >> tmp;
             }
             break;
         case INDEX_op_shr_i64:
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 tmp = temps[args[2]].val & 63;
                 mask = (uint64_t)temps[args[1]].mask >> tmp;
             }
             break;
 
-        case INDEX_op_trunc_shr_i32:
-            mask = (uint64_t)temps[args[1]].mask >> args[2];
+        case INDEX_op_extrl_i64_i32:
+            mask = (uint32_t)temps[args[1]].mask;
+            break;
+        case INDEX_op_extrh_i64_i32:
+            mask = (uint64_t)temps[args[1]].mask >> 32;
             break;
 
         CASE_OP_32_64(shl):
-            if (temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2])) {
                 tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                 mask = temps[args[1]].mask << tmp;
             }

@@ -962,8 +977,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mul):
         CASE_OP_32_64(muluh):
         CASE_OP_32_64(mulsh):
-            if ((temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[2]].val == 0)) {
+            if ((temp_is_const(args[2]) && temps[args[2]].val == 0)) {
                 tcg_opt_gen_movi(s, op, args, args[0], 0);
                 continue;
             }

@@ -1018,21 +1032,17 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(ext16u):
         case INDEX_op_ext32s_i64:
         case INDEX_op_ext32u_i64:
-            if (temps[args[1]].state == TCG_TEMP_CONST) {
+        case INDEX_op_ext_i32_i64:
+        case INDEX_op_extu_i32_i64:
+        case INDEX_op_extrl_i64_i32:
+        case INDEX_op_extrh_i64_i32:
+            if (temp_is_const(args[1])) {
                 tmp = do_constant_folding(opc, temps[args[1]].val, 0);
                 tcg_opt_gen_movi(s, op, args, args[0], tmp);
                 break;
             }
             goto do_default;
 
-        case INDEX_op_trunc_shr_i32:
-            if (temps[args[1]].state == TCG_TEMP_CONST) {
-                tmp = do_constant_folding(opc, temps[args[1]].val, args[2]);
-                tcg_opt_gen_movi(s, op, args, args[0], tmp);
-                break;
-            }
-            goto do_default;
-
         CASE_OP_32_64(add):
         CASE_OP_32_64(sub):
         CASE_OP_32_64(mul):

@@ -1055,8 +1065,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(divu):
         CASE_OP_32_64(rem):
         CASE_OP_32_64(remu):
-            if (temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                 tmp = do_constant_folding(opc, temps[args[1]].val,
                                           temps[args[2]].val);
                 tcg_opt_gen_movi(s, op, args, args[0], tmp);

@@ -1065,8 +1074,7 @@ void tcg_optimize(TCGContext *s)
             goto do_default;
 
         CASE_OP_32_64(deposit):
-            if (temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[2]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                 tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                 temps[args[2]].val);
                 tcg_opt_gen_movi(s, op, args, args[0], tmp);

@@ -1106,10 +1114,8 @@ void tcg_optimize(TCGContext *s)
 
         case INDEX_op_add2_i32:
         case INDEX_op_sub2_i32:
-            if (temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[3]].state == TCG_TEMP_CONST
-                && temps[args[4]].state == TCG_TEMP_CONST
-                && temps[args[5]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2]) && temp_is_const(args[3])
+                && temp_is_const(args[4]) && temp_is_const(args[5])) {
                 uint32_t al = temps[args[2]].val;
                 uint32_t ah = temps[args[3]].val;
                 uint32_t bl = temps[args[4]].val;

@@ -1128,8 +1134,8 @@ void tcg_optimize(TCGContext *s)
 
                 rl = args[0];
                 rh = args[1];
-                tcg_opt_gen_movi(s, op, args, rl, (uint32_t)a);
-                tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(a >> 32));
+                tcg_opt_gen_movi(s, op, args, rl, (int32_t)a);
+                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(a >> 32));
 
                 /* We've done all we need to do with the movi.  Skip it.  */
                 oi_next = op2->next;

@@ -1138,8 +1144,7 @@ void tcg_optimize(TCGContext *s)
             goto do_default;
 
         case INDEX_op_mulu2_i32:
-            if (temps[args[2]].state == TCG_TEMP_CONST
-                && temps[args[3]].state == TCG_TEMP_CONST) {
+            if (temp_is_const(args[2]) && temp_is_const(args[3])) {
                 uint32_t a = temps[args[2]].val;
                 uint32_t b = temps[args[3]].val;
                 uint64_t r = (uint64_t)a * b;

@@ -1149,8 +1154,8 @@ void tcg_optimize(TCGContext *s)
 
                 rl = args[0];
                 rh = args[1];
-                tcg_opt_gen_movi(s, op, args, rl, (uint32_t)r);
-                tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(r >> 32));
+                tcg_opt_gen_movi(s, op, args, rl, (int32_t)r);
+                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(r >> 32));
 
                 /* We've done all we need to do with the movi.  Skip it.  */
                 oi_next = op2->next;

@@ -1171,10 +1176,8 @@ void tcg_optimize(TCGContext *s)
                     tcg_op_remove(s, op);
                 }
             } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
-                       && temps[args[2]].state == TCG_TEMP_CONST
-                       && temps[args[3]].state == TCG_TEMP_CONST
-                       && temps[args[2]].val == 0
-                       && temps[args[3]].val == 0) {
+                       && temp_is_const(args[2]) && temps[args[2]].val == 0
+                       && temp_is_const(args[3]) && temps[args[3]].val == 0) {
                 /* Simplify LT/GE comparisons vs zero to a single compare
                    vs the high word of the input.  */
             do_brcond_high:

@@ -1236,10 +1239,8 @@ void tcg_optimize(TCGContext *s)
             do_setcond_const:
                 tcg_opt_gen_movi(s, op, args, args[0], tmp);
             } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
-                       && temps[args[3]].state == TCG_TEMP_CONST
-                       && temps[args[4]].state == TCG_TEMP_CONST
-                       && temps[args[3]].val == 0
-                       && temps[args[4]].val == 0) {
+                       && temp_is_const(args[3]) && temps[args[3]].val == 0
+                       && temp_is_const(args[4]) && temps[args[4]].val == 0) {
                 /* Simplify LT/GE comparisons vs zero to a single compare
                    vs the high word of the input.  */
             do_setcond_high:

@@ -1299,7 +1300,9 @@ void tcg_optimize(TCGContext *s)
             if (!(args[nb_oargs + nb_iargs + 1]
                   & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                 for (i = 0; i < nb_globals; i++) {
-                    reset_temp(i);
+                    if (test_bit(i, temps_used.l)) {
+                        reset_temp(i);
+                    }
                 }
             }
             goto do_reset_output;
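The copy tracking above keeps each set of equivalent temps on a circular doubly-linked list threaded through next_copy/prev_copy, where a singleton (next_copy == self) means "no copies"; that is why reset_temp no longer needs a state flag. A toy version of the list operations, with illustrative names (not the QEMU code itself):

    #include <assert.h>
    #include <stdint.h>

    struct node { uint16_t next, prev; };
    static struct node n[8];

    static void init(uint16_t t)     { n[t].next = n[t].prev = t; } /* singleton */
    static int  has_copy(uint16_t t) { return n[t].next != t; }

    static void link_copy(uint16_t dst, uint16_t src)  /* dst joins src's ring */
    {
        n[dst].next = n[src].next;
        n[dst].prev = src;
        n[n[dst].next].prev = dst;
        n[src].next = dst;
    }

    static void unlink_copy(uint16_t t)  /* remove t; safe on singletons too */
    {
        n[n[t].next].prev = n[t].prev;
        n[n[t].prev].next = n[t].next;
        n[t].next = n[t].prev = t;
    }

    int main(void)
    {
        init(1); init(2);
        link_copy(2, 1);
        assert(has_copy(1) && has_copy(2));
        unlink_copy(2);
        assert(!has_copy(1) && !has_copy(2));
        return 0;
    }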
tcg/ppc/tcg-target.c

@@ -80,19 +80,13 @@
 
 static tcg_insn_unit *tb_ret_addr;
 
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif
-
 #include "elf.h"
 static bool have_isa_2_06;
 #define HAVE_ISA_2_06  have_isa_2_06
 #define HAVE_ISEL      have_isa_2_06
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG 30
-#else
-#define TCG_GUEST_BASE_REG 0
 #endif
 
 #ifndef NDEBUG

@@ -1361,7 +1355,7 @@ static void * const qemu_st_helpers[16] = {
    in CR7, loads the addend of the TLB into R3, and returns the register
    containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */
 
-static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
+static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, bool is_read)
 {

@@ -1371,6 +1365,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
     int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
     TCGReg base = TCG_AREG0;
+    TCGMemOp s_bits = opc & MO_SIZE;
 
     /* Extract the page index, shifted into place for tlb index.  */
     if (TCG_TARGET_REG_BITS == 64) {

@@ -1422,17 +1417,37 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
        to minimize any load use delay.  */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);
 
-    /* Clear the non-page, non-alignment bits from the address.  */
+    /* Clear the non-page, non-alignment bits from the address */
     if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
+        /* We don't support unaligned accesses on 32-bits, preserve
+         * the bottom bits and thus trigger a comparison failure on
+         * unaligned accesses
+         */
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                     (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
-    } else if (!s_bits) {
-        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo,
-                    0, 63 - TARGET_PAGE_BITS);
+    } else if (s_bits) {
+        /* > byte access, we need to handle alignment */
+        if ((opc & MO_AMASK) == MO_ALIGN) {
+            /* Alignment required by the front-end, same as 32-bits */
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
+                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+        } else {
+            /* We support unaligned accesses, we need to make sure we fail
+             * if we cross a page boundary. The trick is to add the
+             * access_size-1 to the address before masking the low bits.
+             * That will make the address overflow to the next page if we
+             * cross a page boundary which will then force a mismatch of
+             * the TLB compare since the next page cannot possibly be in
+             * the same TLB index.
+             */
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, (1 << s_bits) - 1));
+            tcg_out_rld(s, RLDICR, TCG_REG_R0, TCG_REG_R0,
+                        0, 63 - TARGET_PAGE_BITS);
+        }
     } else {
-        tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
-                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
-        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+        /* Byte access, just chop off the bits below the page index */
+        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, 0, 63 - TARGET_PAGE_BITS);
     }
 
     if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {

@@ -1592,7 +1607,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 
 #ifdef CONFIG_SOFTMMU
     mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true);
+    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
     label_ptr = s->code_ptr;

@@ -1600,7 +1615,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 
     rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
         addrlo = TCG_REG_TMP1;

@@ -1667,7 +1682,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 
 #ifdef CONFIG_SOFTMMU
     mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, false);
+    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
     label_ptr = s->code_ptr;

@@ -1675,7 +1690,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 
     rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
         addrlo = TCG_REG_TMP1;

@@ -1779,9 +1794,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     }
     tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
 
-#ifdef CONFIG_USE_GUEST_BASE
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+#ifndef CONFIG_SOFTMMU
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif

@@ -2200,12 +2215,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_ext16s_i64:
         c = EXTSH;
         goto gen_ext;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         c = EXTSW;
         goto gen_ext;
     gen_ext:
         tcg_out32(s, c | RS(args[1]) | RA(args[0]));
         break;
+    case INDEX_op_extu_i32_i64:
+        tcg_out_ext32u(s, args[0], args[1]);
+        break;
 
     case INDEX_op_setcond_i32:
         tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],

@@ -2482,6 +2501,8 @@ static const TCGTargetOpDef ppc_op_defs[] = {
     { INDEX_op_ext8s_i64, { "r", "r" } },
     { INDEX_op_ext16s_i64, { "r", "r" } },
     { INDEX_op_ext32s_i64, { "r", "r" } },
+    { INDEX_op_ext_i32_i64, { "r", "r" } },
+    { INDEX_op_extu_i32_i64, { "r", "r" } },
     { INDEX_op_bswap16_i64, { "r", "r" } },
     { INDEX_op_bswap32_i64, { "r", "r" } },
     { INDEX_op_bswap64_i64, { "r", "r" } },

tcg/ppc/tcg-target.h

@@ -77,7 +77,8 @@ typedef enum {
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32 0
 #define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 1

tcg/s390/tcg-target.c

@@ -51,17 +51,12 @@
 /* A scratch register that may be be used throughout the backend.  */
 #define TCG_TMP0        TCG_REG_R14
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_R13
-#else
-#define TCG_GUEST_BASE_REG TCG_REG_R0
-#endif
-
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
 #endif
 
 /* All of the following instructions are prefixed with their instruction
    format, and are defined as 8- or 16-bit quantities, even when the two
    halves of the 16-bit quantity may appear 32 bits apart in the insn.

@@ -1504,20 +1499,36 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    TCGMemOp s_bits = opc & MO_SIZE;
-    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
-    int ofs;
+    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int ofs, a_off;
+    uint64_t tlb_mask;
+
+    /* For aligned accesses, we check the first byte and include the alignment
+       bits within the address.  For unaligned access, we check that we don't
+       cross pages using the address of the last byte of the access.  */
+    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+        a_off = 0;
+        tlb_mask = TARGET_PAGE_MASK | s_mask;
+    } else {
+        a_off = s_mask;
+        tlb_mask = TARGET_PAGE_MASK;
+    }
 
     if (facilities & FACILITY_GEN_INST_EXT) {
         tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                       64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                       63 - CPU_TLB_ENTRY_BITS,
                       64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
-        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        if (a_off) {
+            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
+            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
+        } else {
+            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        }
     } else {
         tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                      TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
+        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
         tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
         tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);

@@ -1622,9 +1633,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
         tgen_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
-    if (GUEST_BASE < 0x80000) {
+    if (guest_base < 0x80000) {
         *index_reg = TCG_REG_NONE;
-        *disp = GUEST_BASE;
+        *disp = guest_base;
     } else {
         *index_reg = TCG_GUEST_BASE_REG;
         *disp = 0;

@@ -2090,6 +2101,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16s_i64:
         tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
         break;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tgen_ext32s(s, args[0], args[1]);
         break;

@@ -2099,6 +2111,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
         tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
         break;
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tgen_ext32u(s, args[0], args[1]);
         break;

@@ -2251,6 +2264,9 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_ext32s_i64, { "r", "r" } },
     { INDEX_op_ext32u_i64, { "r", "r" } },
 
+    { INDEX_op_ext_i32_i64, { "r", "r" } },
+    { INDEX_op_extu_i32_i64, { "r", "r" } },
+
     { INDEX_op_bswap16_i64, { "r", "r" } },
     { INDEX_op_bswap32_i64, { "r", "r" } },
     { INDEX_op_bswap64_i64, { "r", "r" } },

@@ -2328,8 +2344,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
               TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
               CPU_TEMP_BUF_NLONGS * sizeof(long));
 
-    if (GUEST_BASE >= 0x80000) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+    if (guest_base >= 0x80000) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 
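The ppc and s390 versions rely on the same arithmetic as aarch64 and i386: adding access_size-1 before masking pushes a page-crossing address onto the next page, so the TLB compare fails. A worked check of that identity, assuming 4 KiB pages (plain C, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_PAGE_MASK (~(uint64_t)0xfff)   /* assume 4 KiB pages */

    int main(void)
    {
        /* A 4-byte access at 0xffe crosses into the next page: the masked
           last-byte address lands on a different page than the access. */
        uint64_t addr = 0xffe, s_mask = 3;
        assert(((addr + s_mask) & DEMO_PAGE_MASK) == 0x1000);
        assert((addr & DEMO_PAGE_MASK) == 0x0);

        /* An aligned 4-byte access at 0xff8 stays on its own page. */
        addr = 0xff8;
        assert(((addr + s_mask) & DEMO_PAGE_MASK) == (addr & DEMO_PAGE_MASK));
        return 0;
    }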
tcg/s390/tcg-target.h

@@ -72,7 +72,8 @@ typedef enum TCGReg {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
 
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
tcg/sparc/tcg-target.c

@@ -83,10 +83,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #define TCG_REG_T1  TCG_REG_G1
 #define TCG_REG_T2  TCG_REG_O7
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 # define TCG_GUEST_BASE_REG TCG_REG_I5
-#else
-# define TCG_GUEST_BASE_REG TCG_REG_G0
 #endif
 
 static const int tcg_target_reg_alloc_order[] = {

@@ -955,9 +953,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
               INSN_IMM13(-frame_size));
 
-#ifdef CONFIG_USE_GUEST_BASE
-    if (GUEST_BASE != 0) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+#ifndef CONFIG_SOFTMMU
+    if (guest_base != 0) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif

@@ -1146,7 +1144,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }

@@ -1201,7 +1199,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }

@@ -1407,18 +1405,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_divu_i64:
         c = ARITH_UDIVX;
         goto gen_arith;
+    case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
         break;
+    case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
         break;
-    case INDEX_op_trunc_shr_i32:
-        if (a2 == 0) {
-            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-        } else {
-            tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
-        }
+    case INDEX_op_extrl_i64_i32:
+        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+        break;
+    case INDEX_op_extrh_i64_i32:
+        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
         break;
 
     case INDEX_op_brcond_i64:

@@ -1531,9 +1530,12 @@ static const TCGTargetOpDef sparc_op_defs[] = {
     { INDEX_op_neg_i64, { "R", "RJ" } },
     { INDEX_op_not_i64, { "R", "RJ" } },
 
-    { INDEX_op_ext32s_i64, { "R", "r" } },
-    { INDEX_op_ext32u_i64, { "R", "r" } },
-    { INDEX_op_trunc_shr_i32,  { "r", "R" } },
+    { INDEX_op_ext32s_i64, { "R", "R" } },
+    { INDEX_op_ext32u_i64, { "R", "R" } },
+    { INDEX_op_ext_i32_i64, { "R", "r" } },
+    { INDEX_op_extu_i32_i64,  { "R", "r" } },
+    { INDEX_op_extrl_i64_i32,  { "r", "R" } },
+    { INDEX_op_extrh_i64_i32,  { "r", "R" } },
 
     { INDEX_op_brcond_i64, { "RZ", "RJ" } },
     { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
tcg/sparc/tcg-target.h

@@ -118,7 +118,8 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 
-#define TCG_TARGET_HAS_trunc_shr_i32 1
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 0
48
tcg/tcg-op.c
48
tcg/tcg-op.c
@ -1737,28 +1737,28 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)

/* Size changing operations. */

void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned count)
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
tcg_debug_assert(count < 64);
if (TCG_TARGET_REG_BITS == 32) {
if (count >= 32) {
tcg_gen_shri_i32(ret, TCGV_HIGH(arg), count - 32);
} else if (count == 0) {
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, arg, count);
tcg_gen_mov_i32(ret, TCGV_LOW(t));
tcg_temp_free_i64(t);
}
} else if (TCG_TARGET_HAS_trunc_shr_i32) {
tcg_gen_op3i_i32(INDEX_op_trunc_shr_i32, ret,
MAKE_TCGV_I32(GET_TCGV_I64(arg)), count);
} else if (count == 0) {
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
} else if (TCG_TARGET_HAS_extrl_i64_i32) {
tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
GET_TCGV_I32(ret), GET_TCGV_I64(arg));
} else {
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
}
}

void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
} else if (TCG_TARGET_HAS_extrh_i64_i32) {
tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
GET_TCGV_I32(ret), GET_TCGV_I64(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, arg, 32);
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
tcg_temp_free_i64(t);
}
@ -1770,9 +1770,8 @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
/* Note: we assume the target supports move between
32 and 64 bit registers. */
tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64,
GET_TCGV_I64(ret), GET_TCGV_I32(arg));
}
}

@ -1782,9 +1781,8 @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
} else {
/* Note: we assume the target supports move between
32 and 64 bit registers. */
tcg_gen_ext32s_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64,
GET_TCGV_I64(ret), GET_TCGV_I32(arg));
}
}

@ -1820,8 +1818,8 @@ void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
tcg_gen_mov_i32(lo, TCGV_LOW(arg));
tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
} else {
tcg_gen_trunc_shr_i64_i32(lo, arg, 0);
tcg_gen_trunc_shr_i64_i32(hi, arg, 32);
tcg_gen_extrl_i64_i32(lo, arg);
tcg_gen_extrh_i64_i32(hi, arg);
}
}

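To make the new interface concrete, here is a minimal frontend-style sketch using only the helpers introduced above; the temporaries are illustrative and not part of the patch:

    /* Split a 64-bit value into its 32-bit halves with the new helpers;
     * this replaces tcg_gen_trunc_shr_i64_i32(lo, val, 0) and
     * tcg_gen_trunc_shr_i64_i32(hi, val, 32). */
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(lo, val);   /* lo = bits 31:0 of val */
    tcg_gen_extrh_i64_i32(hi, val);   /* hi = bits 63:32 of val */
    /* ...or both at once, which per the hunk above now expands to
     * exactly the two calls just shown: */
    tcg_gen_extr_i64_i32(lo, hi, val);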
tcg/tcg-op.h
@ -684,7 +684,8 @@ static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned int c);
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);

@ -693,11 +694,6 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
}

static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
tcg_gen_trunc_shr_i64_i32(ret, arg, 0);
}

/* QEMU specific operations. */

#ifndef TARGET_LONG_BITS
@ -853,7 +849,7 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#define tcg_gen_divu_tl tcg_gen_divu_i64
#define tcg_gen_remu_tl tcg_gen_remu_i64
#define tcg_gen_discard_tl tcg_gen_discard_i64
#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32
#define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64
#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64
@ -932,7 +928,7 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#define tcg_gen_remu_tl tcg_gen_remu_i32
#define tcg_gen_discard_tl tcg_gen_discard_i32
#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32
#define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_tl tcg_gen_mov_i32
#define tcg_gen_ext_i32_tl tcg_gen_mov_i32
#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64
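A short sketch of why both macro tables are touched: target-independent frontend code spells the operation in its _tl form and picks up whichever definition matches TARGET_LONG_BITS. The variable names below are hypothetical, not from the patch:

    /* Hypothetical frontend fragment: on a 64-bit target this expands to
     * tcg_gen_extrl_i64_i32(); on a 32-bit target it is a plain mov. */
    TCGv_i32 low = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(low, some_tl_value);   /* some_tl_value: an assumed TCGv */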
@ -138,8 +138,14 @@ DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))

DEF(trunc_shr_i32, 1, 1, 1,
IMPL(TCG_TARGET_HAS_trunc_shr_i32)
/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
DEF(extrl_i64_i32, 1, 1, 0,
IMPL(TCG_TARGET_HAS_extrl_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(extrh_i64_i32, 1, 1, 0,
IMPL(TCG_TARGET_HAS_extrh_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))

DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
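For readers skimming tcg-opc.h, the DEF() arguments after the opcode name are the output-, input-, and constant-argument counts, then the implementation flags; a gloss on the new rows under that convention (commentary, not part of the patch):

    /* DEF(extrl_i64_i32, 1, 1, 0, ...): one output, one input, no constant
     * argument.  The removed trunc_shr_i32 carried a constant shift count
     * as its third field; the extract ops bake the only useful counts
     * (0 and 32) into two separate opcodes instead.  TCG_OPF_NOT_PRESENT
     * hides the ops on 32-bit hosts, where the two halves already live in
     * separate registers. */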
@ -66,7 +66,8 @@ typedef uint64_t TCGRegSet;

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
#define TCG_TARGET_HAS_trunc_shr_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
@ -210,6 +210,8 @@ static const TCGTargetOpDef tcg_target_op_defs[] = {
#if TCG_TARGET_HAS_ext32u_i64
{ INDEX_op_ext32u_i64, { R, R } },
#endif
{ INDEX_op_ext_i32_i64, { R, R } },
{ INDEX_op_extu_i32_i64, { R, R } },
#if TCG_TARGET_HAS_bswap16_i64
{ INDEX_op_bswap16_i64, { R, R } },
#endif
@ -701,6 +703,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */
case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */
case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
#endif /* TCG_TARGET_REG_BITS == 64 */
case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */
case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */
@ -84,7 +84,8 @@
#define TCG_TARGET_HAS_mulsh_i32 0

#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_trunc_shr_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
tci.c
@ -1033,18 +1033,20 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
#endif
#if TCG_TARGET_HAS_ext32s_i64
case INDEX_op_ext32s_i64:
#endif
case INDEX_op_ext_i32_i64:
t0 = *tb_ptr++;
t1 = tci_read_r32s(&tb_ptr);
tci_write_reg64(t0, t1);
break;
#endif
#if TCG_TARGET_HAS_ext32u_i64
case INDEX_op_ext32u_i64:
#endif
case INDEX_op_extu_i32_i64:
t0 = *tb_ptr++;
t1 = tci_read_r32(&tb_ptr);
tci_write_reg64(t0, t1);
break;
#endif
#if TCG_TARGET_HAS_bswap16_i64
case INDEX_op_bswap16_i64:
TODO();
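The interpreter can reuse the ext32s/ext32u bodies because the new ops are exactly sign- and zero-extension of the low word; a standalone sketch of that difference in plain C, with illustrative values only:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t low = 0xdeadbeefu;
        int64_t  s = (int32_t)low;  /* ext_i32_i64: sign-extend, as tci_read_r32s() does */
        uint64_t u = low;           /* extu_i32_i64: zero-extend, as tci_read_r32() does */
        /* prints: ffffffffdeadbeef deadbeef */
        printf("%" PRIx64 " %" PRIx64 "\n", (uint64_t)s, u);
        return 0;
    }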
@ -688,7 +688,7 @@ void tcg_exec_init(unsigned long tb_size)
tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
tcg_prologue_init(&tcg_ctx);