Merge tag 'single-binary-20241015' of https://github.com/philmd/qemu into staging

Remove some target-specific endianness knowledge from target/.

For MIPS, propagate endianness at the board level, using QOM property.

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmcOihcACgkQ4+MsLN6t
# wN4VaRAAor+5gUJiV0OOMQVfP8e1o9ssKcYy9m31zLeii1Iq2M/2oz6YK4OjdQDe
# oJ8VRJvBPKC2Bpi14TfKsSGaP8CGNO/hdxcMi71pjHZC+bjHt5Uv0U/7T4XCBWuV
# N+85juN1PTc4cci4FkGIXaPB1YWL00L56BDCzja0uhLF4+Xe2eYGVrPXeFCkN/3I
# Ky7jHlB/dgzc47kTXvg5snCee2egDFf/x4igwZj6+gWJyo+jubRpwoOqKbK2M0Nd
# VvNpBVFNGZMP3Fn9lh45uDOuRWGu6zSIPBVkjoFc+wdGsFsITIMrZ3h46UffsGTJ
# F1j6Zsq6hoLbaNRRjZ6OsN6u683oo1lknmWauD62LrjXcX0RlRwRFbMD0AjedR8t
# 6+YHg5LlwGg6r/AOtEe28ggXZohB2vjr2V0MJm1x/XgLYhFHoN3//jxn22oOHj4p
# 0z3+eDc3Se8JNRV6jPMHbbuTqZqZjRgVFbYN3aMbdoXYzhpYnrPj6f7WQL2smiAW
# C2vdswrubQWKou1wcn6rbg0nnMRVTh+GxrtZ3mkgaxNzgNFxsrX8YubMsh77XUcr
# mnCGj1tE3hp40xsuSk6yZXY3ZZiTyZasvO1wq4gWOI9le0Zmq+d335F9+IVJ8RlP
# YhA+MY5aeomsixVRdmrPrgOfanQiHXv02lsbperU8QFfGRhf2Y8=
# =Qydb
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 15 Oct 2024 16:28:23 BST
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD  6BB2 E3E3 2C2C DEAD C0DE

* tag 'single-binary-20241015' of https://github.com/philmd/qemu: (33 commits)
  hw/mips: Have mips_cpu_create_with_clock() take an endianness argument
  hw/mips/cps: Set the vCPU 'cpu-big-endian' property
  target/mips: Expose MIPSCPU::is_big_endian property
  target/mips: Use tcg_constant_tl() instead of tcg_gen_movi_tl()
  target/mips: Use gen_op_addr_addi() when possible
  target/mips: Have gen_addiupc() expand $pc during translation
  target/mips: Replace MO_TE by mo_endian()
  target/mips: Introduce mo_endian() helper
  target/mips: Remove unused MEMOP_IDX() macro
  target/mips: Rename unused sysemu argument of OP_LD_ATOMIC()
  target/mips: Explode MO_TExx -> MO_TE | MO_xx
  target/mips: Factor mo_endian_rev() out of MXU code
  target/mips: Convert mips16e decr_and_load/store() macros to functions
  target/mips: Replace MO_TE by mo_endian_env() in get_pte()
  target/mips: Introduce mo_endian_env() helper
  target/mips: Rename cpu_is_bigendian() -> disas_is_bigendian()
  target/mips: Declare mips_env_is_bigendian() in 'internal.h'
  hw/xtensa/xtfpga: Remove TARGET_BIG_ENDIAN #ifdef'ry
  target/ppc: Use tcg_constant_tl() instead of tcg_gen_movi_tl()
  target/tricore: Use tcg_constant_tl() instead of tcg_gen_movi_tl()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 08ae519ab8 (Peter Maydell, 2024-10-16 20:22:08 +01:00)
35 changed files with 370 additions and 392 deletions
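Before the per-file hunks, a minimal sketch of the board-level pattern this series introduces. This is illustration only, not a hunk from the series; the function and property names (mips_cpu_create_with_clock(), "cpu-big-endian", "big-endian") are the ones visible in the diffs below, and error handling plus the rest of the board setup are elided.

    /* Illustration: boards now decide endianness and hand it to the vCPU,
     * instead of target/ code relying on TARGET_BIG_ENDIAN at compile time. */

    /* A little-endian-only board (cf. the fuloong2e/loongson3 hunks): */
    cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false);

    /* A board built in either endianness keeps the build-time default: */
    cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk,
                                     TARGET_BIG_ENDIAN);

    /* CPS-based boards (Malta with SMP) set the property on the CPS
     * container, which forwards it to each vCPU before realize: */
    object_property_set_bool(OBJECT(&s->cps), "cpu-big-endian",
                             TARGET_BIG_ENDIAN, &error_abort);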

@ -133,9 +133,9 @@ static void mb_add_mod(MultibootState *s,
p = (char *)s->mb_buf + s->offset_mbinfo + MB_MOD_SIZE * s->mb_mods_count;
stl_p(p + MB_MOD_START, start);
stl_p(p + MB_MOD_END, end);
stl_p(p + MB_MOD_CMDLINE, cmdline_phys);
stl_le_p(p + MB_MOD_START, start);
stl_le_p(p + MB_MOD_END, end);
stl_le_p(p + MB_MOD_CMDLINE, cmdline_phys);
mb_debug("mod%02d: "HWADDR_FMT_plx" - "HWADDR_FMT_plx,
s->mb_mods_count, start, end);
@ -168,9 +168,9 @@ int load_multiboot(X86MachineState *x86ms,
/* Ok, let's see if it is a multiboot image.
The header is 12x32bit long, so the latest entry may be 8192 - 48. */
for (i = 0; i < (8192 - 48); i += 4) {
if (ldl_p(header+i) == 0x1BADB002) {
uint32_t checksum = ldl_p(header+i+8);
flags = ldl_p(header+i+4);
if (ldl_le_p(header + i) == 0x1BADB002) {
uint32_t checksum = ldl_le_p(header + i + 8);
flags = ldl_le_p(header + i + 4);
checksum += flags;
checksum += (uint32_t)0x1BADB002;
if (!checksum) {
@ -223,11 +223,11 @@ int load_multiboot(X86MachineState *x86ms,
mb_kernel_size, (size_t)mh_entry_addr);
} else {
/* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */
uint32_t mh_header_addr = ldl_p(header+i+12);
uint32_t mh_load_end_addr = ldl_p(header+i+20);
uint32_t mh_bss_end_addr = ldl_p(header+i+24);
uint32_t mh_header_addr = ldl_le_p(header + i + 12);
uint32_t mh_load_end_addr = ldl_le_p(header + i + 20);
uint32_t mh_bss_end_addr = ldl_le_p(header + i + 24);
mh_load_addr = ldl_p(header+i+16);
mh_load_addr = ldl_le_p(header + i + 16);
if (mh_header_addr < mh_load_addr) {
error_report("invalid load_addr address");
exit(1);
@ -239,7 +239,7 @@ int load_multiboot(X86MachineState *x86ms,
uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr);
uint32_t mb_load_size = 0;
mh_entry_addr = ldl_p(header+i+28);
mh_entry_addr = ldl_le_p(header + i + 28);
if (mh_load_end_addr) {
if (mh_load_end_addr < mh_load_addr) {
@ -364,22 +364,21 @@ int load_multiboot(X86MachineState *x86ms,
/* Commandline support */
kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline);
stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));
stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name));
stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */
stl_le_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));
stl_le_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs,
bootloader_name));
stl_le_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
stl_le_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */
/* the kernel is where we want it to be now */
stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY
stl_le_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY
| MULTIBOOT_FLAGS_BOOT_DEVICE
| MULTIBOOT_FLAGS_CMDLINE
| MULTIBOOT_FLAGS_MODULES
| MULTIBOOT_FLAGS_MMAP
| MULTIBOOT_FLAGS_BOOTLOADER);
stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */
stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
stl_le_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */
stl_le_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
mb_debug("multiboot: entry_addr = %#x", mh_entry_addr);
mb_debug(" mb_buf_phys = "HWADDR_FMT_plx, mbs.mb_buf_phys);


@ -586,7 +586,7 @@ static bool load_elfboot(const char *kernel_filename,
uint64_t elf_low, elf_high;
int kernel_size;
if (ldl_p(header) != 0x464c457f) {
if (ldl_le_p(header) != 0x464c457f) {
return false; /* no elfboot */
}
@ -669,8 +669,8 @@ void x86_load_linux(X86MachineState *x86ms,
* kernel protocol version.
* Please see https://www.kernel.org/doc/Documentation/x86/boot.txt
*/
if (ldl_p(header + 0x202) == 0x53726448) /* Magic signature "HdrS" */ {
protocol = lduw_p(header + 0x206);
if (ldl_le_p(header + 0x202) == 0x53726448) /* Magic signature "HdrS" */ {
protocol = lduw_le_p(header + 0x206);
} else {
/*
* This could be a multiboot kernel. If it is, let's stop treating it
@ -762,7 +762,7 @@ void x86_load_linux(X86MachineState *x86ms,
/* highest address for loading the initrd */
if (protocol >= 0x20c &&
lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
lduw_le_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
/*
* Linux has supported initrd up to 4 GB for a very long time (2007,
* long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013),
@ -781,7 +781,7 @@ void x86_load_linux(X86MachineState *x86ms,
*/
initrd_max = UINT32_MAX;
} else if (protocol >= 0x203) {
initrd_max = ldl_p(header + 0x22c);
initrd_max = ldl_le_p(header + 0x22c);
} else {
initrd_max = 0x37ffffff;
}
@ -797,10 +797,10 @@ void x86_load_linux(X86MachineState *x86ms,
sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;
if (protocol >= 0x202) {
stl_p(header + 0x228, cmdline_addr);
stl_le_p(header + 0x228, cmdline_addr);
} else {
stw_p(header + 0x20, 0xA33F);
stw_p(header + 0x22, cmdline_addr - real_addr);
stw_le_p(header + 0x20, 0xA33F);
stw_le_p(header + 0x22, cmdline_addr - real_addr);
}
/* handle vga= parameter */
@ -824,7 +824,7 @@ void x86_load_linux(X86MachineState *x86ms,
exit(1);
}
}
stw_p(header + 0x1fa, video_mode);
stw_le_p(header + 0x1fa, video_mode);
}
/* loader type */
@ -839,7 +839,7 @@ void x86_load_linux(X86MachineState *x86ms,
/* heap */
if (protocol >= 0x201) {
header[0x211] |= 0x80; /* CAN_USE_HEAP */
stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
stw_le_p(header + 0x224, cmdline_addr - real_addr - 0x200);
}
/* load initrd */
@ -879,8 +879,8 @@ void x86_load_linux(X86MachineState *x86ms,
sev_load_ctx.initrd_data = initrd_data;
sev_load_ctx.initrd_size = initrd_size;
stl_p(header + 0x218, initrd_addr);
stl_p(header + 0x21c, initrd_size);
stl_le_p(header + 0x218, initrd_addr);
stl_le_p(header + 0x21c, initrd_size);
}
/* load kernel and setup */
@ -926,7 +926,7 @@ void x86_load_linux(X86MachineState *x86ms,
kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
kernel = g_realloc(kernel, kernel_size);
stq_p(header + 0x250, prot_addr + setup_data_offset);
stq_le_p(header + 0x250, prot_addr + setup_data_offset);
setup_data = (struct setup_data *)(kernel + setup_data_offset);
setup_data->next = 0;


@ -77,6 +77,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp)
MIPSCPU *cpu = MIPS_CPU(object_new(s->cpu_type));
CPUMIPSState *env = &cpu->env;
object_property_set_bool(OBJECT(cpu), "big-endian", s->cpu_is_bigendian,
&error_abort);
/* All VPs are halted on reset. Leave powering up to CPC. */
object_property_set_bool(OBJECT(cpu), "start-powered-off", true,
&error_abort);
@ -167,6 +170,7 @@ static Property mips_cps_properties[] = {
DEFINE_PROP_UINT32("num-vp", MIPSCPSState, num_vp, 1),
DEFINE_PROP_UINT32("num-irq", MIPSCPSState, num_irq, 256),
DEFINE_PROP_STRING("cpu-type", MIPSCPSState, cpu_type),
DEFINE_PROP_BOOL("cpu-big-endian", MIPSCPSState, cpu_is_bigendian, false),
DEFINE_PROP_END_OF_LIST()
};


@ -229,7 +229,7 @@ static void mips_fuloong2e_init(MachineState *machine)
clock_set_hz(cpuclk, 533080000); /* ~533 MHz */
/* init CPUs */
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false);
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);


@ -212,7 +212,8 @@ static void mips_jazz_init(MachineState *machine,
* ext_clk[jazz_model].pll_mult);
/* init CPUs */
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk,
TARGET_BIG_ENDIAN);
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);


@ -567,7 +567,7 @@ static void mips_loongson3_virt_init(MachineState *machine)
int ip;
/* init CPUs */
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false);
/* Init internal devices */
cpu_mips_irq_init_cpu(cpu);


@ -1034,7 +1034,8 @@ static void create_cpu_without_cps(MachineState *ms, MaltaState *s,
int i;
for (i = 0; i < ms->smp.cpus; i++) {
cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk);
cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk,
TARGET_BIG_ENDIAN);
/* Init internal devices */
cpu_mips_irq_init_cpu(cpu);
@ -1054,6 +1055,8 @@ static void create_cps(MachineState *ms, MaltaState *s,
object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS);
object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type,
&error_fatal);
object_property_set_bool(OBJECT(&s->cps), "cpu-big-endian",
TARGET_BIG_ENDIAN, &error_abort);
object_property_set_uint(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
&error_fatal);
qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk);


@ -160,7 +160,8 @@ mips_mipssim_init(MachineState *machine)
#endif
/* Init CPUs. */
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk,
TARGET_BIG_ENDIAN);
env = &cpu->env;
reset_info = g_new0(ResetData, 1);


@ -415,8 +415,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
}
}
if (entry_point != env->pc) {
uint8_t boot[] = {
#if TARGET_BIG_ENDIAN
uint8_t boot_be[] = {
0x60, 0x00, 0x08, /* j 1f */
0x00, /* .literal_position */
0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */
@ -425,7 +424,8 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
0x10, 0xff, 0xfe, /* l32r a0, entry_pc */
0x12, 0xff, 0xfe, /* l32r a2, entry_a2 */
0x0a, 0x00, 0x00, /* jx a0 */
#else
};
uint8_t boot_le[] = {
0x06, 0x02, 0x00, /* j 1f */
0x00, /* .literal_position */
0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */
@ -434,14 +434,16 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
0x01, 0xfe, 0xff, /* l32r a0, entry_pc */
0x21, 0xfe, 0xff, /* l32r a2, entry_a2 */
0xa0, 0x00, 0x00, /* jx a0 */
#endif
};
const size_t boot_sz = TARGET_BIG_ENDIAN ? sizeof(boot_be)
: sizeof(boot_le);
uint8_t *boot = TARGET_BIG_ENDIAN ? boot_be : boot_le;
uint32_t entry_pc = tswap32(entry_point);
uint32_t entry_a2 = tswap32(tagptr);
memcpy(boot + 4, &entry_pc, sizeof(entry_pc));
memcpy(boot + 8, &entry_a2, sizeof(entry_a2));
cpu_physical_memory_write(env->pc, boot, sizeof(boot));
cpu_physical_memory_write(env->pc, boot, boot_sz);
}
} else {
if (flash) {


@ -164,12 +164,6 @@ static inline MemOp size_memop(unsigned size)
return (MemOp)ctz32(size);
}
/* Big endianness from MemOp. */
static inline bool memop_big_endian(MemOp op)
{
return (op & MO_BSWAP) == MO_BE;
}
/**
* memop_alignment_bits:
* @memop: MemOp value


@ -28,7 +28,7 @@ bool target_words_bigendian(void);
#ifdef COMPILING_PER_TARGET
#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
#else
#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
#define target_needs_bswap() (HOST_BIG_ENDIAN != target_words_bigendian())
#endif /* COMPILING_PER_TARGET */
static inline uint16_t tswap16(uint16_t s)


@ -95,9 +95,13 @@ static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len)
#if TARGET_LONG_BITS == 64
#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
#define ldtul_p(addr) ldq_p(addr)
#define ldtul_le_p(addr) ldq_le_p(addr)
#define ldtul_be_p(addr) ldq_be_p(addr)
#else
#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
#define ldtul_p(addr) ldl_p(addr)
#define ldtul_le_p(addr) ldl_le_p(addr)
#define ldtul_be_p(addr) ldl_be_p(addr)
#endif
#endif /* _GDBSTUB_HELPERS_H_ */


@ -38,6 +38,7 @@ struct MIPSCPSState {
uint32_t num_vp;
uint32_t num_irq;
char *cpu_type;
bool cpu_is_bigendian;
MemoryRegion container;
MIPSGCRState gcr;


@ -140,6 +140,8 @@ CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
#undef CPU_CONVERT
/*
* Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
* a compile-time constant if you pass in a constant. So this can be


@ -754,8 +754,8 @@ static bool restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
env->eip = tswapl(sc->rip);
#endif
cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
cpu_x86_load_seg(env, R_CS, lduw_le_p(&sc->cs) | 3);
cpu_x86_load_seg(env, R_SS, lduw_le_p(&sc->ss) | 3);
tmpflags = tswapl(sc->eflags);
env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);


@ -59,7 +59,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPUAlphaState *env = cpu_env(cs);
target_ulong tmp = ldtul_p(mem_buf);
target_ulong tmp = ldq_le_p(mem_buf);
CPU_DoubleU d;
switch (n) {


@ -69,13 +69,13 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
/* SP */
if (n == 33) {
env->sp = lduw_p(mem_buf);
env->sp = lduw_le_p(mem_buf);
return 2;
}
/* PC */
if (n == 34) {
env->pc_w = ldl_p(mem_buf) / 2;
env->pc_w = ldl_le_p(mem_buf) / 2;
return 4;
}


@ -52,7 +52,7 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUHexagonState *env = cpu_env(cs);
if (n == HEX_REG_P3_0_ALIASED) {
uint32_t p3_0 = ldtul_p(mem_buf);
uint32_t p3_0 = ldl_le_p(mem_buf);
for (int i = 0; i < NUM_PREGS; i++) {
env->pred[i] = extract32(p3_0, i * 8, 8);
}
@ -60,14 +60,14 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
if (n < TOTAL_PER_THREAD_REGS) {
env->gpr[n] = ldtul_p(mem_buf);
env->gpr[n] = ldl_le_p(mem_buf);
return sizeof(target_ulong);
}
n -= TOTAL_PER_THREAD_REGS;
if (n < NUM_PREGS) {
env->pred[n] = ldtul_p(mem_buf) & 0xff;
env->pred[n] = ldl_le_p(mem_buf) & 0xff;
return sizeof(uint8_t);
}
@ -117,7 +117,7 @@ static int gdb_put_vreg(CPUHexagonState *env, uint8_t *mem_buf, int n)
{
int i;
for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) {
env->VRegs[n].uw[i] = ldtul_p(mem_buf);
env->VRegs[n].uw[i] = ldl_le_p(mem_buf);
mem_buf += 4;
}
return MAX_VEC_SIZE_BYTES;
@ -127,7 +127,7 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n)
{
int i;
for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) {
env->QRegs[n].uw[i] = ldtul_p(mem_buf);
env->QRegs[n].uw[i] = ldl_le_p(mem_buf);
mem_buf += 4;
}
return MAX_VEC_SIZE_BYTES / 8;


@ -67,10 +67,10 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
if (is_la64(env)) {
tmp = ldq_p(mem_buf);
tmp = ldq_le_p(mem_buf);
read_length = 8;
} else {
tmp = ldl_p(mem_buf);
tmp = ldl_le_p(mem_buf);
read_length = 4;
}
@ -106,13 +106,13 @@ static int loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
if (0 <= n && n < 32) {
env->fpr[n].vreg.D(0) = ldq_p(mem_buf);
env->fpr[n].vreg.D(0) = ldq_le_p(mem_buf);
length = 8;
} else if (32 <= n && n < 40) {
env->cf[n - 32] = ldub_p(mem_buf);
length = 1;
} else if (n == 40) {
env->fcsr0 = ldl_p(mem_buf);
env->fcsr0 = ldl_le_p(mem_buf);
length = 4;
}
return length;


@ -200,10 +200,8 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type)
/* Reset registers to their default values */
env->CP0_PRid = env->cpu_model->CP0_PRid;
env->CP0_Config0 = env->cpu_model->CP0_Config0;
#if TARGET_BIG_ENDIAN
env->CP0_Config0 |= (1 << CP0C0_BE);
#endif
env->CP0_Config0 = deposit32(env->cpu_model->CP0_Config0,
CP0C0_BE, 1, cpu->is_big_endian);
env->CP0_Config1 = env->cpu_model->CP0_Config1;
env->CP0_Config2 = env->cpu_model->CP0_Config2;
env->CP0_Config3 = env->cpu_model->CP0_Config3;
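For reference, deposit32(val, start, len, field) overwrites a len-bit field at bit position start, so the new form behaves the same as the removed #if TARGET_BIG_ENDIAN block for either property value. A hedged illustration, not part of the series:

    /* deposit32() semantics, shown for the 1-bit Config0.BE field: */
    uint32_t cfg0 = env->cpu_model->CP0_Config0;
    assert(deposit32(cfg0, CP0C0_BE, 1, true)  == (cfg0 |  (1u << CP0C0_BE)));
    assert(deposit32(cfg0, CP0C0_BE, 1, false) == (cfg0 & ~(1u << CP0C0_BE)));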
@ -541,6 +539,11 @@ static const struct SysemuCPUOps mips_sysemu_ops = {
};
#endif
static Property mips_cpu_properties[] = {
DEFINE_PROP_BOOL("big-endian", MIPSCPU, is_big_endian, TARGET_BIG_ENDIAN),
DEFINE_PROP_END_OF_LIST(),
};
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
/*
@ -571,6 +574,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
DeviceClass *dc = DEVICE_CLASS(c);
ResettableClass *rc = RESETTABLE_CLASS(c);
device_class_set_props(dc, mips_cpu_properties);
device_class_set_parent_realize(dc, mips_cpu_realizefn,
&mcc->parent_realize);
resettable_class_set_parent_phases(rc, NULL, mips_cpu_reset_hold, NULL,
@ -639,12 +643,15 @@ static void mips_cpu_register_types(void)
type_init(mips_cpu_register_types)
/* Could be used by generic CPU object */
MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk)
MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk,
bool is_big_endian)
{
DeviceState *cpu;
cpu = DEVICE(object_new(cpu_type));
qdev_connect_clock_in(cpu, "clk-in", cpu_refclk);
object_property_set_bool(OBJECT(cpu), "big-endian", is_big_endian,
&error_abort);
qdev_realize(cpu, NULL, &error_abort);
return MIPS_CPU(cpu);


@ -1209,6 +1209,9 @@ struct ArchCPU {
Clock *clock;
Clock *count_div; /* Divider for CP0_Count clock */
/* Properties */
bool is_big_endian;
};
/**
@ -1373,12 +1376,14 @@ static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
* mips_cpu_create_with_clock:
* @typename: a MIPS CPU type.
* @cpu_refclk: this cpu input clock (an output clock of another device)
* @is_big_endian: whether this CPU is configured in big endianness
*
* Instantiates a MIPS CPU, set the input clock of the CPU to @cpu_refclk,
* then realizes the CPU.
*
* Returns: A #CPUState or %NULL if an error occurred.
*/
MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk);
MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk,
bool is_big_endian);
#endif /* MIPS_CPU_H */


@ -225,6 +225,16 @@ static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value)
}
}
static inline bool mips_env_is_bigendian(CPUMIPSState *env)
{
return extract32(env->CP0_Config0, CP0C0_BE, 1);
}
static inline MemOp mo_endian_env(CPUMIPSState *env)
{
return mips_env_is_bigendian(env) ? MO_BE : MO_LE;
}
static inline void restore_pamask(CPUMIPSState *env)
{
if (env->hflags & MIPS_HFLAG_ELPA) {
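The translation-time counterparts used throughout the tcg/ hunks below are not part of this excerpt; by analogy with mo_endian_env() above they presumably look like the following sketch (definitions assumed, not quoted from the series):

    /* Assumed shape of the DisasContext-level helpers referenced below: */
    static inline bool disas_is_bigendian(DisasContext *ctx)
    {
        return extract32(ctx->CP0_Config0, CP0C0_BE, 1);  /* assumption */
    }

    static inline MemOp mo_endian(DisasContext *ctx)
    {
        return disas_is_bigendian(ctx) ? MO_BE : MO_LE;
    }

    /* so that a former MO_TESL memop is now written as: */
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);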


@ -53,11 +53,6 @@ HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_is_bigendian(CPUMIPSState *env)
{
return extract32(env->CP0_Config0, CP0C0_BE, 1);
}
static inline target_ulong get_lmask(CPUMIPSState *env,
target_ulong value, unsigned bits)
{
@ -65,7 +60,7 @@ static inline target_ulong get_lmask(CPUMIPSState *env,
value &= mask;
if (!cpu_is_bigendian(env)) {
if (!mips_env_is_bigendian(env)) {
value ^= mask;
}
@ -76,7 +71,7 @@ void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 32);
int dir = cpu_is_bigendian(env) ? 1 : -1;
int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
@ -100,7 +95,7 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 32);
int dir = cpu_is_bigendian(env) ? 1 : -1;
int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
@ -130,7 +125,7 @@ void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 64);
int dir = cpu_is_bigendian(env) ? 1 : -1;
int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
@ -174,7 +169,7 @@ void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 64);
int dir = cpu_is_bigendian(env) ? 1 : -1;
int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());


@ -977,23 +977,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
gen_reserved_instruction(ctx);
return;
}
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 4);
gen_op_addr_add(ctx, t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
gen_op_addr_addi(ctx, t0, t0, 4);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd + 1);
break;
case SWP:
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
tcg_gen_movi_tl(t1, 4);
gen_op_addr_add(ctx, t0, t0, t1);
gen_op_addr_addi(ctx, t0, t0, 4);
gen_load_gpr(t1, rd + 1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#ifdef TARGET_MIPS64
@ -1002,23 +1000,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
gen_reserved_instruction(ctx);
return;
}
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
gen_op_addr_addi(ctx, t0, t0, 8);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd + 1);
break;
case SDP:
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
gen_op_addr_addi(ctx, t0, t0, 8);
gen_load_gpr(t1, rd + 1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@ -2572,13 +2568,13 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
gen_st(ctx, mips32_op, rt, rs, offset);
break;
case SC:
gen_st_cond(ctx, rt, rs, offset, MO_TESL, false);
gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, false);
break;
#if defined(TARGET_MIPS64)
case SCD:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false);
gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_UQ, false);
break;
#endif
case LD_EVA:
@ -2659,7 +2655,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
mips32_op = OPC_SHE;
goto do_st_lr;
case SCE:
gen_st_cond(ctx, rt, rs, offset, MO_TESL, true);
gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, true);
break;
case SWE:
mips32_op = OPC_SWE;


@ -122,11 +122,21 @@ enum {
static int xlat(int r)
{
static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
return map[r];
}
static void decr_and_store(DisasContext *ctx, unsigned regidx, TCGv t0)
{
TCGv t1 = tcg_temp_new();
gen_op_addr_addi(ctx, t0, t0, -4);
gen_load_gpr(t1, regidx);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
}
static void gen_mips16_save(DisasContext *ctx,
int xsregs, int aregs,
int do_ra, int do_s0, int do_s1,
@ -134,7 +144,6 @@ static void gen_mips16_save(DisasContext *ctx,
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int args, astatic;
switch (aregs) {
@ -172,70 +181,62 @@ static void gen_mips16_save(DisasContext *ctx,
case 4:
gen_base_offset_addr(ctx, t0, 29, 12);
gen_load_gpr(t1, 7);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 3:
gen_base_offset_addr(ctx, t0, 29, 8);
gen_load_gpr(t1, 6);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 2:
gen_base_offset_addr(ctx, t0, 29, 4);
gen_load_gpr(t1, 5);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 1:
gen_base_offset_addr(ctx, t0, 29, 0);
gen_load_gpr(t1, 4);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
}
gen_load_gpr(t0, 29);
#define DECR_AND_STORE(reg) do { \
tcg_gen_movi_tl(t2, -4); \
gen_op_addr_add(ctx, t0, t0, t2); \
gen_load_gpr(t1, reg); \
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | \
ctx->default_tcg_memop_mask); \
} while (0)
if (do_ra) {
DECR_AND_STORE(31);
decr_and_store(ctx, 31, t0);
}
switch (xsregs) {
case 7:
DECR_AND_STORE(30);
decr_and_store(ctx, 30, t0);
/* Fall through */
case 6:
DECR_AND_STORE(23);
decr_and_store(ctx, 23, t0);
/* Fall through */
case 5:
DECR_AND_STORE(22);
decr_and_store(ctx, 22, t0);
/* Fall through */
case 4:
DECR_AND_STORE(21);
decr_and_store(ctx, 21, t0);
/* Fall through */
case 3:
DECR_AND_STORE(20);
decr_and_store(ctx, 20, t0);
/* Fall through */
case 2:
DECR_AND_STORE(19);
decr_and_store(ctx, 19, t0);
/* Fall through */
case 1:
DECR_AND_STORE(18);
decr_and_store(ctx, 18, t0);
}
if (do_s1) {
DECR_AND_STORE(17);
decr_and_store(ctx, 17, t0);
}
if (do_s0) {
DECR_AND_STORE(16);
decr_and_store(ctx, 16, t0);
}
switch (aregs) {
@ -270,21 +271,31 @@ static void gen_mips16_save(DisasContext *ctx,
}
if (astatic > 0) {
DECR_AND_STORE(7);
decr_and_store(ctx, 7, t0);
if (astatic > 1) {
DECR_AND_STORE(6);
decr_and_store(ctx, 6, t0);
if (astatic > 2) {
DECR_AND_STORE(5);
decr_and_store(ctx, 5, t0);
if (astatic > 3) {
DECR_AND_STORE(4);
decr_and_store(ctx, 4, t0);
}
}
}
}
#undef DECR_AND_STORE
tcg_gen_movi_tl(t2, -framesize);
gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize);
}
static void decr_and_load(DisasContext *ctx, unsigned regidx, TCGv t0)
{
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
tcg_gen_movi_tl(t2, -4);
gen_op_addr_add(ctx, t0, t0, t2);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TE | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, regidx);
}
static void gen_mips16_restore(DisasContext *ctx,
@ -294,52 +305,41 @@ static void gen_mips16_restore(DisasContext *ctx,
{
int astatic;
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
tcg_gen_movi_tl(t2, framesize);
gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);
#define DECR_AND_LOAD(reg) do { \
tcg_gen_movi_tl(t2, -4); \
gen_op_addr_add(ctx, t0, t0, t2); \
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \
ctx->default_tcg_memop_mask); \
gen_store_gpr(t1, reg); \
} while (0)
gen_op_addr_addi(ctx, t0, cpu_gpr[29], -framesize);
if (do_ra) {
DECR_AND_LOAD(31);
decr_and_load(ctx, 31, t0);
}
switch (xsregs) {
case 7:
DECR_AND_LOAD(30);
decr_and_load(ctx, 30, t0);
/* Fall through */
case 6:
DECR_AND_LOAD(23);
decr_and_load(ctx, 23, t0);
/* Fall through */
case 5:
DECR_AND_LOAD(22);
decr_and_load(ctx, 22, t0);
/* Fall through */
case 4:
DECR_AND_LOAD(21);
decr_and_load(ctx, 21, t0);
/* Fall through */
case 3:
DECR_AND_LOAD(20);
decr_and_load(ctx, 20, t0);
/* Fall through */
case 2:
DECR_AND_LOAD(19);
decr_and_load(ctx, 19, t0);
/* Fall through */
case 1:
DECR_AND_LOAD(18);
decr_and_load(ctx, 18, t0);
}
if (do_s1) {
DECR_AND_LOAD(17);
decr_and_load(ctx, 17, t0);
}
if (do_s0) {
DECR_AND_LOAD(16);
decr_and_load(ctx, 16, t0);
}
switch (aregs) {
@ -374,21 +374,19 @@ static void gen_mips16_restore(DisasContext *ctx,
}
if (astatic > 0) {
DECR_AND_LOAD(7);
decr_and_load(ctx, 7, t0);
if (astatic > 1) {
DECR_AND_LOAD(6);
decr_and_load(ctx, 6, t0);
if (astatic > 2) {
DECR_AND_LOAD(5);
decr_and_load(ctx, 5, t0);
if (astatic > 3) {
DECR_AND_LOAD(4);
decr_and_load(ctx, 4, t0);
}
}
}
}
#undef DECR_AND_LOAD
tcg_gen_movi_tl(t2, framesize);
gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize);
}
#if defined(TARGET_MIPS64)


@ -8211,14 +8211,6 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF) \
MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
mips_env_mmu_index(env));
#else
#define MEMOP_IDX(DF)
#endif
#if TARGET_BIG_ENDIAN
static inline uint64_t bswap16x4(uint64_t x)
{


@ -1533,7 +1533,7 @@ static void gen_mxu_s32ldxx(DisasContext *ctx, bool reversed, bool postinc)
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
(MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
gen_store_mxu_gpr(t1, XRa);
@ -1569,7 +1569,7 @@ static void gen_mxu_s32stxx(DisasContext *ctx, bool reversed, bool postinc)
gen_load_mxu_gpr(t1, XRa);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
(MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
if (postinc) {
@ -1605,7 +1605,7 @@ static void gen_mxu_s32ldxvx(DisasContext *ctx, bool reversed,
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
(MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
gen_store_mxu_gpr(t1, XRa);
@ -1675,7 +1675,7 @@ static void gen_mxu_s32stxvx(DisasContext *ctx, bool reversed,
gen_load_mxu_gpr(t1, XRa);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
(MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
if (postinc) {
@ -4803,19 +4803,19 @@ static void decode_opc_mxu__pool17(DisasContext *ctx)
switch (opcode) {
case OPC_MXU_LXW:
gen_mxu_lxx(ctx, strd2, MO_TE | MO_UL);
gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UL);
break;
case OPC_MXU_LXB:
gen_mxu_lxx(ctx, strd2, MO_TE | MO_SB);
gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SB);
break;
case OPC_MXU_LXH:
gen_mxu_lxx(ctx, strd2, MO_TE | MO_SW);
gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SW);
break;
case OPC_MXU_LXBU:
gen_mxu_lxx(ctx, strd2, MO_TE | MO_UB);
gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UB);
break;
case OPC_MXU_LXHU:
gen_mxu_lxx(ctx, strd2, MO_TE | MO_UW);
gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UW);
break;
default:
MIPS_INVAL("decode_opc_mxu");
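mo_endian_rev() itself is defined elsewhere in the series; by analogy with mo_endian() it presumably reduces to something like this sketch (assumed, not quoted), which preserves the old MO_TESL ^ MO_BSWAP behaviour for the byte-reversed MXU accesses:

    /* Assumed shape: flip the guest endianness when 'reversed' is set. */
    static inline MemOp mo_endian_rev(DisasContext *ctx, bool reversed)
    {
        return (disas_is_bigendian(ctx) != reversed) ? MO_BE : MO_LE;
    }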


@ -998,8 +998,9 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
TCGv tmp2 = tcg_temp_new();
gen_base_offset_addr(ctx, taddr, base, offset);
tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN);
if (cpu_is_bigendian(ctx)) {
tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx,
mo_endian(ctx) | MO_UQ | MO_ALIGN);
if (disas_is_bigendian(ctx)) {
tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
} else {
tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
@ -1031,7 +1032,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
gen_load_gpr(tmp1, reg1);
gen_load_gpr(tmp2, reg2);
if (cpu_is_bigendian(ctx)) {
if (disas_is_bigendian(ctx)) {
tcg_gen_concat_tl_i64(tval, tmp2, tmp1);
} else {
tcg_gen_concat_tl_i64(tval, tmp1, tmp2);
@ -1052,8 +1053,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
tcg_gen_movi_tl(cpu_gpr[reg1], 0);
}
gen_set_label(lab_done);
tcg_gen_movi_tl(lladdr, -1);
tcg_gen_st_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr));
tcg_gen_st_tl(tcg_constant_tl(-1), tcg_env, offsetof(CPUMIPSState, lladdr));
}
static void gen_adjust_sp(DisasContext *ctx, int u)
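The same pattern recurs in many hunks below: a write-once temporary initialised with tcg_gen_movi_tl() is replaced by a read-only constant from tcg_constant_tl(). Roughly (illustration only; values returned by tcg_constant_tl() must never be used as an output operand):

    /* before */
    TCGv t = tcg_temp_new();
    tcg_gen_movi_tl(t, -1);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUMIPSState, lladdr));

    /* after */
    tcg_gen_st_tl(tcg_constant_tl(-1), tcg_env,
                  offsetof(CPUMIPSState, lladdr));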
@ -1075,7 +1075,7 @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count,
gen_base_offset_addr(ctx, va, 29, this_offset);
gen_load_gpr(t0, this_rt);
tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx,
(MO_TEUL | ctx->default_tcg_memop_mask));
mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask);
counter++;
}
@ -1095,8 +1095,8 @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
int this_offset = u - ((counter + 1) << 2);
gen_base_offset_addr(ctx, va, 29, this_offset);
tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL |
ctx->default_tcg_memop_mask);
tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx,
mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, this_rt);
counter++;
@ -1543,7 +1543,6 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
{
int16_t imm;
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv v0_t = tcg_temp_new();
gen_load_gpr(v0_t, v1);
@ -1570,12 +1569,10 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
check_dsp(ctx);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_MTHLIP:
tcg_gen_movi_tl(t0, v2 >> 3);
gen_helper_mthlip(t0, v0_t, tcg_env);
gen_helper_mthlip(tcg_constant_tl(v2 >> 3), v0_t, tcg_env);
break;
case NM_SHILOV:
tcg_gen_movi_tl(t0, v2 >> 3);
gen_helper_shilo(t0, v0_t, tcg_env);
gen_helper_shilo(tcg_constant_tl(v2 >> 3), v0_t, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@ -1587,39 +1584,34 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
imm = extract32(ctx->opcode, 14, 7);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_RDDSP:
tcg_gen_movi_tl(t0, imm);
gen_helper_rddsp(t0, t0, tcg_env);
gen_helper_rddsp(t0, tcg_constant_tl(imm), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_WRDSP:
gen_load_gpr(t0, ret);
tcg_gen_movi_tl(t1, imm);
gen_helper_wrdsp(t0, t1, tcg_env);
gen_helper_wrdsp(t0, tcg_constant_tl(imm), tcg_env);
break;
case NM_EXTP:
tcg_gen_movi_tl(t0, v2 >> 3);
tcg_gen_movi_tl(t1, v1);
gen_helper_extp(t0, t0, t1, tcg_env);
gen_helper_extp(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTPDP:
tcg_gen_movi_tl(t0, v2 >> 3);
tcg_gen_movi_tl(t1, v1);
gen_helper_extpdp(t0, t0, t1, tcg_env);
gen_helper_extpdp(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
}
break;
case NM_POOL32AXF_1_4:
check_dsp(ctx);
tcg_gen_movi_tl(t0, v2 >> 2);
switch (extract32(ctx->opcode, 12, 1)) {
case NM_SHLL_QB:
gen_helper_shll_qb(t0, t0, v0_t, tcg_env);
gen_helper_shll_qb(t0, tcg_constant_tl(v2 >> 2), v0_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_SHRL_QB:
gen_helper_shrl_qb(t0, t0, v0_t);
gen_helper_shrl_qb(t0, tcg_constant_tl(v2 >> 2), v0_t);
gen_store_gpr(t0, ret);
break;
}
@ -1630,23 +1622,25 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_POOL32AXF_1_7:
check_dsp(ctx);
tcg_gen_movi_tl(t0, v2 >> 3);
tcg_gen_movi_tl(t1, v1);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_EXTR_W:
gen_helper_extr_w(t0, t0, t1, tcg_env);
gen_helper_extr_w(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_R_W:
gen_helper_extr_r_w(t0, t0, t1, tcg_env);
gen_helper_extr_r_w(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_RS_W:
gen_helper_extr_rs_w(t0, t0, t1, tcg_env);
gen_helper_extr_rs_w(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_S_H:
gen_helper_extr_s_h(t0, t0, t1, tcg_env);
gen_helper_extr_s_h(t0, tcg_constant_tl(v2 >> 3),
tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
}
@ -1848,8 +1842,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
case NM_EXTRV_W:
check_dsp(ctx);
gen_load_gpr(v1_t, rs);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extr_w(t0, t0, v1_t, tcg_env);
gen_helper_extr_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@ -1903,8 +1896,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_R_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extr_r_w(t0, t0, v1_t, tcg_env);
gen_helper_extr_r_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
default:
@ -1923,8 +1915,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTPV:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extp(t0, t0, v1_t, tcg_env);
gen_helper_extp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUB:
@ -1947,8 +1938,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_RS_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extr_rs_w(t0, t0, v1_t, tcg_env);
gen_helper_extr_rs_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@ -1964,8 +1954,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTPDPV:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extpdp(t0, t0, v1_t, tcg_env);
gen_helper_extpdp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUBU:
@ -1990,8 +1979,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_S_H:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
gen_helper_extr_s_h(t0, t0, v1_t, tcg_env);
gen_helper_extr_s_h(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@ -2149,24 +2137,22 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
switch (opc) {
case NM_SHRA_R_QB:
check_dsp_r2(ctx);
tcg_gen_movi_tl(t0, rd >> 2);
switch (extract32(ctx->opcode, 12, 1)) {
case 0:
/* NM_SHRA_QB */
gen_helper_shra_qb(t0, t0, rs_t);
gen_helper_shra_qb(t0, tcg_constant_tl(rd >> 2), rs_t);
gen_store_gpr(t0, rt);
break;
case 1:
/* NM_SHRA_R_QB */
gen_helper_shra_r_qb(t0, t0, rs_t);
gen_helper_shra_r_qb(t0, tcg_constant_tl(rd >> 2), rs_t);
gen_store_gpr(t0, rt);
break;
}
break;
case NM_SHRL_PH:
check_dsp_r2(ctx);
tcg_gen_movi_tl(t0, rd >> 1);
gen_helper_shrl_ph(t0, t0, rs_t);
gen_helper_shrl_ph(t0, tcg_constant_tl(rd >> 1), rs_t);
gen_store_gpr(t0, rt);
break;
case NM_REPL_QB:
@ -2180,8 +2166,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
(uint32_t)imm << 8 |
(uint32_t)imm;
result = (int32_t)result;
tcg_gen_movi_tl(t0, result);
gen_store_gpr(t0, rt);
gen_store_gpr(tcg_constant_tl(result), rt);
}
break;
default:
@ -2302,10 +2287,9 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
{
TCGCond cond = TCG_COND_ALWAYS;
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv timm = tcg_constant_tl(imm);
gen_load_gpr(t0, rt);
tcg_gen_movi_tl(t1, imm);
ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
/* Load needed operands and calculate btarget */
@ -2334,7 +2318,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
} else {
tcg_gen_shri_tl(t0, t0, imm);
tcg_gen_andi_tl(t0, t0, 1);
tcg_gen_movi_tl(t1, 0);
timm = tcg_constant_tl(0);
if (opc == NM_BBEQZC) {
cond = TCG_COND_EQ;
} else {
@ -2389,7 +2373,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
/* Conditional compact branch */
TCGLabel *fs = gen_new_label();
tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs);
tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, timm, fs);
gen_goto_tb(ctx, 1, ctx->btarget);
gen_set_label(fs);
@ -2403,7 +2387,6 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
int rt)
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
/* load rs */
gen_load_gpr(t0, rs);
@ -2415,8 +2398,7 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
/* calculate btarget */
tcg_gen_shli_tl(t0, t0, 1);
tcg_gen_movi_tl(t1, ctx->base.pc_next + 4);
gen_op_addr_add(ctx, btarget, t1, t0);
gen_op_addr_add(ctx, btarget, tcg_constant_tl(ctx->base.pc_next + 4), t0);
/* branch completion */
clear_branch_hflags(ctx);
@ -2469,11 +2451,9 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
} else {
/* OPC_JIC, OPC_JIALC */
TCGv tbase = tcg_temp_new();
TCGv toffset = tcg_temp_new();
gen_load_gpr(tbase, rt);
tcg_gen_movi_tl(toffset, offset);
gen_op_addr_add(ctx, btarget, tbase, toffset);
gen_op_addr_addi(ctx, btarget, tbase, offset);
}
break;
default:
@ -2647,13 +2627,13 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
case NM_LHX:
/*case NM_LHXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
MO_TESW | ctx->default_tcg_memop_mask);
mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_LWX:
/*case NM_LWXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
MO_TESL | ctx->default_tcg_memop_mask);
mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_LBUX:
@ -2663,7 +2643,7 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
case NM_LHUX:
/*case NM_LHUXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
MO_TEUW | ctx->default_tcg_memop_mask);
mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_SBX:
@ -2676,14 +2656,14 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
check_nms(ctx);
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
MO_TEUW | ctx->default_tcg_memop_mask);
mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask);
break;
case NM_SWX:
/*case NM_SWXS:*/
check_nms(ctx);
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
MO_TEUL | ctx->default_tcg_memop_mask);
mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask);
break;
case NM_LWC1X:
/*case NM_LWC1XS:*/
@ -3445,13 +3425,10 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
case NM_SHILO:
check_dsp(ctx);
{
TCGv tv0 = tcg_temp_new();
TCGv tv1 = tcg_temp_new();
int16_t imm = extract32(ctx->opcode, 16, 7);
tcg_gen_movi_tl(tv0, rd >> 3);
tcg_gen_movi_tl(tv1, imm);
gen_helper_shilo(tv0, tv1, tcg_env);
gen_helper_shilo(tcg_constant_tl(rd >> 3),
tcg_constant_tl(imm), tcg_env);
}
break;
case NM_MULEQ_S_W_PHL:
@ -3506,8 +3483,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
break;
case NM_SHRA_R_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd);
gen_helper_shra_r_w(v1_t, t0, v1_t);
gen_helper_shra_r_w(v1_t, tcg_constant_tl(rd), v1_t);
gen_store_gpr(v1_t, rt);
break;
case NM_SHRA_R_PH:
@ -3547,8 +3523,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
break;
case NM_SHLL_S_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd);
gen_helper_shll_s_w(v1_t, t0, v1_t, tcg_env);
gen_helper_shll_s_w(v1_t, tcg_constant_tl(rd), v1_t, tcg_env);
gen_store_gpr(v1_t, rt);
break;
case NM_REPL_PH:
@ -3729,32 +3704,29 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
case NM_LWPC48:
check_nms(ctx);
if (rt != 0) {
TCGv t0;
t0 = tcg_temp_new();
target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
addr_off);
tcg_gen_movi_tl(t0, addr);
tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx,
MO_TESL | ctx->default_tcg_memop_mask);
tcg_gen_qemu_ld_tl(cpu_gpr[rt], tcg_constant_tl(addr),
ctx->mem_idx,
mo_endian(ctx) | MO_SL
| ctx->default_tcg_memop_mask);
}
break;
case NM_SWPC48:
check_nms(ctx);
{
TCGv t0, t1;
t0 = tcg_temp_new();
TCGv t1;
t1 = tcg_temp_new();
target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
addr_off);
tcg_gen_movi_tl(t0, addr);
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
MO_TEUL | ctx->default_tcg_memop_mask);
tcg_gen_qemu_st_tl(t1, tcg_constant_tl(addr), ctx->mem_idx,
mo_endian(ctx) | MO_UL
| ctx->default_tcg_memop_mask);
}
break;
default:
@ -4132,14 +4104,14 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
switch (extract32(ctx->opcode, 11, 4)) {
case NM_UALH:
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
MO_UNALN);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
mo_endian(ctx) | MO_SW | MO_UNALN);
gen_store_gpr(t0, rt);
break;
case NM_UASH:
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
MO_UNALN);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
mo_endian(ctx) | MO_UW | MO_UNALN);
break;
}
}
@ -4161,7 +4133,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
case NM_P_SC:
switch (ctx->opcode & 0x03) {
case NM_SC:
gen_st_cond(ctx, rt, rs, s, MO_TESL, false);
gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL,
false);
break;
case NM_SCWP:
check_xnp(ctx);
@ -4274,7 +4247,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
check_xnp(ctx);
check_eva(ctx);
check_cp0_enabled(ctx);
gen_st_cond(ctx, rt, rs, s, MO_TESL, true);
gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL,
true);
break;
case NM_SCWPE:
check_xnp(ctx);
@ -4317,7 +4291,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
switch (extract32(ctx->opcode, 11, 1)) {
case NM_LWM:
tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx,
memop | MO_TESL);
memop | mo_endian(ctx) | MO_SL);
gen_store_gpr(t1, this_rt);
if ((this_rt == rs) &&
(counter != (count - 1))) {
@ -4328,7 +4302,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
this_rt = (rt == 0) ? 0 : this_rt;
gen_load_gpr(t1, this_rt);
tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx,
memop | MO_TEUL);
memop | mo_endian(ctx) | MO_UL);
break;
}
counter++;


@ -601,7 +601,7 @@ static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op,
return false;
}
oi = make_memop_idx(op | MO_TE, ptw_mmu_idx);
oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx);
if (op == MO_64) {
*pte = cpu_ldq_mmu(env, vaddr, oi, 0);
} else {


@ -1456,8 +1456,7 @@ void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
#endif
}
static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base,
target_long ofs)
void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs)
{
tcg_gen_addi_tl(ret, base, ofs);
@ -1957,16 +1956,16 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
tcg_gen_st_tl(ret, tcg_env, offsetof(CPUMIPSState, llval)); \
}
#else
#define OP_LD_ATOMIC(insn, fname) \
#define OP_LD_ATOMIC(insn, ignored_memop) \
static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
DisasContext *ctx) \
{ \
gen_helper_##insn(ret, tcg_env, arg1, tcg_constant_i32(mem_idx)); \
}
#endif
OP_LD_ATOMIC(ll, MO_TESL);
OP_LD_ATOMIC(ll, mo_endian(ctx) | MO_SL);
#if defined(TARGET_MIPS64)
OP_LD_ATOMIC(lld, MO_TEUQ);
OP_LD_ATOMIC(lld, mo_endian(ctx) | MO_UQ);
#endif
#undef OP_LD_ATOMIC
@ -2010,7 +2009,7 @@ static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr,
*/
tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
tcg_gen_andi_tl(t1, addr, sizem1);
if (!cpu_is_bigendian(ctx)) {
if (!disas_is_bigendian(ctx)) {
tcg_gen_xori_tl(t1, t1, sizem1);
}
tcg_gen_shli_tl(t1, t1, 3);
@ -2037,7 +2036,7 @@ static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr,
*/
tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
tcg_gen_andi_tl(t1, addr, sizem1);
if (cpu_is_bigendian(ctx)) {
if (disas_is_bigendian(ctx)) {
tcg_gen_xori_tl(t1, t1, sizem1);
}
tcg_gen_shli_tl(t1, t1, 3);
@ -2073,12 +2072,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_LWU:
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL |
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
case OPC_LD:
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -2090,33 +2089,33 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LDL:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ);
gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t1, rt);
break;
case OPC_LDR:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ);
gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t1, rt);
break;
case OPC_LDPC:
t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, rt);
break;
#endif
case OPC_LWPC:
t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t0, rt);
break;
case OPC_LWE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LW:
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL |
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -2124,7 +2123,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LH:
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW |
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -2132,7 +2131,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LHU:
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW |
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -2156,7 +2155,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LWL:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL);
gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_ext32s_tl(t1, t1);
gen_store_gpr(t1, rt);
break;
@ -2166,7 +2165,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LWR:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL);
gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_ext32s_tl(t1, t1);
gen_store_gpr(t1, rt);
break;
@ -2194,7 +2193,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SD:
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
case OPC_SDL:
@ -2208,14 +2207,14 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_SW:
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
case OPC_SHE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_SH:
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
break;
case OPC_SBE:
@ -2253,8 +2252,7 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
/* compare the address against that of the preceding LL */
gen_base_offset_addr(ctx, addr, base, offset);
tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
tcg_gen_movi_tl(t0, 0);
gen_store_gpr(t0, rt);
gen_store_gpr(tcg_constant_tl(0), rt);
tcg_gen_br(done);
gen_set_label(l1);
@ -2281,7 +2279,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
case OPC_LWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, ft);
}
@ -2290,14 +2288,14 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, ft);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
}
break;
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
}
@ -2306,7 +2304,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, ft);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
}
break;
@ -2987,14 +2985,14 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
case R6_OPC_LWPC:
offset = sextract32(ctx->opcode << 2, 0, 21);
addr = addr_add(ctx, pc, offset);
gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL);
gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_SL);
break;
#if defined(TARGET_MIPS64)
case OPC_LWUPC:
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 2, 0, 21);
addr = addr_add(ctx, pc, offset);
gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL);
gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UL);
break;
#endif
default:
@ -3021,7 +3019,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 3, 0, 21);
addr = addr_add(ctx, (pc & ~0x7), offset);
gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ);
gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
break;
#endif
default:
@ -3060,8 +3058,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
@ -3077,30 +3074,27 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
break;
case R6_OPC_DIVU:
{
TCGv t2 = tcg_constant_tl(0);
TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
break;
case R6_OPC_MODU:
{
TCGv t2 = tcg_constant_tl(0);
TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
@ -3155,8 +3149,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
}
break;
@ -3169,24 +3162,21 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
}
break;
case R6_OPC_DDIVU:
{
TCGv t2 = tcg_constant_tl(0);
TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
}
break;
case R6_OPC_DMODU:
{
TCGv t2 = tcg_constant_tl(0);
TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
}
break;
@ -3239,8 +3229,7 @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[1], t0, t1);
tcg_gen_rem_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
@ -3295,8 +3284,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
@ -3348,17 +3336,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
}
break;
case OPC_DDIVU:
{
TCGv t2 = tcg_constant_tl(0);
TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
}
@ -4160,10 +4146,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
case OPC_GSLQ:
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
@ -4172,10 +4158,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
check_cp1_enabled(ctx);
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
@ -4184,11 +4170,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_gpr(t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
case OPC_GSSQC1:
@ -4196,11 +4182,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_fpr64(ctx, t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_fpr64(ctx, t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@ -4213,7 +4199,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_load_fpr32(ctx, fp0, rt);
t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
break;
@ -4224,7 +4210,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_load_fpr32(ctx, fp0, rt);
t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
break;
@ -4234,7 +4220,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, t1, rt);
break;
case OPC_GSLDRC1:
@ -4242,7 +4228,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, t1, rt);
break;
#endif
@ -4360,7 +4346,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
gen_store_gpr(t0, rt);
break;
case OPC_GSLHX:
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -4369,7 +4355,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -4379,7 +4365,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@ -4390,7 +4376,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
fp0 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, rt);
break;
@ -4400,7 +4386,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t0, rt);
break;
@ -4413,34 +4399,34 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
case OPC_GSSHX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
break;
case OPC_GSSWX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
case OPC_GSSWXC1:
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDXC1:
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@ -10779,7 +10765,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fd);
}
@ -10789,7 +10775,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
check_cp1_registers(ctx, fd);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@ -10799,7 +10785,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@ -10808,7 +10794,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
}
break;
case OPC_SDXC1:
@ -10817,7 +10803,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
}
break;
case OPC_SUXC1:
@ -10826,7 +10812,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
}
break;
}
@ -10856,7 +10842,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
if (cpu_is_bigendian(ctx)) {
if (disas_is_bigendian(ctx)) {
gen_load_fpr32(ctx, fp, fs);
gen_load_fpr32h(ctx, fph, ft);
gen_store_fpr32h(ctx, fp, fd);
@ -11265,10 +11251,9 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
} else {
/* OPC_JIC, OPC_JIALC */
TCGv tbase = tcg_temp_new();
TCGv toffset = tcg_constant_tl(offset);
gen_load_gpr(tbase, rt);
gen_op_addr_add(ctx, btarget, tbase, toffset);
gen_op_addr_addi(ctx, btarget, tbase, offset);
}
break;
default:
@ -11428,20 +11413,18 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
void gen_addiupc(DisasContext *ctx, int rx, int imm,
int is_64_bit, int extended)
{
TCGv t0;
target_ulong npc;
if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
gen_reserved_instruction(ctx);
return;
}
t0 = tcg_temp_new();
tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
npc = pc_relative_pc(ctx) + imm;
if (!is_64_bit) {
tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
npc = (int32_t)npc;
}
tcg_gen_movi_tl(cpu_gpr[rx], npc);
}
static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
@ -11476,7 +11459,7 @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd)
gen_op_addr_add(ctx, t0, t1, t0);
}
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t1, rd);
}
@ -11567,16 +11550,16 @@ static void gen_mips_lx(DisasContext *ctx, uint32_t opc,
gen_store_gpr(t0, rd);
break;
case OPC_LHX:
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW);
gen_store_gpr(t0, rd);
break;
case OPC_LWX:
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t0, rd);
break;
#if defined(TARGET_MIPS64)
case OPC_LDX:
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, rd);
break;
#endif
@ -13719,7 +13702,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
}
break;
case R6_OPC_SC:
gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
break;
case R6_OPC_LL:
gen_ld(ctx, op1, rt, rs, imm);
@ -13765,7 +13748,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
#endif
#if defined(TARGET_MIPS64)
case R6_OPC_SCD:
gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
break;
case R6_OPC_LLD:
gen_ld(ctx, op1, rt, rs, imm);
@ -14448,7 +14431,7 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
return;
case OPC_SCE:
check_cp0_enabled(ctx);
gen_st_cond(ctx, rt, rs, imm, MO_TESL, true);
gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, true);
return;
case OPC_CACHEE:
check_eva(ctx);
@ -14912,7 +14895,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
if (ctx->insn_flags & INSN_R5900) {
check_insn_opc_user_only(ctx, INSN_R5900);
}
gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
break;
case OPC_CACHE:
check_cp0_enabled(ctx);
@ -15191,7 +15174,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
check_insn_opc_user_only(ctx, INSN_R5900);
}
check_mips_64(ctx);
gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
break;
case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
if (ctx->insn_flags & ISA_MIPS_R6) {

@ -176,6 +176,7 @@ void gen_addiupc(DisasContext *ctx, int rx, int imm,
* Address Computation and Large Constant Instructions
*/
void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1);
void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs);
bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
@ -235,9 +236,19 @@ bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn);
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ return FUNC(ctx, a, __VA_ARGS__); }
static inline bool cpu_is_bigendian(DisasContext *ctx)
static inline bool disas_is_bigendian(DisasContext *ctx)
{
return extract32(ctx->CP0_Config0, CP0C0_BE, 1);
}
static inline MemOp mo_endian(DisasContext *dc)
{
return disas_is_bigendian(dc) ? MO_BE : MO_LE;
}
static inline MemOp mo_endian_rev(DisasContext *dc, bool reversed)
{
return disas_is_bigendian(dc) ^ reversed ? MO_BE : MO_LE;
}
#endif
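
For readers unfamiliar with the MemOp flags, the MO_TExx rewrites in the hunks above are mechanical: MO_TEUL and friends are just MO_TE OR'ed with a size, and MO_TE resolves to MO_BE or MO_LE when QEMU is built for a given target. mo_endian() supplies that same MO_BE/MO_LE bit from the vCPU state at translation time instead, and mo_endian_rev() additionally XORs in a "reversed" flag for byte-swapped accesses. A rough call-site sketch (illustrative; t0/addr are placeholder names):

    /* before: byte order baked in when the binary was built */
    tcg_gen_qemu_ld_tl(t0, addr, ctx->mem_idx, MO_TEUL);                 /* MO_TE | MO_UL */

    /* after: byte order read from this vCPU's state at translation time */
    tcg_gen_qemu_ld_tl(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UL);
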

@ -340,12 +340,12 @@ static bool trans_LQ(DisasContext *ctx, arg_i *a)
tcg_gen_andi_tl(addr, addr, ~0xf);
/* Lower half */
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, a->rt);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr_hi(t0, a->rt);
return true;
}
@ -364,12 +364,12 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a)
/* Lower half */
gen_load_gpr(t0, a->rt);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
return true;
}

@ -1588,16 +1588,13 @@ static opc_handler_t invalid_handler = {
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_movi_tl(t0, CRF_EQ);
tcg_gen_movi_tl(t1, CRF_LT);
tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
t0, arg0, arg1, t1, t0);
tcg_gen_movi_tl(t1, CRF_GT);
t0, arg0, arg1,
tcg_constant_tl(CRF_LT), tcg_constant_tl(CRF_EQ));
tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
t0, arg0, arg1, t1, t0);
t0, arg0, arg1, tcg_constant_tl(CRF_GT), t0);
tcg_gen_trunc_tl_i32(t, t0);
tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
@ -2974,8 +2971,8 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
/* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
tcg_gen_movi_tl(u, 1 << (memop_size(memop) * 8 - 1));
tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t,
tcg_constant_tl(1 << (memop_size(memop) * 8 - 1)));
}
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)

@ -124,7 +124,7 @@ int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUTriCoreState *env = cpu_env(cs);
uint32_t tmp;
tmp = ldl_p(mem_buf);
tmp = ldl_le_p(mem_buf);
if (n < 16) { /* data registers */
env->gpr_d[n] = tmp;
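
A note on the TriCore gdbstub hunk above: ldl_p() picks its byte order from the target the binary was configured for, while ldl_le_p() is explicitly little-endian; TriCore is a little-endian-only architecture, so the substitution is behaviour-preserving. A tiny sketch (v0/v1 are placeholder names):

    /* target-endian accessor: expands to ldl_le_p() or ldl_be_p() per build */
    uint32_t v0 = ldl_p(mem_buf);
    /* explicit little-endian accessor: no build-time endianness involved */
    uint32_t v1 = ldl_le_p(mem_buf);    /* identical to v0 on TriCore */
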

@ -2732,8 +2732,7 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
tcg_gen_movi_tl(mask, 1);
tcg_gen_shl_tl(mask, mask, width);
tcg_gen_shl_tl(mask, tcg_constant_tl(1), width);
tcg_gen_subi_tl(mask, mask, 1);
tcg_gen_shl_tl(mask, mask, pos);