2005-11-26 13:38:39 +03:00
|
|
|
#include "cpu.h"
|
2014-04-15 22:18:37 +04:00
|
|
|
#include "internals.h"
|
2012-12-17 21:19:49 +04:00
|
|
|
#include "exec/gdbstub.h"
|
2014-04-08 09:31:41 +04:00
|
|
|
#include "exec/helper-proto.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/host-utils.h"
|
2013-09-10 22:09:33 +04:00
|
|
|
#include "sysemu/arch_init.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/sysemu.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/bitops.h"
|
2014-02-26 21:20:07 +04:00
|
|
|
#include "qemu/crc32c.h"
|
2014-03-28 22:42:10 +04:00
|
|
|
#include "exec/cpu_ldst.h"
|
2014-03-28 22:09:49 +04:00
|
|
|
#include "arm_ldst.h"
|
2014-02-26 21:20:07 +04:00
|
|
|
#include <zlib.h> /* For crc32 */
|
2012-01-25 16:42:29 +04:00
|
|
|
|
2012-06-20 15:57:16 +04:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2014-04-15 22:18:40 +04:00
|
|
|
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
|
2012-06-20 15:57:16 +04:00
|
|
|
int access_type, int is_user,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr *phys_ptr, int *prot,
|
2012-06-20 15:57:16 +04:00
|
|
|
target_ulong *page_size);
|
2014-03-10 18:56:28 +04:00
|
|
|
|
|
|
|
/* Definitions for the PMCCNTR and PMCR registers */
|
|
|
|
#define PMCRD 0x8
|
|
|
|
#define PMCRC 0x4
|
|
|
|
#define PMCRE 0x1
|
2012-06-20 15:57:16 +04:00
|
|
|
#endif
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Read one VFP/Neon register for the gdbstub.
 *
 * Stores gdb register number @reg into @buf (VFP data registers are
 * always little-endian) and returns the number of bytes written, or
 * 0 if @reg is out of range.
 */
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    /* VFP3 implements D0-D31; earlier VFP only D0-D15. */
    int nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;

    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* 16 further gdb registers alias the Q regs (two D regs each). */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    /* Control/status registers follow the data registers. */
    switch (reg - nregs) {
    case 0:
        stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]);
        return 4;
    case 1:
        stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]);
        return 4;
    case 2:
        stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
        return 4;
    default:
        return 0;
    }
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Write one VFP/Neon register from the gdbstub.
 *
 * Consumes the little-endian value for gdb register number @reg from
 * @buf and returns the number of bytes consumed, or 0 if @reg is out
 * of range.
 */
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    /* VFP3 implements D0-D31; earlier VFP only D0-D15. */
    int nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;

    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* 16 further gdb registers alias the Q regs (two D regs each). */
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    /* Control/status registers follow the data registers. */
    switch (reg - nregs) {
    case 0:
        env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
        return 4;
    case 1:
        env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf);
        return 4;
    case 2:
        /* Only the EN bit (bit 30) of FPEXC is writable here. */
        env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30);
        return 4;
    default:
        return 0;
    }
}
|
|
|
|
|
2013-12-17 23:42:32 +04:00
|
|
|
/* Read one AArch64 FP/SIMD register for the gdbstub.
 *
 * Registers 0-31 are the 128-bit V registers (stored as two 64-bit
 * halves), 32 is FPSR and 33 is FPCR. Returns the number of bytes
 * written to @buf, or 0 for an unknown register number.
 */
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    if (reg >= 0 && reg <= 31) {
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    }
    if (reg == 32) {
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    }
    if (reg == 33) {
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    }
    return 0;
}
|
|
|
|
|
|
|
|
/* Write one AArch64 FP/SIMD register from the gdbstub.
 *
 * Registers 0-31 are the 128-bit V registers (two 64-bit halves),
 * 32 is FPSR and 33 is FPCR. Returns the number of bytes consumed
 * from @buf, or 0 for an unknown register number.
 */
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    if (reg >= 0 && reg <= 31) {
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    }
    if (reg == 32) {
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    }
    if (reg == 33) {
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    }
    return 0;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Raw read of the state field backing a coprocessor register,
 * honouring whether the register's storage is 32 or 64 bits wide.
 */
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return cpreg_field_is_64bit(ri) ? CPREG_FIELD64(env, ri)
                                    : CPREG_FIELD32(env, ri);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2013-06-25 21:16:07 +04:00
|
|
|
{
|
2014-02-26 21:20:01 +04:00
|
|
|
if (cpreg_field_is_64bit(ri)) {
|
2013-08-20 17:54:31 +04:00
|
|
|
CPREG_FIELD64(env, ri) = value;
|
|
|
|
} else {
|
|
|
|
CPREG_FIELD32(env, ri) = value;
|
|
|
|
}
|
2013-06-25 21:16:07 +04:00
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Raw read of a coprocessor register (as needed for migration, etc).
 * Preference order: constant value, then the raw accessor, then the
 * normal accessor, and finally a direct read of the backing field.
 */
static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    }
    if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    }
    if (ri->readfn) {
        return ri->readfn(env, ri);
    }
    return raw_read(env, ri);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
|
2014-02-20 14:35:54 +04:00
|
|
|
uint64_t v)
|
2013-06-25 21:16:07 +04:00
|
|
|
{
|
|
|
|
/* Raw write of a coprocessor register (as needed for migration, etc).
|
|
|
|
* Note that constant registers are treated as write-ignored; the
|
|
|
|
* caller should check for success by whether a readback gives the
|
|
|
|
* value written.
|
|
|
|
*/
|
|
|
|
if (ri->type & ARM_CP_CONST) {
|
2014-02-20 14:35:54 +04:00
|
|
|
return;
|
2013-06-25 21:16:07 +04:00
|
|
|
} else if (ri->raw_writefn) {
|
2014-02-20 14:35:54 +04:00
|
|
|
ri->raw_writefn(env, ri, v);
|
2013-06-25 21:16:07 +04:00
|
|
|
} else if (ri->writefn) {
|
2014-02-20 14:35:54 +04:00
|
|
|
ri->writefn(env, ri, v);
|
2013-06-25 21:16:07 +04:00
|
|
|
} else {
|
2014-01-02 11:58:20 +04:00
|
|
|
raw_write(env, ri, v);
|
2013-06-25 21:16:07 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the coprocessor state from cpu->env into the (index, value)
 * list used for migration/KVM synchronisation. Registers marked
 * ARM_CP_NO_MIGRATE are skipped. Returns false if any listed index
 * has no matching reginfo.
 */
bool write_cpustate_to_list(ARMCPU *cpu)
{
    bool ok = true;
    int i;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
|
|
|
|
|
|
|
|
/* Load the (index, value) list back into cpu->env coprocessor state.
 * Registers marked ARM_CP_NO_MIGRATE are skipped. Returns false if
 * any index has no reginfo or a written value does not read back.
 */
bool write_list_to_cpustate(ARMCPU *cpu)
{
    bool ok = true;
    int i;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
|
|
|
|
|
|
|
|
/* GList iterator: append the KVM-encoded ID of a migratable register
 * to cpu->cpreg_indexes, advancing cpreg_array_len. @key is a
 * uint32_t regidx; @opaque is the ARMCPU being populated.
 */
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx = *(uint32_t *)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}
|
|
|
|
|
|
|
|
/* GList iterator: count migratable registers by bumping
 * cpu->cpreg_array_len for each key whose reginfo is not marked
 * ARM_CP_NO_MIGRATE.
 */
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx = *(uint32_t *)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}
|
|
|
|
|
|
|
|
/* GCompareFunc: order hash-table keys by their KVM-encoded 64-bit
 * register ID so the migration list is deterministically sorted.
 */
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    /* Branch-free three-way comparison: 1, -1 or 0. */
    return (aidx > bidx) - (aidx < bidx);
}
|
|
|
|
|
2013-07-01 15:40:19 +04:00
|
|
|
/* GHFunc: collect every hash-table key into the GList at *udata
 * (prepended, so the list is built in reverse iteration order;
 * callers sort it afterwards).
 */
static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}
|
|
|
|
|
2013-06-25 21:16:07 +04:00
|
|
|
/* Initialise the cpreg_tuples[] array based on the cp_regs hash.
 * Note that we require cpreg_tuples[] to be sorted by key ID.
 */
void init_cpreg_list(ARMCPU *cpu)
{
    GList *keys = NULL;
    int arraylen;

    /* Gather and sort all register IDs. */
    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);
    keys = g_list_sort(keys, cpreg_key_compare);

    /* First pass: count the migratable registers. */
    cpu->cpreg_array_len = 0;
    g_list_foreach(keys, count_cpreg, cpu);
    arraylen = cpu->cpreg_array_len;

    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = arraylen;

    /* Second pass: fill in the sorted index array. */
    cpu->cpreg_array_len = 0;
    g_list_foreach(keys, add_cpreg_to_list, cpu);
    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Writefn for the Domain Access Control Register. */
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    /* Flush the TLB: the QEMU TLB does not track the access domain,
     * so any DACR change invalidates cached translations.
     */
    tlb_flush(CPU(cpu), 1);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Writefn for the FCSE PID register. */
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* No change: nothing to do (and no flush needed). */
        return;
    }
    /* Unlike real hardware the qemu TLB uses virtual addresses,
     * not modified virtual addresses, so this causes a TLB flush.
     */
    tlb_flush(CPU(cpu), 1);
    raw_write(env, ri, value);
}
|
2014-02-20 14:35:54 +04:00
|
|
|
|
|
|
|
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:14 +04:00
|
|
|
{
|
2013-09-04 04:19:44 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
2014-06-09 18:43:22 +04:00
|
|
|
if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
|
2014-04-15 22:18:45 +04:00
|
|
|
&& !extended_addresses_enabled(env)) {
|
2012-06-20 15:57:14 +04:00
|
|
|
/* For VMSA (when not using the LPAE long descriptor page table
|
|
|
|
* format) this register includes the ASID, so do a TLB flush.
|
|
|
|
* For PMSA it is purely a process ID and no action is needed.
|
|
|
|
*/
|
2013-09-04 04:19:44 +04:00
|
|
|
tlb_flush(CPU(cpu), 1);
|
2012-06-20 15:57:14 +04:00
|
|
|
}
|
2014-06-09 18:43:22 +04:00
|
|
|
raw_write(env, ri, value);
|
2012-06-20 15:57:14 +04:00
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:16 +04:00
|
|
|
{
|
|
|
|
/* Invalidate all (TLBIALL) */
|
2013-09-04 04:19:44 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
|
|
|
tlb_flush(CPU(cpu), 1);
|
2012-06-20 15:57:16 +04:00
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:16 +04:00
|
|
|
{
|
|
|
|
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
|
2013-09-04 03:29:02 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
|
|
|
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
|
2012-06-20 15:57:16 +04:00
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:16 +04:00
|
|
|
{
|
|
|
|
/* Invalidate by ASID (TLBIASID) */
|
2013-09-04 04:19:44 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
|
|
|
tlb_flush(CPU(cpu), value == 0);
|
2012-06-20 15:57:16 +04:00
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:16 +04:00
|
|
|
{
|
|
|
|
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
|
2013-09-04 03:29:02 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
|
|
|
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
|
2012-06-20 15:57:16 +04:00
|
|
|
}
|
|
|
|
|
2014-09-12 17:06:50 +04:00
|
|
|
/* IS variants of TLB operations must affect all cores */
|
|
|
|
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush(other_cs, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush(other_cs, value == 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-20 15:57:09 +04:00
|
|
|
/* Registers present on all CPU variants that expose cp15. */
static const ARMCPRegInfo cp_reginfo[] = {
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el1),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
/* Registers only present on pre-v8 CPUs. */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-06-20 15:57:11 +04:00
|
|
|
/* Registers only present on pre-v6 CPUs. */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
/* Registers only present on pre-v7 CPUs. */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Writefn for CPACR / CPACR_EL1: build a mask of the implemented,
 * writable bits for the current feature set, force the RAO/WI bits
 * to one, and store the result in cp15.c1_coproc.
 */
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.c1_coproc = value;
}
|
|
|
|
|
2012-06-20 15:57:11 +04:00
|
|
|
/* Registers introduced in ARMv6. */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:52 +04:00
|
|
|
/* Access check for the performance monitor registers: EL0 accesses
 * are only permitted when PMUSERENR enables them.
 */
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) != 0 || env->cp15.c9_pmuserenr) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP;
}
|
|
|
|
|
2014-03-10 18:56:28 +04:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2014-08-29 18:00:29 +04:00
|
|
|
|
|
|
|
/* Is the cycle counter running? This only checks the PMCR.E global
 * enable bit; it does not support checking the PMCCFILTR_EL0 register.
 */
static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRE) != 0;
}
|
|
|
|
|
2014-08-29 18:00:29 +04:00
|
|
|
/* Toggle c15_ccnt between its two representations.
 *
 * While the counter is enabled, c15_ccnt holds "clock ticks minus
 * counter value" so reads can be computed lazily; subtracting it from
 * the current tick count here converts to the absolute counter value,
 * and applying the same operation again converts back. Callers bracket
 * state changes with a pair of pmccntr_sync() calls.
 */
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    /* Scale the virtual clock (microseconds) to CPU ticks. */
    temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                          get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Writefn for PMCR. The pmccntr_sync() pair converts the cycle
 * counter to its absolute representation before the control bits
 * change, and back to the delta representation afterwards, so the
 * counter value is preserved across enable/divider changes.
 */
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
|
|
|
|
|
|
|
|
/* Readfn for PMCCNTR. While the counter is enabled, c15_ccnt holds
 * "tick count at enable minus counter value", so the current counter
 * value is the current tick count minus that stored delta.
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    /* Scale the virtual clock (microseconds) to CPU ticks. */
    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                           get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
|
|
|
|
|
|
|
|
/* Writefn for PMCCNTR (64-bit view). When the counter is running,
 * store it in delta form (current ticks minus the requested value)
 * so pmccntr_read() reproduces a counter that advances from @value.
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    /* Scale the virtual clock (microseconds) to CPU ticks. */
    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                           get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
|
2014-08-29 18:00:29 +04:00
|
|
|
|
|
|
|
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
uint64_t cur_val = pmccntr_read(env, NULL);
|
|
|
|
|
|
|
|
pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
|
|
|
|
}
|
|
|
|
|
2014-08-29 18:00:29 +04:00
|
|
|
#else /* CONFIG_USER_ONLY */
|
|
|
|
|
|
|
|
/* User-mode emulation: there is no virtual-clock-backed cycle counter
 * state to convert, so syncing is a no-op.
 */
void pmccntr_sync(CPUARMState *env)
{
}
|
|
|
|
|
2014-03-10 18:56:28 +04:00
|
|
|
#endif
|
2012-06-20 15:57:12 +04:00
|
|
|
|
2014-08-29 18:00:30 +04:00
|
|
|
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
pmccntr_sync(env);
|
|
|
|
env->cp15.pmccfiltr_el0 = value & 0x7E000000;
|
|
|
|
pmccntr_sync(env);
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
2012-06-20 15:57:12 +04:00
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
value &= (1 << 31);
|
|
|
|
env->cp15.c9_pmcnten |= value;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:12 +04:00
|
|
|
{
|
|
|
|
value &= (1 << 31);
|
|
|
|
env->cp15.c9_pmcnten &= ~value;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:12 +04:00
|
|
|
{
|
|
|
|
env->cp15.c9_pmovsr &= ~value;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:12 +04:00
|
|
|
{
|
|
|
|
env->cp15.c9_pmxevtyper = value & 0xff;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
2012-06-20 15:57:12 +04:00
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
env->cp15.c9_pmuserenr = value & 1;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:12 +04:00
|
|
|
{
|
|
|
|
/* We have no event counters so only the C bit can be changed */
|
|
|
|
value &= (1 << 31);
|
|
|
|
env->cp15.c9_pminten |= value;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:12 +04:00
|
|
|
{
|
|
|
|
value &= (1 << 31);
|
|
|
|
env->cp15.c9_pminten &= ~value;
|
|
|
|
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2013-10-25 18:44:38 +04:00
|
|
|
{
|
2014-02-26 21:20:04 +04:00
|
|
|
/* Note that even though the AArch64 view of this register has bits
|
|
|
|
* [10:0] all RES0 we can only mask the bottom 5, to comply with the
|
|
|
|
* architectural requirements for bits which are RES0 only in some
|
|
|
|
* contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
|
|
|
|
* requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
|
|
|
|
*/
|
2014-05-27 20:09:54 +04:00
|
|
|
raw_write(env, ri, value & ~0x1FULL);
|
2013-10-25 18:44:38 +04:00
|
|
|
}
|
|
|
|
|
2014-09-29 21:48:49 +04:00
|
|
|
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* HCE is only defined when EL2 is implemented. */
        mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         */
        if (arm_feature(env, ARM_FEATURE_V7)) {
            mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    raw_write(env, ri, value & mask);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* CCSIDR: return the cache size ID value for the cache level/type
 * currently selected via CSSELR (cp15.c0_cssel).
 */
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return arm_env_get_cpu(env)->ccsidr[env->cp15.c0_cssel];
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* CSSELR: cache size selection. Only the low four bits (level and
 * instruction/data selector) are kept.
 */
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    uint64_t sel = value & 0xf;

    raw_write(env, ri, sel);
}
|
|
|
|
|
2014-04-15 22:18:46 +04:00
|
|
|
/* ISR_EL1: report which asynchronous exceptions are currently pending,
 * derived from the CPU's interrupt_request lines.
 */
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pending = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        pending |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        pending |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return pending;
}
|
|
|
|
|
2012-06-20 15:57:09 +04:00
|
|
|
/* Coprocessor registers common to all ARMv7 cores: performance
 * monitors (largely RAZ/WI since no counters are implemented),
 * VBAR, cache ID/selection, IMPDEF RAZ registers, MAIR/PRRR/NMRR,
 * ISR and the 32-bit TLB invalidate operations.
 */
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    /* 64-bit AArch64 view; this one owns migration of the state */
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    /* The cycle counter needs QEMU_CLOCK_VIRTUAL, so system-mode only */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    /* Vector base address register (exception vector table base) */
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
      .resetvalue = 0 },
    /* Cache ID registers: CCSIDR reads depend on the CSSELR selection */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    /* Interrupt status: computed from the pending IRQ/FIQ lines */
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
/* Registers present only on ARMv7 multiprocessor cores:
 * the Inner Shareable TLB maintenance operations.
 */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* TEECR: only bit 0 (XED) is implemented; it controls whether PL0
 * access to TEEHBR traps (see teehbr_access).
 */
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->teecr = value & 1;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* TEEHBR access check: trapped to PL1 from PL0 when TEECR.XED is set. */
static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if ((env->teecr & 1) && arm_current_el(env) == 0) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
|
|
|
|
|
|
|
|
/* ThumbEE registers (coprocessor 14). TEECR gates PL0 access to
 * TEEHBR via the teehbr_access check.
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-06-20 15:57:11 +04:00
|
|
|
/* v6K software thread ID registers. The 32-bit TPIDRURW/TPIDRURO views
 * alias the low half of the 64-bit EL0 state, so only the 64-bit
 * entries carry reset and migration responsibility.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    /* Read-only at PL0, writable from PL1 */
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2013-08-20 17:54:31 +04:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
|
2014-02-20 14:35:53 +04:00
|
|
|
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_el(env) != 0 || extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP;
}
|
|
|
|
|
|
|
|
/* Common access check for the raw counter registers: PL0 reads are
 * only permitted when the relevant CNTKCTL enable bit is set.
 */
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_el(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
|
|
|
|
|
|
|
|
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_el(env) != 0) {
        return CP_ACCESS_OK;
    }
    return extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)
           ? CP_ACCESS_OK : CP_ACCESS_TRAP;
}
|
|
|
|
|
|
|
|
/* Access check for the physical counter (CNTPCT). */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}
|
|
|
|
|
|
|
|
/* Access check for the virtual counter (CNTVCT). */
static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}
|
|
|
|
|
|
|
|
/* Access check for the physical timer registers (CNTP_*). */
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}
|
|
|
|
|
|
|
|
/* Access check for the virtual timer registers (CNTV_*). */
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}
|
|
|
|
|
2013-08-20 17:54:31 +04:00
|
|
|
/* Current generic timer count: the virtual clock (nanoseconds)
 * divided down by GTIMER_SCALE to produce ticks.
 */
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
|
|
|
|
|
|
|
|
/* Recompute the state of one generic timer: update its ISTATUS bit,
 * drive the output IRQ line, and (re)program the backing QEMUTimer
 * for the next point at which ISTATUS will change.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        /* IRQ asserted only when ISTATUS is set and IMASK is clear */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        /* nexttick is in timer ticks, but QEMUTimer deadlines are in
         * QEMU_CLOCK_VIRTUAL nanoseconds, so scale back up by the same
         * factor gt_get_countervalue() divides by. The clamp above is
         * exactly what makes this multiplication overflow-free; without
         * the multiply the timer would fire GTIMER_SCALE times too early.
         */
        timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}
|
|
|
|
|
|
|
|
/* Reset handler for the counter registers: cancel any pending expiry
 * of the backing QEMUTimer. Timer index is taken from opc1 bit 0 of
 * the register being reset.
 */
static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int idx = ri->opc1 & 1;

    timer_del(arm_env_get_cpu(env)->gt_timer[idx]);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Read CNT{P,V}CT: both views return the same underlying count here. */
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Write CNT{P,V}_CVAL: store the absolute compare value and recompute
 * the timer state. Timer index comes from opc1 bit 0.
 */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int idx = ri->opc1 & 1;

    env->cp15.c14_timer[idx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), idx);
}
|
2014-02-20 14:35:54 +04:00
|
|
|
|
|
|
|
/* Read CNT{P,V}_TVAL: 32-bit downcounting view, i.e. the distance
 * from the current count to the compare value. Timer index comes
 * from crm bit 0.
 */
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int idx = ri->crm & 1;
    uint64_t remaining = env->cp15.c14_timer[idx].cval -
                         gt_get_countervalue(env);

    return (uint32_t)remaining;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Write CNT{P,V}_TVAL: the value is a signed 32-bit offset from the
 * current count, converted into an absolute compare value.
 */
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int idx = ri->crm & 1;

    env->cp15.c14_timer[idx].cval =
        gt_get_countervalue(env) + sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), idx);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Write CNT{P,V}_CTL: only ENABLE (bit 0) and IMASK (bit 1) are
 * writable; ISTATUS (bit 2) is managed by gt_recalc_timer.
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int idx = ri->crm & 1;
    uint32_t prev = env->cp15.c14_timer[idx].ctl;
    uint64_t changed = prev ^ value;

    env->cp15.c14_timer[idx].ctl = deposit64(prev, 0, 2, value);
    if (changed & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, idx);
    } else if (changed & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[idx],
                     (prev & 4) && !(value & 2));
    }
}
|
|
|
|
|
|
|
|
void arm_gt_ptimer_cb(void *opaque)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = opaque;
|
|
|
|
|
|
|
|
gt_recalc_timer(cpu, GTIMER_PHYS);
|
|
|
|
}
|
|
|
|
|
|
|
|
void arm_gt_vtimer_cb(void *opaque)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = opaque;
|
|
|
|
|
|
|
|
gt_recalc_timer(cpu, GTIMER_VIRT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
|
|
|
|
/* Note that CNTFRQ is purely reads-as-written for the benefit
|
|
|
|
* of software; writing it doesn't actually change the timer frequency.
|
|
|
|
* Our reset value matches the fixed frequency we implement the timer at.
|
|
|
|
*/
|
|
|
|
{ .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
|
2014-02-26 21:20:05 +04:00
|
|
|
.type = ARM_CP_NO_MIGRATE,
|
|
|
|
.access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
|
|
|
|
.fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
|
|
|
|
.resetfn = arm_cp_reset_ignore,
|
|
|
|
},
|
|
|
|
{ .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
|
|
|
|
.access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
|
|
|
|
.resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
|
|
|
|
},
|
|
|
|
/* overall control: mostly access permissions */
|
2014-02-26 21:20:05 +04:00
|
|
|
{ .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
|
|
|
|
.opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
|
2013-08-20 17:54:31 +04:00
|
|
|
.access = PL1_RW,
|
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
|
|
|
|
.resetvalue = 0,
|
|
|
|
},
|
|
|
|
/* per-timer control */
|
|
|
|
{ .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
|
2014-02-26 21:20:05 +04:00
|
|
|
.type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
|
|
|
|
.accessfn = gt_ptimer_access,
|
|
|
|
.fieldoffset = offsetoflow32(CPUARMState,
|
|
|
|
cp15.c14_timer[GTIMER_PHYS].ctl),
|
|
|
|
.resetfn = arm_cp_reset_ignore,
|
|
|
|
.writefn = gt_ctl_write, .raw_writefn = raw_write,
|
|
|
|
},
|
|
|
|
{ .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
|
2013-08-20 17:54:31 +04:00
|
|
|
.type = ARM_CP_IO, .access = PL1_RW | PL0_R,
|
2014-02-26 21:20:05 +04:00
|
|
|
.accessfn = gt_ptimer_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
|
|
|
|
.resetvalue = 0,
|
2014-02-20 14:35:53 +04:00
|
|
|
.writefn = gt_ctl_write, .raw_writefn = raw_write,
|
2013-08-20 17:54:31 +04:00
|
|
|
},
|
|
|
|
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
|
2014-02-26 21:20:05 +04:00
|
|
|
.type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
|
|
|
|
.accessfn = gt_vtimer_access,
|
|
|
|
.fieldoffset = offsetoflow32(CPUARMState,
|
|
|
|
cp15.c14_timer[GTIMER_VIRT].ctl),
|
|
|
|
.resetfn = arm_cp_reset_ignore,
|
|
|
|
.writefn = gt_ctl_write, .raw_writefn = raw_write,
|
|
|
|
},
|
|
|
|
{ .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
|
2013-08-20 17:54:31 +04:00
|
|
|
.type = ARM_CP_IO, .access = PL1_RW | PL0_R,
|
2014-02-26 21:20:05 +04:00
|
|
|
.accessfn = gt_vtimer_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
|
|
|
|
.resetvalue = 0,
|
2014-02-20 14:35:53 +04:00
|
|
|
.writefn = gt_ctl_write, .raw_writefn = raw_write,
|
2013-08-20 17:54:31 +04:00
|
|
|
},
|
|
|
|
/* TimerValue views: a 32 bit downcounting view of the underlying state */
|
|
|
|
{ .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
|
|
|
|
.type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
|
2014-02-20 14:35:53 +04:00
|
|
|
.accessfn = gt_ptimer_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.readfn = gt_tval_read, .writefn = gt_tval_write,
|
|
|
|
},
|
2014-02-26 21:20:05 +04:00
|
|
|
{ .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
|
|
|
|
.type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
|
|
|
|
.readfn = gt_tval_read, .writefn = gt_tval_write,
|
|
|
|
},
|
2013-08-20 17:54:31 +04:00
|
|
|
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
|
|
|
|
.type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
|
2014-02-20 14:35:53 +04:00
|
|
|
.accessfn = gt_vtimer_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.readfn = gt_tval_read, .writefn = gt_tval_write,
|
|
|
|
},
|
2014-02-26 21:20:05 +04:00
|
|
|
{ .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
|
|
|
|
.type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
|
|
|
|
.readfn = gt_tval_read, .writefn = gt_tval_write,
|
|
|
|
},
|
2013-08-20 17:54:31 +04:00
|
|
|
/* The counter itself */
|
|
|
|
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
|
|
|
|
.access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
|
2014-02-20 14:35:53 +04:00
|
|
|
.accessfn = gt_pct_access,
|
2014-02-26 21:20:05 +04:00
|
|
|
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
|
|
|
|
},
|
|
|
|
{ .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
|
|
|
|
.access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
|
|
|
|
.accessfn = gt_pct_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
|
|
|
|
},
|
|
|
|
{ .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
|
|
|
|
.access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
|
2014-02-20 14:35:53 +04:00
|
|
|
.accessfn = gt_vct_access,
|
2014-02-26 21:20:05 +04:00
|
|
|
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
|
|
|
|
},
|
|
|
|
{ .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
|
|
|
|
.access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
|
|
|
|
.accessfn = gt_vct_access,
|
2013-08-20 17:54:31 +04:00
|
|
|
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
|
|
|
|
},
|
|
|
|
/* Comparison value, indicating when the timer goes off */
|
|
|
|
{ .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
|
|
|
|
.access = PL1_RW | PL0_R,
|
2014-02-26 21:20:05 +04:00
|
|
|
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
|
2013-08-20 17:54:31 +04:00
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
|
2014-02-26 21:20:05 +04:00
|
|
|
.accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
|
|
|
|
.writefn = gt_cval_write, .raw_writefn = raw_write,
|
|
|
|
},
|
|
|
|
{ .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
|
|
|
|
.access = PL1_RW | PL0_R,
|
|
|
|
.type = ARM_CP_IO,
|
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
|
|
|
|
.resetvalue = 0, .accessfn = gt_vtimer_access,
|
2014-02-20 14:35:53 +04:00
|
|
|
.writefn = gt_cval_write, .raw_writefn = raw_write,
|
2013-08-20 17:54:31 +04:00
|
|
|
},
|
|
|
|
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
|
|
|
|
.access = PL1_RW | PL0_R,
|
2014-02-26 21:20:05 +04:00
|
|
|
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
|
2013-08-20 17:54:31 +04:00
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
|
2014-02-26 21:20:05 +04:00
|
|
|
.accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
|
|
|
|
.writefn = gt_cval_write, .raw_writefn = raw_write,
|
|
|
|
},
|
|
|
|
{ .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
|
|
|
|
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
|
|
|
|
.access = PL1_RW | PL0_R,
|
|
|
|
.type = ARM_CP_IO,
|
|
|
|
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
|
|
|
|
.resetvalue = 0, .accessfn = gt_vtimer_access,
|
2014-02-20 14:35:53 +04:00
|
|
|
.writefn = gt_cval_write, .raw_writefn = raw_write,
|
2013-08-20 17:54:31 +04:00
|
|
|
},
|
|
|
|
REGINFO_SENTINEL
|
|
|
|
};
|
|
|
|
|
|
|
|
#else
|
|
|
|
/* In user-mode none of the generic timer registers are accessible,
|
2013-08-21 19:03:08 +04:00
|
|
|
* and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
|
2013-08-20 17:54:31 +04:00
|
|
|
* so instead just don't register any of them.
|
|
|
|
*/
|
2012-06-20 15:57:12 +04:00
|
|
|
/* Empty table: in user mode only the sentinel is registered, so no
 * generic timer registers exist (see the comment above).
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};
|
|
|
|
|
2013-08-20 17:54:31 +04:00
|
|
|
#endif
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Write handler for the (32-bit) PAR: writable bits depend on which
 * architecture features are present, so apply the appropriate mask
 * before storing.
 */
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    uint64_t writemask;

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE all bits are kept as written */
        writemask = ~0ULL;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        writemask = 0xfffff6ff;
    } else {
        writemask = 0xfffff1ff;
    }
    raw_write(env, ri, value & writemask);
}
|
|
|
|
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
/* get_phys_addr() isn't present for user-mode-only targets */
|
2012-07-12 14:59:10 +04:00
|
|
|
|
2014-02-20 14:35:53 +04:00
|
|
|
/* Access check for the ATS address translation operations: opc2 values
 * with bit 2 set select the TrustZone "Other state" translations, which
 * we do not implement.
 */
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (!(ri->opc2 & 4)) {
        return CP_ACCESS_OK;
    }
    /* Other states are only available with TrustZone; in
     * a non-TZ implementation these registers don't exist
     * at all, which is an Uncategorized trap. This underdecoding
     * is safe because the reginfo is NO_MIGRATE.
     */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Address translation operation (ATS*): translate the virtual address in
 * 'value' via get_phys_addr() and store the result into PAR.
 * The opc2 encoding selects privileged vs user (bit 1) and read vs write
 * (bit 0) translation, matching the is_user/access_type arguments.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.par_el1 = par64;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersection: V7 reports bit 1 (SS) and the top 8
             * address bits only.
             */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.par_el1 = phys_addr & 0xfffff000;
            }
        } else {
            /* Failure: fold the FSR status bits into PAR.FS and set F (bit 0) */
            env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
    }
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* VA-to-PA translation registers: 32-bit PAR, plus (system mode only)
 * the ATS* translation operations which need get_phys_addr().
 */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.par_el1),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-06-20 15:57:13 +04:00
|
|
|
/* Return basic MPU access permission bits: compress the low two bits of
 * each 4-bit extended AP field (8 regions) into packed 2-bit fields.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t packed = 0;
    int region;

    for (region = 0; region < 8; region++) {
        packed |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return packed;
}
|
|
|
|
|
|
|
|
/* Pad basic MPU access permission bits to extended format: spread each
 * packed 2-bit field (8 regions) into the low two bits of a 4-bit field.
 * Inverse of simple_mpu_ap_bits() for values that fit in 16 bits.
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t padded = 0;
    int region;

    for (region = 0; region < 8; region++) {
        padded |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return padded;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* PMSAv5 DATA_AP write: expand the basic AP value to the extended
 * format before storing it.
 */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* PMSAv5 DATA_AP read: return the stored extended value compressed back
 * to the basic AP format.
 */
static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* PMSAv5 INSN_AP write: expand the basic AP value to the extended
 * format before storing it.
 */
static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* PMSAv5 INSN_AP read: return the stored extended value compressed back
 * to the basic AP format.
 */
static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
|
|
|
|
|
|
|
|
/* PMSAv5 (MPU) registers: access-permission registers (basic views use
 * read/write hooks to convert to/from the extended storage format),
 * cache configuration, and the eight protection region base/size
 * registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    /* The extended views access the backing storage directly */
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Raw TTBCR write: mask off bits that are UNK/SBZP or reserved for the
 * current feature combination, store the value and recompute the cached
 * translation table masks derived from TTBCR.N.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    raw_write(env, ri, value);
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Guest-visible TTBCR write: flush the TLB where needed, then delegate
 * to the raw write handler for the masking and bookkeeping.
 */
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu), 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}
|
|
|
|
|
2012-06-20 15:57:14 +04:00
|
|
|
/* Reset TTBCR to zero and restore the cached masks to their N==0 values. */
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    raw_write(env, ri, 0);
    env->cp15.c2_mask = 0;
}
|
|
|
|
|
2014-02-26 21:20:04 +04:00
|
|
|
/* AArch64 TCR_EL1 write: always flush, then store the raw value. */
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu), 1);
    raw_write(env, ri, value);
}
|
|
|
|
|
2014-02-26 21:20:04 +04:00
|
|
|
/* TTBR0/TTBR1 write: 32-bit accesses cannot change the ASID, so only a
 * 64-bit register view needs a TLB flush.
 */
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}
|
|
|
|
|
2012-06-20 15:57:13 +04:00
|
|
|
/* VMSA (MMU) registers: fault status/address registers, translation
 * table base registers and translation control. The AArch32 DFSR is a
 * 32-bit view of ESR_EL1; TTBCR is the 32-bit view of TCR_EL1's low half.
 */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetfn = arm_cp_reset_ignore, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr_el2), .resetvalue = 0, },
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
    { .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* OMAP TICONFIG write: only bits in 0xe7 are kept; bit 5 (OS_TYPE)
 * switches the reported CPUID between TI915T and TI925T.
 */
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* OMAP THREADID write: only the low 16 bits are stored. */
static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* OMAP wait-for-interrupt via cp15 write (deprecated mechanism):
 * halt the CPU by raising CPU_INTERRUPT_HALT.
 */
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* OMAP cache maintenance: reset the dirty-line min/max index registers. */
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
|
|
|
|
|
2012-06-20 15:57:13 +04:00
|
|
|
/* OMAP/TI925T implementation-defined cp15 registers, plus overrides for
 * registers (DFSR, cache maintenance, crn=9) whose generic definitions
 * don't match this core.
 */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* XScale CPAR write: only the low 14 bits are stored. */
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}
|
|
|
|
|
|
|
|
/* XScale implementation-defined registers: coprocessor access register,
 * auxiliary control, and NOP'd cache-lockdown operations.
 */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-06-20 15:57:17 +04:00
|
|
|
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-06-20 15:57:18 +04:00
|
|
|
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* MPIDR read: Aff0 is the CPU index; bit 31 (the RAO MP-extensions
 * indicator) is set when the CPU has the V7MP feature.
 */
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}
|
|
|
|
|
|
|
|
/* MPIDR: a single entry covering both AArch32 and AArch64 encodings,
 * computed by mpidr_read().
 */
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
|
|
|
|
|
2012-07-12 14:59:07 +04:00
|
|
|
/* LPAE registers: NOP'd AMAIR0/1, the 64-bit PAR view, and 64-bit
 * TTBR views which must flush the TLB on write (ASID may change).
 */
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el1), .resetvalue = 0 },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* FPCR read: delegate to the VFP helper. */
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* FPCR write: delegate to the VFP helper. */
static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* FPSR read: delegate to the VFP helper. */
static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* FPSR write: delegate to the VFP helper. */
static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
|
|
|
|
|
2014-04-15 22:18:37 +04:00
|
|
|
/* Access check for the DAIF register: EL0 access traps unless
 * SCTLR.UMA permits it.
 */
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) != 0 || (env->cp15.c1_sys & SCTLR_UMA)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP;
}
|
|
|
|
|
|
|
|
/* DAIF write: only the D/A/I/F mask bits of PSTATE are updated. */
static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
|
|
|
|
|
2014-02-26 21:20:02 +04:00
|
|
|
/* Access check for the AArch64 cache maintenance operations. */
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
|
|
|
|
|
2014-08-04 17:41:56 +04:00
|
|
|
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
|
|
|
|
* Page D4-1736 (DDI0487A.b)
|
|
|
|
*/
|
|
|
|
|
2014-02-26 21:20:03 +04:00
|
|
|
static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
/* Invalidate by VA (AArch64 version) */
|
2013-09-04 03:29:02 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
2014-08-04 17:41:56 +04:00
|
|
|
uint64_t pageaddr = sextract64(value << 12, 0, 56);
|
|
|
|
|
2013-09-04 03:29:02 +04:00
|
|
|
tlb_flush_page(CPU(cpu), pageaddr);
|
2014-02-26 21:20:03 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* TLBI VAAE1: invalidate by VA for all ASIDs. QEMU's TLB is not tagged
 * by ASID, so this is implemented identically to the by-VA flush.
 */
static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page(CPU(cpu), pageaddr);
}
|
|
|
|
|
|
|
|
/* TLBI ASIDE1: the ASID is in bits [63:48] of the written value;
 * flush the whole TLB (flush_global only when ASID == 0).
 */
static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int asid = extract64(value, 48, 16);
    tlb_flush(CPU(cpu), asid == 0);
}
|
|
|
|
|
2014-09-12 17:06:50 +04:00
|
|
|
/* TLBI VAE1IS (Inner Shareable): flush the page on every CPU. */
static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *other_cs;
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, pageaddr);
    }
}
|
|
|
|
|
|
|
|
static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
uint64_t pageaddr = sextract64(value << 12, 0, 56);
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush_page(other_cs, pageaddr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
CPUState *other_cs;
|
|
|
|
int asid = extract64(value, 48, 16);
|
|
|
|
|
|
|
|
CPU_FOREACH(other_cs) {
|
|
|
|
tlb_flush(other_cs, asid == 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-15 22:18:41 +04:00
|
|
|
/* Access check for DC ZVA. We don't implement EL2, so the only control
 * is SCTLR.DZE, which can prohibit the instruction for EL0 only.
 */
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) != 0 || (env->cp15.c1_sys & SCTLR_DZE)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP;
}
|
|
|
|
|
|
|
|
/* Read DCZID_EL0: the CPU's DC ZVA block size, with the DZP bit (bit 4)
 * set whenever DC ZVA access is currently prohibited.
 */
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int prohibited = (aa64_zva_access(env, NULL) != CP_ACCESS_OK);

    return arm_env_get_cpu(env)->dcz_blocksize | (prohibited ? (1 << 4) : 0);
}
|
|
|
|
|
2014-04-15 22:18:43 +04:00
|
|
|
/* Access check for SP_EL0: the register is only accessible while
 * PSTATE.SP is set; otherwise it is in use as the current stack
 * pointer and accesses are UNDEFINED (uncategorized trap).
 */
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (env->pstate & PSTATE_SP) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
|
|
|
|
|
|
|
|
/* SPSel read: report the stack-pointer-select bit of PSTATE. */
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t spsel = env->pstate & PSTATE_SP;

    return spsel;
}
|
|
|
|
|
|
|
|
/* SPSel write: delegate to the shared PSTATE.SP update helper. */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
|
|
|
|
|
2014-01-05 02:15:45 +04:00
|
|
|
/* System registers added by ARMv8. Grouped below as: EL0-visible core
 * registers, DC ZVA support, cache-maintenance NOPs, TLBI operations,
 * address-translation ops, 32-bit aliases, and exception-state registers.
 */
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-05-27 20:09:54 +04:00
|
|
|
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
    /* RAZ/WI stand-ins: reads return zero and writes are ignored. */
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-09-29 21:48:48 +04:00
|
|
|
/* Write HCR_EL2: mask off RES0 bits for the current configuration and
 * flush the TLB if any translation-affecting bit changes.
 */
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t mask = HCR_MASK;

    /* HCR.HCD is RES0 when EL3 is implemented; HCR.TSC is RES0 when
     * it is not.
     */
    mask &= arm_feature(env, ARM_FEATURE_EL3) ? ~HCR_HCD : ~HCR_TSC;

    /* Clear RES0 bits. */
    value &= mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     * so a change to any of them invalidates cached translations.
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}
|
|
|
|
|
2014-05-27 20:09:53 +04:00
|
|
|
/* EL2 registers defined when EL2 is implemented. */
static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    /* SPSR_EL2 is stored in banked_spsr[6]. */
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-05-27 20:09:53 +04:00
|
|
|
/* EL3 registers defined when EL3 is implemented with an AArch64 EL3. */
static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    /* SPSR_EL3 is stored in banked_spsr[7]. */
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_MIGRATE,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
/* Registers present whenever EL3 is implemented, in either register width. */
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    /* AArch32 SCR is a 32-bit view of the low half of the same scr_el3
     * state (hence offsetoflow32, the ignored reset and NO_MIGRATE).
     */
    { .name = "SCR",  .type = ARM_CP_NO_MIGRATE,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .resetfn = arm_cp_reset_ignore, .writefn = scr_write },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
2012-06-20 15:57:18 +04:00
|
|
|
{
|
2013-09-04 04:19:44 +04:00
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
|
2014-06-09 18:43:22 +04:00
|
|
|
if (raw_read(env, ri) == value) {
|
2014-05-13 19:09:38 +04:00
|
|
|
/* Skip the TLB flush if nothing actually changed; Linux likes
|
|
|
|
* to do a lot of pointless SCTLR writes.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-06-09 18:43:22 +04:00
|
|
|
raw_write(env, ri, value);
|
2012-06-20 15:57:18 +04:00
|
|
|
/* ??? Lots of these bits are not implemented. */
|
|
|
|
/* This may enable/disable the MMU, so do a TLB flush. */
|
2013-09-04 04:19:44 +04:00
|
|
|
tlb_flush(CPU(cpu), 1);
|
2012-06-20 15:57:18 +04:00
|
|
|
}
|
|
|
|
|
2014-02-26 21:20:01 +04:00
|
|
|
/* Access check for CTR_EL0: only accessible in EL0 if SCTLR.UCT is set
 * (and only in AArch64; the AArch32 CTR has its own reginfo struct).
 */
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) != 0 || (env->cp15.c1_sys & SCTLR_UCT)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP;
}
|
|
|
|
|
2014-08-19 21:56:25 +04:00
|
|
|
/* Architected debug registers common to v7/v8; mostly dummy RAZ/NOP
 * implementations sufficient to keep guest kernels happy.
 */
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_R,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetfn = arm_cp_reset_ignore },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
|
|
|
|
|
|
|
|
/* LPAE (64-bit coprocessor access) views of the dummy debug registers. */
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
|
|
|
|
|
2014-09-12 17:06:49 +04:00
|
|
|
/* Resynchronize QEMU's watchpoint for slot n with the architectural
 * DBGWVR<n>/DBGWCR<n> register pair: remove any stale QEMU watchpoint,
 * then, if the slot is enabled and usable, insert a new one covering the
 * address range the registers describe.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    /* Drop whatever QEMU watchpoint currently backs this slot. */
    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    /* The LSC field (WCR bits [4:3]) selects which access types fire. */
    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
|
|
|
|
|
|
|
|
/* Rebuild every QEMU watchpoint from the architectural debug registers. */
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (n = 0; n < ARRAY_SIZE(cpu->env.cpu_watchpoint); n++) {
        hw_watchpoint_update(cpu, n);
    }
}
|
|
|
|
|
|
|
|
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
int i = ri->crm;
|
|
|
|
|
|
|
|
/* Bits [63:49] are hardwired to the value of bit [48]; that is, the
|
|
|
|
* register reads and behaves as if values written are sign extended.
|
|
|
|
* Bits [1:0] are RES0.
|
|
|
|
*/
|
|
|
|
value = sextract64(value, 0, 49) & ~3ULL;
|
|
|
|
|
|
|
|
raw_write(env, ri, value);
|
|
|
|
hw_watchpoint_update(cpu, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
int i = ri->crm;
|
|
|
|
|
|
|
|
raw_write(env, ri, value);
|
|
|
|
hw_watchpoint_update(cpu, i);
|
|
|
|
}
|
|
|
|
|
2014-09-29 21:48:46 +04:00
|
|
|
/* Resynchronize QEMU's breakpoint for slot n with the architectural
 * DBGBVR<n>/DBGBCR<n> register pair: remove any stale QEMU breakpoint,
 * then insert a new one if the slot is enabled and its breakpoint type
 * is one we implement.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    /* Drop whatever QEMU breakpoint currently backs this slot. */
    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    /* BT field (BCR bits [23:20]) selects the breakpoint type. */
    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
|
|
|
|
|
|
|
|
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* Drop all existing QEMU-level CPU breakpoints and wipe our
     * bookkeeping array so no stale entries survive (e.g. after a
     * migration load), then re-derive every slot from the current
     * DBGBVR/DBGBCR register state.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        hw_breakpoint_update(cpu, n);
    }
}
|
|
|
|
|
|
|
|
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
int i = ri->crm;
|
|
|
|
|
|
|
|
raw_write(env, ri, value);
|
|
|
|
hw_breakpoint_update(cpu, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|
|
|
uint64_t value)
|
|
|
|
{
|
|
|
|
ARMCPU *cpu = arm_env_get_cpu(env);
|
|
|
|
int i = ri->crm;
|
|
|
|
|
|
|
|
/* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
|
|
|
|
* copy of BAS[0].
|
|
|
|
*/
|
|
|
|
value = deposit64(value, 6, 1, extract64(value, 5, 1));
|
|
|
|
value = deposit64(value, 8, 1, extract64(value, 7, 1));
|
|
|
|
|
|
|
|
raw_write(env, ri, value);
|
|
|
|
hw_breakpoint_update(cpu, i);
|
|
|
|
}
|
|
|
|
|
2014-08-19 21:56:25 +04:00
|
|
|
/* Register the v7/v8 architectural debug registers (DBGDIDR plus the
 * per-slot breakpoint/watchpoint value and control registers) with the
 * cp-register framework, sized from the counts advertised in DBGDIDR.
 */
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps; /* watchpoint/breakpoint/context-comparator counts */
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    /* One DBGBVR/DBGBCR pair per implemented breakpoint slot. */
    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    /* One DBGWVR/DBGWCR pair per implemented watchpoint slot. */
    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
|
|
|
|
|
2012-06-20 15:57:09 +04:00
|
|
|
/* Populate this CPU's coprocessor/system register hashtable: each block
 * of registers is defined (or not) according to the ARM_FEATURE_* bits
 * set on the CPU model. Ordering matters: wildcard/override entries are
 * registered before the specific definitions that supersede them.
 */
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_NO_MIGRATE,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->midr & 0xff000000,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1},
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* We mask out the PMUVer field, because we don't currently
               * implement the PMU. Not advertising it prevents the guest
               * from trying to use it and getting UNDEFs on registers we
               * don't implement.
               */
              .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr2 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo rvbar = {
            .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
            .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
        };
        define_one_arm_cp_reg(cpu, &rvbar);
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, v8_el3_cp_reginfo);
        }
        define_arm_cp_regs(cpu, el3_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            /* v8 MIDR -- the wildcard isn't necessary, and nor is the
             * variable-MIDR TI925 behaviour. Instead we have a single
             * (strictly speaking IMPDEF) alias of the MIDR, REVIDR.
             */
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                /* Read-only variant: constant value, no backing field. */
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
|
|
|
|
|
2012-04-20 11:39:14 +04:00
|
|
|
/* Create an ARM CPU of the given model name.  Thin wrapper around
 * cpu_generic_init() for TYPE_ARM_CPU; the result is cast to ARMCPU.
 * NOTE(review): presumably returns NULL for an unknown model name
 * (cpu_generic_init contract) -- verify at the call sites.
 */
ARMCPU *cpu_arm_init(const char *cpu_model)
{
    return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
}
|
|
|
|
|
|
|
|
/* Register the FP/vector coprocessor register set with the gdbstub,
 * choosing the richest XML description this CPU's features support.
 * The chain order matters: AArch64 first, then Neon (a superset of
 * VFP3), then VFP3, then plain VFP; at most one set is registered.
 */
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 34 registers described by aarch64-fpu.xml */
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* 51 registers: includes the Q-register aliases of the D regs
         * (see vfp_gdb_get_reg above).
         */
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        /* 35 registers: 32 D regs plus control registers */
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        /* 19 registers: 16 D regs plus control registers */
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
|
|
|
|
|
2012-04-20 21:58:31 +04:00
|
|
|
/* Sort alphabetically by type name, except for "any". */
|
|
|
|
/* GCompareFunc for g_slist_sort: order CPU classes alphabetically by
 * QOM type name, except that the "any" CPU always sorts last.
 */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    const char *name_a = object_class_get_name((ObjectClass *)a);
    const char *name_b = object_class_get_name((ObjectClass *)b);

    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    }
    if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    }
    return strcmp(name_a, name_b);
}
|
|
|
|
|
2012-04-20 21:58:31 +04:00
|
|
|
/* g_slist_foreach callback: print one CPU model name (the QOM type
 * name with its "-" TYPE_ARM_CPU suffix stripped) to the list output.
 */
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    CPUListState *s = user_data;
    const char *typename = object_class_get_name((ObjectClass *)data);
    char *model = g_strndup(typename,
                            strlen(typename) - strlen("-" TYPE_ARM_CPU));

    (*s->cpu_fprintf)(s->file, " %s\n", model);
    g_free(model);
}
|
|
|
|
|
|
|
|
/* Print the sorted list of available ARM CPU model names to @f. */
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState state = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *classes = object_class_get_list(TYPE_ARM_CPU, false);

    classes = g_slist_sort(classes, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(classes, arm_cpu_list_entry, &state);
    g_slist_free(classes);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
#endif
}
|
|
|
|
|
2013-09-10 22:09:33 +04:00
|
|
|
/* g_slist_foreach callback: prepend a CpuDefinitionInfo entry for one
 * CPU class to the list whose head pointer is passed via @user_data.
 */
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    CpuDefinitionInfoList **head = user_data;
    const char *typename = object_class_get_name((ObjectClass *)data);
    CpuDefinitionInfo *info = g_malloc0(sizeof(*info));
    CpuDefinitionInfoList *node = g_malloc0(sizeof(*node));

    /* Model name is the type name minus the "-" TYPE_ARM_CPU suffix. */
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    node->value = info;
    node->next = *head;
    *head = node;
}
|
|
|
|
|
|
|
|
/* QMP query-cpu-definitions backend: build a (caller-owned) list of
 * all registered ARM CPU models.  @errp is unused; the query cannot
 * fail.
 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *defs = NULL;
    GSList *classes = object_class_get_list(TYPE_ARM_CPU, false);

    g_slist_foreach(classes, arm_cpu_add_definition, &defs);
    g_slist_free(classes);

    return defs;
}
|
|
|
|
|
2013-12-23 02:32:30 +04:00
|
|
|
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     *
     * @state is one concrete view (ARM_CP_STATE_AA32 or _AA64) and
     * @secstate one concrete security state; @crm/@opc1/@opc2 are the
     * concrete (non-wildcard) encoding values for this instance.
     */
    uint32_t *key = g_new(uint32_t, 1);
    /* Work on a private copy; the caller's reginfo may describe several
     * instances (wildcards, BOTH state, both security states).
     */
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_NO_MIGRATE;
                r2->resetfn = arm_cp_reset_ignore;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_NO_MIGRATE;
            r2->resetfn = arm_cp_reset_ignore;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            /* The AArch32 view of a shared 64-bit register is its low
             * half, which on a big-endian host is at the higher address.
             */
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    /* Ownership of both @key and @r2 passes to the hash table here. */
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
|
|
|
|
|
|
|
|
|
2012-06-20 15:57:06 +04:00
|
|
|
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    /* Expand each wildcard (CP_ANY) field to its full range; a concrete
     * field collapses to a single-iteration range.
     */
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            /* Readable: need either a storage offset (plain or banked
             * pair) or an explicit read function.
             */
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            /* Writable: likewise, storage or an explicit write function. */
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2);
                            break;
                        default:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2);
                    }
                }
            }
        }
    }
}
|
|
|
|
|
|
|
|
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
|
|
|
|
const ARMCPRegInfo *regs, void *opaque)
|
|
|
|
{
|
|
|
|
/* Define a whole list of registers */
|
|
|
|
const ARMCPRegInfo *r;
|
|
|
|
for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
|
|
|
|
define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-05 02:15:44 +04:00
|
|
|
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    /* Look up a coprocessor register by its encoded key; returns NULL
     * if no such register has been defined.
     */
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpregs, &encoded_cp);
    return ri;
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Shared .writefn for write-ignore (WI) registers: @value is discarded. */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}
|
|
|
|
|
2014-02-20 14:35:54 +04:00
|
|
|
/* Shared .readfn for read-as-zero (RAZ) registers. */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}
|
|
|
|
|
2014-01-05 02:15:44 +04:00
|
|
|
/* Shared .resetfn for registers whose state is not touched by CPU reset. */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* These modes are always reachable. */
        return 0;
    case ARM_CPU_MODE_MON:
        /* Monitor mode is only a valid target while already secure. */
        return !arm_is_secure(env);
    default:
        /* Any other encoding (including HYP, not listed above) is bad. */
        return 1;
    }
}
|
|
|
|
|
2007-11-13 04:50:15 +03:00
|
|
|
uint32_t cpsr_read(CPUARMState *env)
{
    /* Assemble the architectural CPSR value from the individually
     * cached flag fields plus the uncached remainder.
     */
    uint32_t ret = env->uncached_cpsr;

    ret |= env->NF & 0x80000000;              /* N */
    if (env->ZF == 0) {                       /* Z (ZF is stored inverted) */
        ret |= 1u << 30;
    }
    ret |= env->CF << 29;                     /* C */
    ret |= (env->VF & 0x80000000) >> 3;       /* V */
    ret |= env->QF << 27;                     /* Q */
    ret |= env->thumb << 5;                   /* T */
    ret |= (env->condexec_bits & 3) << 25;    /* IT[1:0] */
    ret |= (env->condexec_bits & 0xfc) << 8;  /* IT[7:2] */
    ret |= env->GE << 16;                     /* GE */
    ret |= env->daif & CPSR_AIF;              /* A/I/F */
    return ret;
}
|
|
|
|
|
|
|
|
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    /* Update the CPSR, touching only the bit positions selected by
     * @mask.  The NZCV/Q/T/IT/GE/AIF fields are decomposed into their
     * dedicated cached CPUARMState fields; the remainder is written to
     * uncached_cpsr.  A mode-field change triggers a banked-register
     * switch via switch_mode().
     */
    if (mask & CPSR_NZCV) {
        /* ZF is stored inverted: ZF == 0 means the Z flag is set. */
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* Update only the masked-in A/I/F bits of DAIF. */
    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    /* The cached fields were handled above; avoid writing them twice. */
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
|
|
|
|
|
2008-03-31 07:44:26 +04:00
|
|
|
/* Sign/zero extend */
|
|
|
|
uint32_t HELPER(sxtb16)(uint32_t x)
|
|
|
|
{
|
|
|
|
uint32_t res;
|
|
|
|
res = (uint16_t)(int8_t)x;
|
|
|
|
res |= (uint32_t)(int8_t)(x >> 16) << 16;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t HELPER(uxtb16)(uint32_t x)
|
|
|
|
{
|
|
|
|
uint32_t res;
|
|
|
|
res = (uint16_t)(uint8_t)x;
|
|
|
|
res |= (uint32_t)(uint8_t)(x >> 16) << 16;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2008-03-31 07:45:13 +04:00
|
|
|
/* ARM CLZ instruction: count leading zero bits of @x; delegated to
 * clz32() from qemu/host-utils.h (included at the top of this file).
 */
uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}
|
|
|
|
|
2008-03-31 07:46:19 +04:00
|
|
|
int32_t HELPER(sdiv)(int32_t num, int32_t den)
|
|
|
|
{
|
|
|
|
if (den == 0)
|
|
|
|
return 0;
|
2009-10-16 01:08:46 +04:00
|
|
|
if (num == INT_MIN && den == -1)
|
|
|
|
return INT_MIN;
|
2008-03-31 07:46:19 +04:00
|
|
|
return num / den;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
|
|
|
|
{
|
|
|
|
if (den == 0)
|
|
|
|
return 0;
|
|
|
|
return num / den;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t HELPER(rbit)(uint32_t x)
|
|
|
|
{
|
|
|
|
x = ((x & 0xff000000) >> 24)
|
|
|
|
| ((x & 0x00ff0000) >> 8)
|
|
|
|
| ((x & 0x0000ff00) << 8)
|
|
|
|
| ((x & 0x000000ff) << 24);
|
|
|
|
x = ((x & 0xf0f0f0f0) >> 4)
|
|
|
|
| ((x & 0x0f0f0f0f) << 4);
|
|
|
|
x = ((x & 0x88888888) >> 3)
|
|
|
|
| ((x & 0x44444444) >> 1)
|
|
|
|
| ((x & 0x22222222) << 1)
|
|
|
|
| ((x & 0x11111111) << 3);
|
|
|
|
return x;
|
|
|
|
}
|
|
|
|
|
2007-09-17 01:08:06 +04:00
|
|
|
#if defined(CONFIG_USER_ONLY)
|
2005-11-26 13:38:39 +03:00
|
|
|
|
2013-08-26 05:01:33 +04:00
|
|
|
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                             int mmu_idx)
{
    /* User-mode-only build: there is no MMU, so every fault becomes a
     * guest abort.  rw == 2 marks an instruction fetch (prefetch abort);
     * any other access is a data abort.  Always returns 1 (unhandled).
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception.vaddress = address;
    cs->exception_index = (rw == 2) ? EXCP_PREFETCH_ABORT : EXCP_DATA_ABORT;
    return 1;
}
|
|
|
|
|
2007-11-11 03:04:49 +03:00
|
|
|
/* These should probably raise undefined insn exceptions. */
|
2012-03-14 04:38:21 +04:00
|
|
|
/* v7M MSR to a system register: not supported in user-only builds,
 * so any use aborts emulation.
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* v7M MRS from a system register: not supported in user-only builds,
 * so any use aborts emulation.  The return is unreachable.
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* User-only emulation never leaves USR mode; a request for any other
 * mode indicates an emulation bug, so abort.
 */
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Banked r13 writes cannot occur in user-only emulation (only USR mode
 * exists); treat any use as an emulation bug and abort.
 */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 write\n");
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Banked r13 reads cannot occur in user-only emulation; abort on use.
 * The return is unreachable.
 */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 read\n");
    return 0;
}
|
|
|
|
|
2014-09-29 21:48:49 +04:00
|
|
|
/* User-only build: exceptions always notionally target EL1. */
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
{
    return 1;
}
|
|
|
|
|
2005-11-26 13:38:39 +03:00
|
|
|
#else
|
|
|
|
|
|
|
|
/* Map CPU modes onto saved register banks. */
|
2013-03-05 04:34:41 +04:00
|
|
|
/* Map a CPSR mode value onto the index of its saved register bank
 * (banked_r13/banked_r14/banked_spsr).  USR and SYS share bank 0.
 * An invalid mode value is a fatal emulation error.
 */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    case ARM_CPU_MODE_HYP:
        return 6;
    case ARM_CPU_MODE_MON:
        return 7;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
void switch_mode(CPUARMState *env, int mode)
{
    /* Switch the CPU into @mode: spill the outgoing mode's banked
     * r13/r14/SPSR, load the incoming mode's copies, and additionally
     * shuffle r8-r12 when entering or leaving FIQ mode.
     */
    int prev_mode = env->uncached_cpsr & CPSR_M;
    int bank;

    if (mode == prev_mode) {
        return;
    }

    /* FIQ also banks r8-r12: swap them via the usr_regs/fiq_regs arrays. */
    if (prev_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save the outgoing mode's banked state... */
    bank = bank_number(prev_mode);
    env->banked_r13[bank] = env->regs[13];
    env->banked_r14[bank] = env->regs[14];
    env->banked_spsr[bank] = env->spsr;

    /* ...and load the incoming mode's. */
    bank = bank_number(mode);
    env->regs[13] = env->banked_r13[bank];
    env->regs[14] = env->banked_r14[bank];
    env->spsr = env->banked_spsr[bank];
}
|
|
|
|
|
2014-12-11 15:07:48 +03:00
|
|
|
/* Physical Interrupt Target EL Lookup Table
|
|
|
|
*
|
|
|
|
* [ From ARM ARM section G1.13.4 (Table G1-15) ]
|
|
|
|
*
|
|
|
|
* The below multi-dimensional table is used for looking up the target
|
|
|
|
* exception level given numerous condition criteria. Specifically, the
|
|
|
|
* target EL is based on SCR and HCR routing controls as well as the
|
|
|
|
* currently executing EL and secure state.
|
|
|
|
*
|
|
|
|
* Dimensions:
|
|
|
|
* target_el_table[2][2][2][2][2][4]
|
|
|
|
* | | | | | +--- Current EL
|
|
|
|
* | | | | +------ Non-secure(0)/Secure(1)
|
|
|
|
* | | | +--------- HCR mask override
|
|
|
|
* | | +------------ SCR exec state control
|
|
|
|
* | +--------------- SCR mask override
|
|
|
|
* +------------------ 32-bit(0)/64-bit(1) EL3
|
|
|
|
*
|
|
|
|
* The table values are as such:
|
|
|
|
* 0-3 = EL0-EL3
|
|
|
|
* -1 = Cannot occur
|
|
|
|
*
|
|
|
|
* The ARM ARM target EL table includes entries indicating that an "exception
|
|
|
|
* is not taken". The two cases where this is applicable are:
|
|
|
|
* 1) An exception is taken from EL3 but the SCR does not have the exception
|
|
|
|
* routed to EL3.
|
|
|
|
* 2) An exception is taken from EL2 but the HCR does not have the exception
|
|
|
|
* routed to EL2.
|
|
|
|
* In these two cases, the below table contain a target of EL1. This value is
|
|
|
|
* returned as it is expected that the consumer of the table data will check
|
|
|
|
* for "target EL >= current EL" to ensure the exception is not taken.
|
|
|
|
*
|
|
|
|
* SCR HCR
|
|
|
|
* 64 EA AMO From
|
|
|
|
* BIT IRQ IMO Non-secure Secure
|
|
|
|
* EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
|
|
|
|
*/
|
|
|
|
/* Index order is [64-bit EL3][SCR route][SCR RW][HCR route][secure][cur EL];
 * see the dimension legend in the block comment above.  -1 entries mark
 * combinations that cannot occur.
 */
const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
       {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
      {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
       {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
     {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
       {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
      {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
       {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
    {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
       {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
      {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
       {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
     {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
       {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
      {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
       {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Determine the target EL for physical exceptions
|
|
|
|
*/
|
|
|
|
/* Determine the target EL for a physical IRQ/FIQ/abort exception.
 *
 * Routing is decided by SCR_EL3 (EA/IRQ/FIQ routing to EL3, plus RW)
 * and HCR_EL2 (AMO/IMO/FMO routing to EL2, with TGE forcing routing
 * on), folded through target_el_table above.
 */
static inline uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                               uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    int scr;
    int hcr;
    int target_el;
    int is64 = arm_el_is_aa64(env, 3);

    /* Pick the SCR/HCR routing bits relevant to this exception type. */
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    }

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    /* -1 ("cannot occur") entries must never be reached for valid input. */
    assert(target_el > 0);

    return target_el;
}
|
|
|
|
|
2014-09-29 21:48:49 +04:00
|
|
|
/*
|
|
|
|
* Determine the target EL for a given exception type.
|
|
|
|
*/
|
|
|
|
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
{
    /* Map an exception type to the EL that will take it: hypervisor
     * traps go to EL2, secure monitor calls to EL3, physical IRQ/FIQ
     * through the SCR/HCR routing lookup, virtual interrupts to EL1,
     * and everything else to the current EL (but never below EL1).
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int cur_el = arm_current_el(env);
    unsigned int target_el;
    bool secure = arm_is_secure(env);

    switch (excp_idx) {
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
        target_el = 2;
        break;
    case EXCP_SMC:
        target_el = 3;
        break;
    case EXCP_FIQ:
    case EXCP_IRQ:
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        break;
    case EXCP_VIRQ:
    case EXCP_VFIQ:
        target_el = 1;
        break;
    default:
        target_el = MAX(cur_el, 1);
        break;
    }
    return target_el;
}
|
|
|
|
|
2007-11-11 03:04:49 +03:00
|
|
|
/* Push @val onto the v7M stack: pre-decrement r13 by 4 and store the
 * word to guest physical memory.
 */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}
|
|
|
|
|
|
|
|
/* Pop one word from the v7M stack: load from guest physical memory at
 * r13, then post-increment r13 by 4.
 */
static uint32_t v7m_pop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t val;

    val = ldl_phys(cs->as, env->regs[13]);
    env->regs[13] += 4;
    return val;
}
|
|
|
|
|
|
|
|
/* Switch to V7M main or process stack pointer. */
|
|
|
|
static void switch_v7m_sp(CPUARMState *env, int process)
|
|
|
|
{
|
|
|
|
uint32_t tmp;
|
|
|
|
if (env->v7m.current_sp != process) {
|
|
|
|
tmp = env->v7m.other_sp;
|
|
|
|
env->v7m.other_sp = env->regs[13];
|
|
|
|
env->regs[13] = tmp;
|
|
|
|
env->v7m.current_sp = process;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void do_v7m_exception_exit(CPUARMState *env)
{
    /* Perform a v7M exception return: complete the active NVIC
     * exception, select the stack indicated by the EXC_RETURN value
     * (held in r15 at this point), and unstack the exception frame.
     */
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers in the architectural frame order: r0-r3, r12, lr,
     * pc, xPSR.
     */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment (bit 9 of xPSR records the pre-exception
     * alignment adjustment made on entry).
     */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
|
|
|
|
|
2013-02-02 15:33:14 +04:00
|
|
|
void arm_v7m_cpu_do_interrupt(CPUState *cs)
|
2007-11-11 03:04:49 +03:00
|
|
|
{
|
2013-02-02 15:33:14 +04:00
|
|
|
ARMCPU *cpu = ARM_CPU(cs);
|
|
|
|
CPUARMState *env = &cpu->env;
|
2007-11-11 03:04:49 +03:00
|
|
|
uint32_t xpsr = xpsr_read(env);
|
|
|
|
uint32_t lr;
|
|
|
|
uint32_t addr;
|
|
|
|
|
2013-08-26 10:31:06 +04:00
|
|
|
arm_log_exception(cs->exception_index);
|
2013-08-20 17:54:28 +04:00
|
|
|
|
2007-11-11 03:04:49 +03:00
|
|
|
lr = 0xfffffff1;
|
|
|
|
if (env->v7m.current_sp)
|
|
|
|
lr |= 4;
|
|
|
|
if (env->v7m.exception == 0)
|
|
|
|
lr |= 8;
|
|
|
|
|
|
|
|
/* For exceptions we just mark as pending on the NVIC, and let that
|
|
|
|
handle it. */
|
|
|
|
/* TODO: Need to escalate if the current priority is higher than the
|
|
|
|
one we're raising. */
|
2013-08-26 10:31:06 +04:00
|
|
|
switch (cs->exception_index) {
|
2007-11-11 03:04:49 +03:00
|
|
|
case EXCP_UDEF:
|
2010-04-05 22:34:51 +04:00
|
|
|
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
|
2007-11-11 03:04:49 +03:00
|
|
|
return;
|
|
|
|
case EXCP_SWI:
|
2013-01-11 19:21:22 +04:00
|
|
|
/* The PC already points to the next instruction. */
|
2010-04-05 22:34:51 +04:00
|
|
|
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
|
2007-11-11 03:04:49 +03:00
|
|
|
return;
|
|
|
|
case EXCP_PREFETCH_ABORT:
|
|
|
|
case EXCP_DATA_ABORT:
|
target-arm: Define exception record for AArch64 exceptions
For AArch32 exceptions, the only information provided about
the cause of an exception is the individual exception type (data
abort, undef, etc), which we store in cs->exception_index. For
AArch64, the CPU provides much more detail about the cause of
the exception, which can be found in the syndrome register.
Create a set of fields in CPUARMState which must be filled in
whenever an exception is raised, so that exception entry can
correctly fill in the syndrome register for the guest.
This includes the information which in AArch32 appears in
the DFAR and IFAR (fault address registers) and the DFSR
and IFSR (fault status registers) for data aborts and
prefetch aborts, since if we end up taking the MMU fault
to AArch64 rather than AArch32 this will need to end up
in different system registers.
This patch does a refactoring which moves the setting of the
AArch32 DFAR/DFSR/IFAR/IFSR from the point where the exception
is raised to the point where it is taken. (This is no change
for cores with an MMU, retains the existing clearly incorrect
behaviour for ARM946 of trashing the MP access permissions
registers which share the c5_data and c5_insn state fields,
and has no effect for v7M because we don't implement its
MPU fault status or address registers.)
As a side effect of the cleanup we fix a bug in the AArch64
linux-user mode code where we were passing a 64 bit fault
address through the 32 bit c6_data/c6_insn fields: it now
goes via the always-64-bit exception.vaddress.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
2014-04-15 22:18:38 +04:00
|
|
|
/* TODO: if we implemented the MPU registers, this is where we
|
|
|
|
* should set the MMFAR, etc from exception.fsr and exception.vaddress.
|
|
|
|
*/
|
2010-04-05 22:34:51 +04:00
|
|
|
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
|
2007-11-11 03:04:49 +03:00
|
|
|
return;
|
|
|
|
case EXCP_BKPT:
|
2007-11-25 02:22:11 +03:00
|
|
|
if (semihosting_enabled) {
|
|
|
|
int nr;
|
2012-09-05 00:25:59 +04:00
|
|
|
nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
|
2007-11-25 02:22:11 +03:00
|
|
|
if (nr == 0xab) {
|
|
|
|
env->regs[15] += 2;
|
|
|
|
env->regs[0] = do_arm_semihosting(env);
|
2013-08-20 17:54:28 +04:00
|
|
|
qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
|
2007-11-25 02:22:11 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2010-04-05 22:34:51 +04:00
|
|
|
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
|
2007-11-11 03:04:49 +03:00
|
|
|
return;
|
|
|
|
case EXCP_IRQ:
|
2010-04-05 22:34:51 +04:00
|
|
|
env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
|
2007-11-11 03:04:49 +03:00
|
|
|
break;
|
|
|
|
case EXCP_EXCEPTION_EXIT:
|
|
|
|
do_v7m_exception_exit(env);
|
|
|
|
return;
|
|
|
|
default:
|
2013-09-03 19:38:47 +04:00
|
|
|
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
|
2007-11-11 03:04:49 +03:00
|
|
|
return; /* Never happens. Keep compiler happy. */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Align stack pointer. */
|
|
|
|
/* ??? Should only do this if Configuration Control Register
|
|
|
|
STACKALIGN bit is set. */
|
|
|
|
if (env->regs[13] & 4) {
|
2008-07-02 20:44:09 +04:00
|
|
|
env->regs[13] -= 4;
|
2007-11-11 03:04:49 +03:00
|
|
|
xpsr |= 0x200;
|
|
|
|
}
|
2008-04-13 04:57:49 +04:00
|
|
|
/* Switch to the handler mode. */
|
2007-11-11 03:04:49 +03:00
|
|
|
v7m_push(env, xpsr);
|
|
|
|
v7m_push(env, env->regs[15]);
|
|
|
|
v7m_push(env, env->regs[14]);
|
|
|
|
v7m_push(env, env->regs[12]);
|
|
|
|
v7m_push(env, env->regs[3]);
|
|
|
|
v7m_push(env, env->regs[2]);
|
|
|
|
v7m_push(env, env->regs[1]);
|
|
|
|
v7m_push(env, env->regs[0]);
|
|
|
|
switch_v7m_sp(env, 0);
|
2012-03-14 16:26:10 +04:00
|
|
|
/* Clear IT bits */
|
|
|
|
env->condexec_bits = 0;
|
2007-11-11 03:04:49 +03:00
|
|
|
env->regs[14] = lr;
|
2013-11-15 17:46:38 +04:00
|
|
|
addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
|
2007-11-11 03:04:49 +03:00
|
|
|
env->regs[15] = addr & 0xfffffffe;
|
|
|
|
env->thumb = addr & 1;
|
|
|
|
}
|
|
|
|
|
2005-11-26 13:38:39 +03:00
|
|
|
/* Handle a CPU exception. */
|
2013-02-02 13:57:51 +04:00
|
|
|
/* Handle a CPU exception taken to an AArch32 mode (not v7M: the M-profile
 * path is handled separately and asserted against below).
 *
 * Determines the target mode, vector offset and return-address offset for
 * cs->exception_index, updates the AArch32 fault/debug status registers
 * where appropriate, and switches the CPU into the new mode with the
 * vector PC installed.  Semihosting SWI/BKPT requests and PSCI calls are
 * intercepted and handled without taking the exception.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;      /* vector table offset for this exception */
    uint32_t mask;      /* CPSR interrupt-mask bits to set on entry */
    int new_mode;       /* ARM_CPU_MODE_* we will enter */
    uint32_t offset;    /* adjustment applied to PC to form the return LR */
    uint32_t moe;       /* DBGDSCR.MOE encoding for debug exceptions, 0 if none */

    assert(!IS_M(env));

    arm_log_exception(cs->exception_index);

    /* PSCI (power state) calls are emulated directly rather than trapped
     * to the guest's secure firmware.
     */
    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        /* MOE occupies bits [5:2] of MDSCR_EL1 / DBGDSCR. */
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* Return address must point at the undefined insn: 2 bytes back
         * in Thumb state, 4 in ARM state.
         */
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                /* Skip the BKPT insn and return the semihosting result. */
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        /* Non-semihosting BKPT is reported as a debug event prefetch
         * abort with IFSR 0x2 (debug exception).
         */
        env->exception.fsr = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifsr_el2 = env->exception.fsr;
        /* The AArch32 IFAR is the high half of FAR_EL1. */
        env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32,
                                        env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.esr_el[1] = env->exception.fsr;
        /* The AArch32 DFAR is the low half of FAR_EL1. */
        env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32,
                                        env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      (uint32_t)env->cp15.esr_el[1],
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & SCTLR_V) {
        /* when enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.vbar_el[1];
    }

    /* Entering monitor mode switches to the secure world. */
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode (env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->daif |= mask;
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
|
|
|
|
|
|
|
|
/* Check section/page access permissions.
|
|
|
|
Returns the page protection flags, or zero if the access is not
|
|
|
|
permitted. */
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted. */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    /* Protection granted by a read-only mapping for this access:
     * nothing for a write (access_type == 1), read permission otherwise.
     */
    int readonly = (access_type == 1) ? 0 : PAGE_READ;

    /* A "manager" domain (3) bypasses the AP bits entirely. */
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        /* AP=0: never accessible on v7, and never writable before it;
         * on pre-v7 cores the SCTLR S/R bits may still grant read access.
         */
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        if (access_type == 1) {
            return 0;
        }
        switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            /* System protection: privileged read-only. */
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            /* ROM protection: read-only for everyone. */
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        /* Privileged read/write; no user access. */
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        /* Privileged read/write; user read-only. */
        return is_user ? readonly : PAGE_READ | PAGE_WRITE;
    case 3:
        /* Full read/write access. */
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        /* Privileged read-only; no user access. */
        return is_user ? 0 : readonly;
    case 6:
        /* Read-only for all modes. */
        return readonly;
    case 7:
        /* Read-only on v6K and later; reserved before that. */
        return arm_feature(env, ARM_FEATURE_V6K) ? readonly : 0;
    default:
        abort();
    }
}
|
|
|
|
|
2014-06-19 21:06:24 +04:00
|
|
|
/* Compute the physical address of the level-1 descriptor for a virtual
 * address, selecting TTBR0 or TTBR1 per the TTBCR split.  Returns false
 * (without writing *table) if the relevant table walk is disabled.
 */
static bool get_level1_table_address(CPUARMState *env, uint32_t *table,
                                     uint32_t address)
{
    uint32_t base;

    if (address & env->cp15.c2_mask) {
        /* Upper part of the address space: described by TTBR1. */
        if (env->cp15.c2_control & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        base = env->cp15.ttbr1_el1 & 0xffffc000;
    } else {
        /* Lower part of the address space: described by TTBR0. */
        if (env->cp15.c2_control & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
    }

    /* Index the table with VA bits [31:20], scaled to a word offset. */
    *table = base | ((address >> 18) & 0x3ffc);
    return true;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Translate a virtual address using the ARMv5-style (pre-v6 permission
 * model) short-descriptor page tables.
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size.
 * On failure returns a short-format FSR value: fault status code in the
 * low 4 bits combined with the domain number in bits [7:4].
 */
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;               /* short-format fault status code */
    uint32_t table;         /* physical address of the descriptor to read */
    uint32_t desc;          /* descriptor contents */
    int type;               /* descriptor type field (bits [1:0]) */
    int ap;                 /* access permission bits for the mapping */
    int domain = 0;
    int domain_prot;        /* DACR permission for this domain */
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    /* Domain "no access" (0) or reserved (2) faults the access. */
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* AP field selected by which 16K quarter of the page is hit. */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            /* AP field selected by which 1K quarter of the page is hit. */
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale "extended small page" in a coarse table. */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    /* v5 tables have no execute-never bit: readable implies executable. */
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Translate a virtual address using the ARMv6+ short-descriptor page
 * tables (supersections, XN/PXN bits, 3-bit AP encoding).
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size.
 * On failure returns a short-format FSR value: fault status code in the
 * low 4 bits combined with the domain number in bits [7:4].
 */
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;               /* short-format fault status code */
    uint32_t table;         /* physical address of the descriptor to read */
    uint32_t desc;          /* descriptor contents */
    uint32_t xn;            /* execute-never bit from the descriptor */
    uint32_t pxn = 0;       /* privileged-execute-never (with FEATURE_PXN) */
    int type;               /* descriptor type field (bits [1:0]) */
    int ap;                 /* 3-bit access permission field */
    int domain = 0;
    int domain_prot;        /* DACR permission for this domain */
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    /* Domain "no access" (0) or reserved (2) faults the access. */
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        /* "Manager" domain: all accesses permitted, AP/XN ignored. */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        /* PXN makes the page execute-never for privileged accesses. */
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
|
|
|
|
|
2012-07-12 14:59:12 +04:00
|
|
|
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 * (The faulting lookup level is reported separately in the low bits.)
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;
|
|
|
|
|
2014-04-15 22:18:40 +04:00
|
|
|
/* get_phys_addr_lpae: walk an LPAE/AArch64 long-descriptor translation table
 * for a Non-secure PL0/PL1 stage-1 translation.
 *
 * @env: CPU state (TTBR0/TTBR1/TCR are read from env->cp15)
 * @address: virtual address to translate
 * @access_type: 0 read, 1 write, 2 execute
 * @is_user: 1 for an unprivileged access
 * @phys_ptr: output physical address
 * @prot: output PAGE_READ/PAGE_WRITE/PAGE_EXEC permissions
 * @page_size_ptr: output size of the translated page/block
 *
 * Returns 0 on success; otherwise a long-descriptor-format DFSR/IFSR
 * value (LPAE bit 9 set, fault type in bits [5:2], level in bits [1:0]).
 */
static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    int32_t tsz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, descmask;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t granule_sz = 9;   /* log2(descriptors per table); 9 => 4KB granule */
    int32_t va_size = 32;
    int32_t tbi = 0;          /* "top byte ignore": 8 when TBI applies, else 0 */

    if (arm_el_is_aa64(env, 1)) {
        va_size = 64;
        /* TCR_EL1.TBI1 governs addresses with bit 55 set, TBI0 the rest. */
        if (extract64(address, 55, 1))
            tbi = extract64(env->cp15.c2_control, 38, 1);
        else
            tbi = extract64(env->cp15.c2_control, 37, 1);
        tbi *= 8;
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 6);
    if (arm_el_is_aa64(env, 1)) {
        /* AArch64 constrains TxSZ to [16, 39]. */
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    }
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 6);
    if (arm_el_is_aa64(env, 1)) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = env->cp15.ttbr0_el1;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;

        /* TCR.TG0 encodes the granule size for the TTBR0 region. */
        tg = extract32(env->cp15.c2_control, 14, 2);
        if (tg == 1) { /* 64KB pages */
            granule_sz = 13;
        }
        if (tg == 2) { /* 16KB pages */
            granule_sz = 11;
        }
    } else {
        ttbr = env->cp15.ttbr1_el1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;

        /* TCR.TG1 uses a different encoding from TG0. */
        tg = extract32(env->cp15.c2_control, 30, 2);
        if (tg == 3) { /* 64KB pages */
            granule_sz = 13;
        }
        if (tg == 1) { /* 16KB pages */
            granule_sz = 11;
        }
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* The starting level depends on the virtual address size (which can be
     * up to 48 bits) and the translation granule size. It indicates the number
     * of strides (granule_sz bits at a time) needed to consume the bits
     * of the input address. In the pseudocode this is:
     *  level = 4 - RoundUp((inputsize - grainsize) / stride)
     * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is
     * our 'granule_sz + 3' and 'stride' is our 'granule_sz'.
     * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
     * = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz
     * = 4 - (va_size - tsz - 4) / granule_sz;
     */
    level = 4 - (va_size - tsz - 4) / granule_sz;

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    if (tsz) {
        address &= (1ULL << (va_size - tsz)) - 1;
    }

    /* Mask selecting the descriptor-index bits within one table. */
    descmask = (1ULL << (granule_sz + 3)) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
        descaddr &= ~7ULL;   /* descriptors are 8-byte aligned */
        descriptor = ldq_phys(cs->as, descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((granule_sz * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) ||
        (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) ||
        (!is_user && (attrs & (1 << 11)))) {
        /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally
         * treat XN/UXN as UXN for v8.
         */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
|
|
|
|
|
2012-07-12 14:59:09 +04:00
|
|
|
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
|
|
|
|
int access_type, int is_user,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr *phys_ptr, int *prot)
|
2007-11-11 03:04:49 +03:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
uint32_t mask;
|
|
|
|
uint32_t base;
|
|
|
|
|
|
|
|
*phys_ptr = address;
|
|
|
|
for (n = 7; n >= 0; n--) {
|
|
|
|
base = env->cp15.c6_region[n];
|
|
|
|
if ((base & 1) == 0)
|
|
|
|
continue;
|
|
|
|
mask = 1 << ((base >> 1) & 0x1f);
|
|
|
|
/* Keep this shift separate from the above to avoid an
|
|
|
|
(undefined) << 32. */
|
|
|
|
mask = (mask << 1) - 1;
|
|
|
|
if (((base ^ address) & ~mask) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (n < 0)
|
|
|
|
return 2;
|
|
|
|
|
|
|
|
if (access_type == 2) {
|
2014-04-15 22:18:41 +04:00
|
|
|
mask = env->cp15.pmsav5_insn_ap;
|
2007-11-11 03:04:49 +03:00
|
|
|
} else {
|
2014-04-15 22:18:41 +04:00
|
|
|
mask = env->cp15.pmsav5_data_ap;
|
2007-11-11 03:04:49 +03:00
|
|
|
}
|
|
|
|
mask = (mask >> (n * 4)) & 0xf;
|
|
|
|
switch (mask) {
|
|
|
|
case 0:
|
|
|
|
return 1;
|
|
|
|
case 1:
|
|
|
|
if (is_user)
|
|
|
|
return 1;
|
|
|
|
*prot = PAGE_READ | PAGE_WRITE;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
*prot = PAGE_READ;
|
|
|
|
if (!is_user)
|
|
|
|
*prot |= PAGE_WRITE;
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
*prot = PAGE_READ | PAGE_WRITE;
|
|
|
|
break;
|
|
|
|
case 5:
|
|
|
|
if (is_user)
|
|
|
|
return 1;
|
|
|
|
*prot = PAGE_READ;
|
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
*prot = PAGE_READ;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Bad permission. */
|
|
|
|
return 1;
|
|
|
|
}
|
2010-03-19 23:58:03 +03:00
|
|
|
*prot |= PAGE_EXEC;
|
2007-11-11 03:04:49 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-12 14:59:10 +04:00
|
|
|
/* get_phys_addr - get the physical address for this virtual address
|
|
|
|
*
|
|
|
|
* Find the physical address corresponding to the given virtual address,
|
|
|
|
* by doing a translation table walk on MMU based systems or using the
|
|
|
|
* MPU state on MPU based systems.
|
|
|
|
*
|
|
|
|
* Returns 0 if the translation was successful. Otherwise, phys_ptr,
|
|
|
|
* prot and page_size are not filled in, and the return value provides
|
|
|
|
* information on why the translation aborted, in the format of a
|
|
|
|
* DFSR/IFSR fault register, with the following caveats:
|
|
|
|
* * we honour the short vs long DFSR format differences.
|
|
|
|
* * the WnR bit is never set (the caller must do this).
|
|
|
|
* * for MPU based systems we don't bother to return a full FSR format
|
|
|
|
* value.
|
|
|
|
*
|
|
|
|
* @env: CPUARMState
|
|
|
|
* @address: virtual address to get physical address for
|
|
|
|
* @access_type: 0 for read, 1 for write, 2 for execute
|
|
|
|
* @is_user: 0 for privileged access, 1 for user
|
|
|
|
* @phys_ptr: set to the physical address corresponding to the virtual address
|
|
|
|
* @prot: set to the permissions for the page containing phys_ptr
|
|
|
|
* @page_size: set to the size of the page containing phys_ptr
|
|
|
|
*/
|
2014-04-15 22:18:40 +04:00
|
|
|
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
|
2007-11-11 03:04:49 +03:00
|
|
|
int access_type, int is_user,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr *phys_ptr, int *prot,
|
2010-03-17 05:14:28 +03:00
|
|
|
target_ulong *page_size)
|
2007-11-11 03:04:49 +03:00
|
|
|
{
|
|
|
|
/* Fast Context Switch Extension. */
|
|
|
|
if (address < 0x02000000)
|
|
|
|
address += env->cp15.c13_fcse;
|
|
|
|
|
2014-02-20 14:35:51 +04:00
|
|
|
if ((env->cp15.c1_sys & SCTLR_M) == 0) {
|
2007-11-11 03:04:49 +03:00
|
|
|
/* MMU/MPU disabled. */
|
|
|
|
*phys_ptr = address;
|
2010-03-19 23:58:03 +03:00
|
|
|
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
2010-03-17 05:14:28 +03:00
|
|
|
*page_size = TARGET_PAGE_SIZE;
|
2007-11-11 03:04:49 +03:00
|
|
|
return 0;
|
|
|
|
} else if (arm_feature(env, ARM_FEATURE_MPU)) {
|
2010-03-17 05:14:28 +03:00
|
|
|
*page_size = TARGET_PAGE_SIZE;
|
2007-11-11 03:04:49 +03:00
|
|
|
return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
|
|
|
|
prot);
|
2012-07-12 14:59:12 +04:00
|
|
|
} else if (extended_addresses_enabled(env)) {
|
|
|
|
return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
|
|
|
|
prot, page_size);
|
2014-02-20 14:35:51 +04:00
|
|
|
} else if (env->cp15.c1_sys & SCTLR_XP) {
|
2007-11-11 03:04:49 +03:00
|
|
|
return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
|
2010-03-17 05:14:28 +03:00
|
|
|
prot, page_size);
|
2007-11-11 03:04:49 +03:00
|
|
|
} else {
|
|
|
|
return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
|
2010-03-17 05:14:28 +03:00
|
|
|
prot, page_size);
|
2007-11-11 03:04:49 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-26 05:01:33 +04:00
|
|
|
/* arm_cpu_handle_mmu_fault: handle a TLB miss for @address.
 *
 * On a successful translation the page is installed in QEMU's TLB and 0
 * is returned. On failure the exception state (syndrome, fault address,
 * FSR) is recorded in env->exception, cs->exception_index is set to the
 * appropriate abort type, and 1 is returned so the caller raises the
 * exception.
 *
 * @access_type: 0 read, 1 write, 2 execute; @mmu_idx selects user vs
 * privileged translation regime.
 */
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                             int access_type, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;
    uint32_t syn;
    /* Faults from EL1 are reported to EL1 itself, hence "same EL" iff the
     * current EL is not 0 (we only implement EL0/EL1 here).
     */
    bool same_el = (arm_current_el(env) != 0);

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    /* AArch64 syndrome does not have an LPAE bit */
    syn = ret & ~(1 << 9);

    /* For insn and data aborts we assume there is no instruction syndrome
     * information; this is always true for exceptions reported to EL1.
     */
    if (access_type == 2) {
        syn = syn_insn_abort(same_el, 0, 0, syn);
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
        /* Set the DFSR WnR bit for writes on v6+ (short format bit 11). */
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
            ret |= (1 << 11);
        }
        cs->exception_index = EXCP_DATA_ABORT;
    }

    env->exception.syndrome = syn;
    env->exception.vaddress = address;
    env->exception.fsr = ret;
    return 1;
}
|
|
|
|
|
2013-06-29 20:55:54 +04:00
|
|
|
/* Debug-access translation: resolve @addr as a privileged read, without
 * touching the TLB. Returns the physical address, or -1 on a translation
 * fault.
 */
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;

    /* access_type 0 (read), is_user 0 (privileged). */
    if (get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot,
                      &page_size) != 0) {
        return -1;
    }

    return phys_addr;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Write the SP (r13) belonging to CPU mode @mode. The active mode's SP
 * lives in regs[13]; any other mode's SP is held in the banked array.
 */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    uint32_t *slot;

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        slot = &env->regs[13];
    } else {
        slot = &env->banked_r13[bank_number(mode)];
    }
    *slot = val;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Read the SP (r13) belonging to CPU mode @mode. The active mode's SP
 * lives in regs[13]; any other mode's SP is held in the banked array.
 */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    bool is_current_mode = (env->uncached_cpsr & CPSR_M) == mode;

    return is_current_mode ? env->regs[13]
                           : env->banked_r13[bank_number(mode)];
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* v7M MRS: read one of the M-profile special registers selected by the
 * SYSm encoding in @reg. Unknown registers abort (debug aid only).
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* regs[13] holds whichever SP is current; other_sp the inactive one */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        /* PRIMASK is modelled via the PSTATE.I bit */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        /* FAULTMASK is modelled via the PSTATE.F bit */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only. */
        cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* v7M MSR: write one of the M-profile special registers selected by the
 * SYSm encoding in @reg. Unknown registers abort (debug aid only).
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        /* regs[13] holds whichever SP is current; other_sp the inactive one */
        if (env->v7m.current_sp) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 16: /* PRIMASK */
        /* PRIMASK is modelled via the PSTATE.I bit */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        /* Only raise the priority (lower numeric value), never lower it. */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
            env->v7m.basepri = val;
        }
        break;
    case 19: /* FAULTMASK */
        /* FAULTMASK is modelled via the PSTATE.F bit */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        /* CONTROL.SPSEL (bit 1) selects which SP is active. */
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
|
|
|
|
|
2005-11-26 13:38:39 +03:00
|
|
|
#endif
|
2008-03-31 07:46:33 +04:00
|
|
|
|
2014-04-15 22:18:41 +04:00
|
|
|
/* DC ZVA helper: zero one cache-zero block (4 << dcz_blocksize bytes)
 * containing @vaddr_in. Fast path writes directly through host pointers
 * obtained from the TLB; slow path falls back to byte stores.
 */
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    /* Align down to the start of the block. */
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;

        /* Two attempts: the second after forcing TLB entries to exist. */
        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, cpu_mmu_index(env));
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
        }
    }
#else
    /* User-mode emulation: guest memory is directly addressable. */
    memset(g2h(vaddr), 0, blocklen);
#endif
}
|
|
|
|
|
2008-03-31 07:46:33 +04:00
|
|
|
/* Note that signed overflow is undefined in C. The following routines are
|
|
|
|
careful to use unsigned types where modulo arithmetic is required.
|
|
|
|
Failure to do so _will_ break on newer gcc. */
|
|
|
|
|
|
|
|
/* Signed saturating arithmetic. */
|
|
|
|
|
2008-04-11 08:55:07 +04:00
|
|
|
/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Signed overflow iff the operands share a sign bit but the sum's
     * sign differs; saturate towards the operands' sign.
     */
    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
|
|
|
|
|
2008-04-11 08:55:07 +04:00
|
|
|
/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Signed overflow iff the operands share a sign bit but the sum's
     * sign differs; saturate towards the operands' sign.
     */
    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
|
|
|
|
|
2008-04-11 08:55:07 +04:00
|
|
|
/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Signed overflow iff the operands' signs differ and the result's
     * sign differs from the minuend's; saturate towards the minuend's sign.
     */
    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
|
|
|
|
|
2008-04-11 08:55:07 +04:00
|
|
|
/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Signed overflow iff the operands' signs differ and the result's
     * sign differs from the minuend's; saturate towards the minuend's sign.
     */
    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
|
|
|
|
|
|
|
|
/* Instantiate the signed saturating (q-prefixed) parallel add/sub helpers
 * via op_addsub.h using the *_sat primitives above.
 */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
|
|
|
/* Unsigned saturating arithmetic. */
|
2008-05-01 16:04:35 +04:00
|
|
|
/* Perform 16-bit unsigned saturating addition. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    /* Do the sum in 32 bits so saturation is a simple range check. */
    uint32_t sum = (uint32_t)a + b;

    return (sum > 0xffff) ? 0xffff : (uint16_t)sum;
}
|
|
|
|
|
2008-05-01 16:04:35 +04:00
|
|
|
/* Perform 16-bit unsigned saturating subtraction (floor at zero). */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return (a > b) ? (uint16_t)(a - b) : 0;
}
|
|
|
|
|
|
|
|
/* Perform 8-bit unsigned saturating addition. */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    /* Do the sum widened so saturation is a simple range check. */
    unsigned int sum = (unsigned int)a + b;

    return (sum > 0xff) ? 0xff : (uint8_t)sum;
}
|
|
|
|
|
|
|
|
/* Perform 8-bit unsigned saturating subtraction (floor at zero). */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : 0;
}
|
|
|
|
|
|
|
|
/* Instantiate the unsigned saturating (uq-prefixed) parallel add/sub
 * helpers via op_addsub.h using the *_usat primitives above.
 */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
|
|
|
/* Signed modulo arithmetic. */
/* Instantiate the signed modulo (s-prefixed) parallel add/sub helpers.
 * ARITH_GE makes op_addsub.h also compute the GE flag bits: each lane
 * sets its GE bit(s) when the widened signed result is >= 0.
 */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
|
|
|
|
|
|
|
|
/* Unsigned modulo arithmetic.  GE bits are set from the carry out of
 * each lane: for adds when the result overflows the lane width, for
 * subtracts when no borrow occurred (sum did not wrap).
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE
#include "op_addsub.h"
|
|
|
|
|
|
|
|
/* Halved signed arithmetic: the sign-extended lane result is shifted
 * right one bit (arithmetic shift), so the result can never overflow.
 */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh
#include "op_addsub.h"
|
|
|
|
|
|
|
|
/* Halved unsigned arithmetic: the zero-extended lane result is shifted
 * right one bit, so the result can never overflow the lane.
 */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh
#include "op_addsub.h"
|
|
|
|
|
|
|
|
/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a > b) ? (a - b) : (b - a);
}
|
|
|
|
|
|
|
|
/* Unsigned sum of absolute byte differences. */
|
|
|
|
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
|
|
|
|
{
|
|
|
|
uint32_t sum;
|
|
|
|
sum = do_usad(a, b);
|
|
|
|
sum += do_usad(a >> 8, b >> 8);
|
|
|
|
sum += do_usad(a >> 16, b >>16);
|
|
|
|
sum += do_usad(a >> 24, b >> 24);
|
|
|
|
return sum;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For ARMv6 SEL instruction. */
|
|
|
|
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
|
|
|
|
{
|
|
|
|
uint32_t mask;
|
|
|
|
|
|
|
|
mask = 0;
|
|
|
|
if (flags & 1)
|
|
|
|
mask |= 0xff;
|
|
|
|
if (flags & 2)
|
|
|
|
mask |= 0xff00;
|
|
|
|
if (flags & 4)
|
|
|
|
mask |= 0xff0000;
|
|
|
|
if (flags & 8)
|
|
|
|
mask |= 0xff000000;
|
|
|
|
return (a & mask) | (b & ~mask);
|
|
|
|
}
|
|
|
|
|
2012-08-06 20:42:18 +04:00
|
|
|
/* VFP support. We follow the convention used for VFP instructions:
|
|
|
|
Single precision routines have a "s" suffix, double precision a
|
2008-03-31 07:47:19 +04:00
|
|
|
"d" suffix. */
|
|
|
|
|
|
|
|
/* Convert host (softfloat) exception flags to the FPSCR cumulative
 * exception bit layout: IOC=bit0, DZC=bit1, OFC=bit2, UFC=bit3,
 * IXC=bit4, IDC=bit7.
 */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    /* Output denormals are reported as underflow in the FPSCR. */
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Assemble the FPSCR value from its scattered CPU state fields: the
 * stored register bits, vector length/stride, and accumulated softfloat
 * exception flags from both the normal and standard FP status.
 */
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    /* Mask 0xffc8ffff drops the fields kept in separate state
     * (vector length/stride), which are merged back in below. */
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

/* Plain-C entry point wrapping the TCG helper. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
|
|
|
|
|
2008-03-31 07:47:19 +04:00
|
|
|
/* Convert FPSCR cumulative exception bits back to the host (softfloat)
 * flag encoding; inverse of vfp_exceptbits_from_host() except that
 * output-denormal has no FPSCR bit of its own.
 */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* Write the FPSCR, distributing its fields into CPU state: register
 * bits, vector length/stride, softfloat rounding/flush/NaN modes and
 * exception flags.  Mode changes are only pushed to softfloat when the
 * corresponding bits actually changed.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        /* RMode field (bits 23:22) changed: map the architectural
         * rounding mode onto the softfloat one. */
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        /* FZ bit controls flushing of both outputs and inputs. */
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        /* DN bit: default-NaN mode. */
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    /* The standard FP status never accumulates flags across writes. */
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

/* Plain-C entry point wrapping the TCG helper. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
|
|
|
|
|
2008-03-31 07:47:19 +04:00
|
|
|
/* Build a TCG helper name of the form helper_vfp_<name><p>. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision binary-op helpers that forward
 * to the corresponding softfloat function, taking the float_status to
 * use as an opaque pointer argument.
 */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
|
|
|
|
|
|
|
|
/* Negate: flip the sign bit only (no FP status needed). */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

/* Absolute value: clear the sign bit only (no FP status needed). */
float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

/* Square root, using the normal (non-standard) FP status. */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
|
|
|
|
|
|
|
|
/* XXX: check quiet/signaling case */
/* Generate compare helpers that write the NZCV flags into FPSCR[31:28].
 * 'cmp' uses the quiet compare (no invalid on quiet NaNs); 'cmpe' uses
 * the signaling compare.  Flag encoding: equal=0110, less=1000,
 * greater=0010, unordered=0011.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
|
|
|
|
|
2011-05-19 17:46:19 +04:00
|
|
|
/* Integer to float and float to integer conversions */

/* int32/uint32 -> float; the sign argument selects the source type. */
#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

/* float -> int32/uint32.  NaN inputs raise Invalid and convert to 0
 * (ARM behaviour), rather than softfloat's default NaN result. */
#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

/* For each (type, precision) pair generate: int->float, float->int
 * (current rounding mode), and float->int (round towards zero). */
#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
|
2008-03-31 07:47:19 +04:00
|
|
|
|
|
|
|
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
|
|
|
|
|
|
|
|
/* VFP3 fixed point conversion. */

/* Fixed-point -> float: convert the integer then scale by 2^-shift. */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

/* AArch32 variants: both round-to-zero and current-mode float->fix. */
#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

/* AArch64 variants: only the current-rounding-mode float->fix form. */
#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
|
2008-03-31 07:47:19 +04:00
|
|
|
|
2014-01-07 21:19:14 +04:00
|
|
|
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    /* Caller is expected to restore the previous mode afterwards. */
    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
|
|
|
|
|
2009-11-19 19:45:20 +03:00
|
|
|
/* Half precision conversions. */

/* f16 -> f32.  FPSCR bit 26 (AHP) clear selects IEEE half-precision;
 * in IEEE mode any NaN result is silenced per the ARM S<->H rule. */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

/* f32 -> f16, same AHP/IEEE handling as above. */
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

/* NEON variants use the standard FP status... */
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

/* ...while the VFP variants use the normal FP status. */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

/* f16 <-> f64, same AHP/IEEE and NaN-silencing rules. */
float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
|
|
|
|
|
2011-03-14 18:37:12 +03:00
|
|
|
/* Single-precision bit patterns for 2.0, 3.0 and 1.5, used by the
 * Newton-Raphson step helpers below. */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
|
2011-03-14 18:37:12 +03:00
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* VRECPS (reciprocal step): computes 2 - a*b using the standard FP
 * status.  The inf*0-or-denormal case returns exactly 2.0 instead of
 * a NaN, raising input-denormal only when a denormal was involved.
 */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
|
|
|
|
|
2012-03-14 04:38:21 +04:00
|
|
|
/* VRSQRTS (reciprocal sqrt step): computes (3 - a*b) / 2 using the
 * standard FP status.  The inf*0-or-denormal case returns exactly 1.5,
 * raising input-denormal only when a denormal was involved.
 */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
|
|
|
|
|
2008-03-31 07:48:01 +04:00
|
|
|
/* NEON helpers. */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
/* Largest finite (max-normal) values for each precision. */
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
|
2011-02-21 19:38:46 +03:00
|
|
|
|
2014-03-17 20:31:52 +04:00
|
|
|
/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()
 */

static float64 recip_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
|
|
|
|
|
2014-03-17 20:31:52 +04:00
|
|
|
/* Common wrapper to call recip_estimate */
static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
{
    uint64_t val64 = float64_val(num);
    uint64_t frac = extract64(val64, 0, 52);
    int64_t exp = extract64(val64, 52, 11);
    uint64_t sbit;
    float64 scaled, estimate;

    /* Generate the scaled number for the estimate function.
     * For a subnormal input (exp == 0) normalize the fraction and
     * adjust the effective exponent accordingly. */
    if (exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            exp = -1;
            frac = extract64(frac, 0, 50) << 2;
        } else {
            frac = extract64(frac, 0, 51) << 1;
        }
    }

    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
    scaled = make_float64((0x3feULL << 52)
                          | extract64(frac, 44, 8) << 44);

    estimate = recip_estimate(scaled, fpst);

    /* Build new result: keep the estimate's sign and fraction, with
     * the exponent rebased against 'off' (the caller's exponent bias). */
    val64 = float64_val(estimate);
    sbit = 0x8000000000000000ULL & val64;
    exp = off - exp;
    frac = extract64(val64, 0, 52);

    if (exp == 0) {
        /* Result is subnormal: shift in the implicit leading bit. */
        frac = 1ULL << 51 | extract64(frac, 1, 51);
    } else if (exp == -1) {
        frac = 1ULL << 50 | extract64(frac, 2, 50);
        exp = 0;
    }

    return make_float64(sbit | (exp << 52) | frac);
}
|
|
|
|
|
|
|
|
/* Decide whether an overflowed result with the given sign rounds to
 * infinity (true) or to the max-normal value (false) under the current
 * rounding mode.
 */
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    /* Only the four modes above are expected here. */
    g_assert_not_reached();
}
|
|
|
|
|
|
|
|
/* VRECPE (single precision): reciprocal estimate with the special-case
 * ladder required by the ARM ARM, then the table-based estimate on a
 * double-precision scaled value.
 */
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000ULL & f32_val;
    int32_t f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    float64 f64, r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Special cases: NaN, infinity, zero, huge magnitude (overflowing
     * reciprocal) and, with FZ set, very large exponents. */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(f32);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sbit)) {
            return float32_set_sign(float32_infinity, float32_is_neg(f32));
        } else {
            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    /* Widen to a float64 bit pattern and run the shared estimate. */
    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
    r64 = call_recip_estimate(f64, 253, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<7:0> : fraction<51:29>; */
    return make_float32(f32_sbit |
                        (r64_exp & 0xff) << 23 |
                        extract64(r64_frac, 29, 24));
}
|
|
|
|
|
|
|
|
/* VRECPE (double precision): same special-case ladder and table-based
 * estimate as the single-precision version, without the widening step.
 */
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
    int64_t f64_exp = extract64(f64_val, 52, 11);
    float64 r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(f64);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sbit)) {
            return float64_set_sign(float64_infinity, float64_is_neg(f64));
        } else {
            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
        }
    } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    r64 = call_recip_estimate(f64, 2045, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    return make_float64(f64_sbit |
                        ((r64_exp & 0x7ff) << 52) |
                        r64_frac);
}
|
|
|
|
|
2011-02-21 19:38:48 +03:00
|
|
|
/* The algorithm that must be used to calculate the estimate
|
|
|
|
* is specified by the ARM ARM.
|
|
|
|
*/
|
2014-03-17 20:31:53 +04:00
|
|
|
static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0); */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        /* Assign to the outer q_int rather than shadowing it with a
         * fresh local declaration as the old code did.
         */
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
|
|
|
|
|
2014-03-17 20:31:53 +04:00
|
|
|
/* Reciprocal square root estimate, single precision (NEON/A64 VRSQRTE).
 * Special cases (NaN, zero, negative, infinity) are handled per the ARM ARM;
 * otherwise the operand is widened to double, normalized into [0.25, 1.0)
 * preserving exponent parity, and passed to recip_sqrt_estimate().
 */
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000 & val;          /* sign bit */
    int32_t f32_exp = extract32(val, 23, 8);       /* biased exponent */
    uint32_t f32_frac = extract32(val, 0, 23);     /* fraction bits */
    uint64_t f64_frac;
    uint64_t val64;
    int result_exp;
    float64 f64;

    /* Deal with any special cases first. */
    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32)) {
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        /* 1/sqrt(+-0) -> signed infinity, with division-by-zero raised. */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        /* Square root of a negative number is invalid. */
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f32_frac) << 29;
    if (f32_exp == 0) {
        /* Subnormal input: shift the fraction up until it is normalized,
         * decrementing the (unbiased, now negative) exponent as we go.
         */
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f32_exp = f32_exp-1;
        }
        /* Drop the leading bit; it becomes the implicit one. */
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    /* Choose the double exponent (0.5 or 0.25 scaling) so that the
     * parity of the original exponent is preserved.
     */
    if (extract64(f32_exp, 0, 1) == 0) {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    /* Result exponent per the ARM ARM pseudocode for single precision. */
    result_exp = (380 - f32_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    val64 = float64_val(f64);

    /* Repack: result exponent plus the top 23 fraction bits of the
     * double-precision estimate.  Sign is always positive here (negative
     * inputs were handled above).
     */
    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
|
|
|
|
|
2014-03-17 20:31:53 +04:00
|
|
|
/* Reciprocal square root estimate, double precision (A64 FRSQRTE).
 * Mirrors rsqrte_f32: special cases per the ARM ARM, then normalize into
 * [0.25, 1.0) preserving exponent parity and call recip_sqrt_estimate().
 */
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & val;  /* sign bit */
    int64_t f64_exp = extract64(val, 52, 11);         /* biased exponent */
    uint64_t f64_frac = extract64(val, 0, 52);        /* fraction bits */
    int64_t result_exp;
    uint64_t result_frac;

    /* Deal with any special cases first. */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64)) {
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        /* 1/sqrt(+-0) -> signed infinity, with division-by-zero raised. */
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        /* Square root of a negative number is invalid. */
        float_raise(float_flag_invalid, s);
        return float64_default_nan;
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    if (f64_exp == 0) {
        /* Subnormal input: normalize the fraction, tracking the exponent. */
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f64_exp = f64_exp - 1;
        }
        /* Drop the leading bit; it becomes the implicit one. */
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    /* Pick the 0.5 or 0.25 scaling exponent to preserve exponent parity. */
    if (extract64(f64_exp, 0, 1) == 0) {
        f64 = make_float64(f64_sbit
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(f64_sbit
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    /* Result exponent per the ARM ARM pseudocode for double precision. */
    result_exp = (3068 - f64_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    result_frac = extract64(float64_val(f64), 0, 52);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    return make_float64(f64_sbit |
                        ((result_exp & 0x7ff) << 52) |
                        result_frac);
}
|
|
|
|
|
2014-03-17 20:31:52 +04:00
|
|
|
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
|
2008-03-31 07:47:19 +04:00
|
|
|
{
|
2014-03-17 20:31:52 +04:00
|
|
|
float_status *s = fpstp;
|
2011-02-21 19:38:47 +03:00
|
|
|
float64 f64;
|
|
|
|
|
|
|
|
if ((a & 0x80000000) == 0) {
|
|
|
|
return 0xffffffff;
|
|
|
|
}
|
|
|
|
|
|
|
|
f64 = make_float64((0x3feULL << 52)
|
|
|
|
| ((int64_t)(a & 0x7fffffff) << 21));
|
|
|
|
|
2014-03-17 20:31:52 +04:00
|
|
|
f64 = recip_estimate(f64, s);
|
2011-02-21 19:38:47 +03:00
|
|
|
|
|
|
|
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
|
2008-03-31 07:47:19 +04:00
|
|
|
}
|
|
|
|
|
2014-03-17 20:31:53 +04:00
|
|
|
uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
|
2008-03-31 07:47:19 +04:00
|
|
|
{
|
2014-03-17 20:31:53 +04:00
|
|
|
float_status *fpst = fpstp;
|
2011-02-21 19:38:48 +03:00
|
|
|
float64 f64;
|
|
|
|
|
|
|
|
if ((a & 0xc0000000) == 0) {
|
|
|
|
return 0xffffffff;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (a & 0x80000000) {
|
|
|
|
f64 = make_float64((0x3feULL << 52)
|
|
|
|
| ((uint64_t)(a & 0x7fffffff) << 21));
|
|
|
|
} else { /* bits 31-30 == '01' */
|
|
|
|
f64 = make_float64((0x3fdULL << 52)
|
|
|
|
| ((uint64_t)(a & 0x3fffffff) << 22));
|
|
|
|
}
|
|
|
|
|
2014-03-17 20:31:53 +04:00
|
|
|
f64 = recip_sqrt_estimate(f64, fpst);
|
2011-02-21 19:38:48 +03:00
|
|
|
|
|
|
|
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
|
2008-03-31 07:47:19 +04:00
|
|
|
}
|
2008-12-19 16:18:36 +03:00
|
|
|
|
2011-10-19 20:14:07 +04:00
|
|
|
/* VFPv4 fused multiply-accumulate */
|
|
|
|
/* VFPv4 fused multiply-accumulate, single precision: a * b + c with a
 * single rounding at the end.
 */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    return float32_muladd(a, b, c, 0, (float_status *)fpstp);
}
|
|
|
|
|
|
|
|
/* VFPv4 fused multiply-accumulate, double precision: a * b + c with a
 * single rounding at the end.
 */
float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    return float64_muladd(a, b, c, 0, (float_status *)fpstp);
}
|
2014-01-07 21:19:14 +04:00
|
|
|
|
|
|
|
/* ARMv8 round to integral */
|
|
|
|
/* ARMv8 round-to-integral, single precision, "exact" variant: any
 * Inexact raised by the rounding is left set in the status flags.
 */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    float_status *fpst = fp_status;

    return float32_round_to_int(x, fpst);
}
|
|
|
|
|
|
|
|
/* ARMv8 round-to-integral, double precision, "exact" variant: any
 * Inexact raised by the rounding is left set in the status flags.
 */
float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    float_status *fpst = fp_status;

    return float64_round_to_int(x, fpst);
}
|
|
|
|
|
|
|
|
/* ARMv8 round-to-integral, single precision, non-exact variant: the
 * rounding must not raise Inexact, so any Inexact produced by the
 * conversion is cleared again unless it was already pending.
 */
float32 HELPER(rints)(float32 x, void *fp_status)
{
    int prior_flags = get_float_exception_flags(fp_status);
    float32 rounded = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(prior_flags & float_flag_inexact)) {
        int flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(flags & ~float_flag_inexact, fp_status);
    }

    return rounded;
}
|
|
|
|
|
|
|
|
/* ARMv8 round-to-integral, double precision, non-exact variant: the
 * rounding must not raise Inexact, so any Inexact produced by the
 * conversion is cleared again unless it was already pending.
 */
float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced.
     * (The redundant unconditional read of the flags that used to sit
     * here was a dead store, and made this diverge from HELPER(rints);
     * the flags are read inside the conditional where they are needed.)
     */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
|
2014-01-31 18:47:33 +04:00
|
|
|
|
|
|
|
/* Convert ARM rounding mode to softfloat */
|
|
|
|
int arm_rmode_to_sf(int rmode)
|
|
|
|
{
|
|
|
|
switch (rmode) {
|
|
|
|
case FPROUNDING_TIEAWAY:
|
|
|
|
rmode = float_round_ties_away;
|
|
|
|
break;
|
|
|
|
case FPROUNDING_ODD:
|
|
|
|
/* FIXME: add support for TIEAWAY and ODD */
|
|
|
|
qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
|
|
|
|
rmode);
|
|
|
|
case FPROUNDING_TIEEVEN:
|
|
|
|
default:
|
|
|
|
rmode = float_round_nearest_even;
|
|
|
|
break;
|
|
|
|
case FPROUNDING_POSINF:
|
|
|
|
rmode = float_round_up;
|
|
|
|
break;
|
|
|
|
case FPROUNDING_NEGINF:
|
|
|
|
rmode = float_round_down;
|
|
|
|
break;
|
|
|
|
case FPROUNDING_ZERO:
|
|
|
|
rmode = float_round_to_zero;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return rmode;
|
|
|
|
}
|
2014-02-26 21:20:07 +04:00
|
|
|
|
2014-06-09 18:43:25 +04:00
|
|
|
/* CRC helpers.
|
|
|
|
* The upper bytes of val (above the number specified by 'bytes') must have
|
|
|
|
* been zeroed out by the caller.
|
|
|
|
*/
|
2014-02-26 21:20:07 +04:00
|
|
|
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
|
|
|
|
{
|
|
|
|
uint8_t buf[4];
|
|
|
|
|
2014-06-09 18:43:25 +04:00
|
|
|
stl_le_p(buf, val);
|
2014-02-26 21:20:07 +04:00
|
|
|
|
|
|
|
/* zlib crc32 converts the accumulator and output to one's complement. */
|
|
|
|
return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
|
|
|
|
{
|
|
|
|
uint8_t buf[4];
|
|
|
|
|
2014-06-09 18:43:25 +04:00
|
|
|
stl_le_p(buf, val);
|
2014-02-26 21:20:07 +04:00
|
|
|
|
|
|
|
/* Linux crc32c converts the output to one's complement. */
|
|
|
|
return crc32c(acc, buf, bytes) ^ 0xffffffff;
|
|
|
|
}
|