diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index f6693e5760..07b16cc0f4 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -656,6 +656,7 @@ static uint32_t get_elf_hwcap(void)
     GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
     GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
     GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
+    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
 
     return hwcaps;
 }
@@ -665,6 +666,7 @@ static uint32_t get_elf_hwcap2(void)
     ARMCPU *cpu = ARM_CPU(thread_cpu);
     uint32_t hwcaps = 0;
 
+    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
     GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
     GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
 
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index cebb3511a5..4106e4ae59 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3618,6 +3618,16 @@ static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
 }
 
+static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
+}
+
+static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
+}
+
 static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
 {
     /* We always set the AdvSIMD and FP fields identically wrt FP16. */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index a39d6fcea3..61fd0ade29 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -646,6 +646,7 @@ static void aarch64_max_initfn(Object *obj)
     cpu->isar.id_aa64isar0 = t;
 
     t = cpu->isar.id_aa64isar1;
+    t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
     t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
     t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
     t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index a4f7b61b4e..3a93844a3b 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5974,6 +5974,52 @@ static const ARMCPRegInfo rndr_reginfo[] = {
       .access = PL0_R, .readfn = rndr_readfn },
     REGINFO_SENTINEL
 };
+
+#ifndef CONFIG_USER_ONLY
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+                           uint64_t value)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    /* CTR_EL0 System register -> DminLine, bits [19:16] */
+    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
+    uint64_t vaddr_in = (uint64_t) value;
+    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
+    void *haddr;
+    int mem_idx = cpu_mmu_index(env, false);
+
+    /* This won't be crossing page boundaries */
+    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
+    if (haddr) {
+
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        /* RCU lock is already being held */
+        mr = memory_region_from_host(haddr, &offset);
+
+        if (mr) {
+            memory_region_do_writeback(mr, offset, dline_size);
+        }
+    }
+}
+
+static const ARMCPRegInfo dcpop_reg[] = {
+    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dcpodp_reg[] = {
+    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+#endif /*CONFIG_USER_ONLY*/
+
 #endif
 
 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -7046,6 +7092,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
     }
+#ifndef CONFIG_USER_ONLY
+    /* Data Cache clean instructions up to PoP */
+    if (cpu_isar_feature(aa64_dcpop, cpu)) {
+        define_one_arm_cp_reg(cpu, dcpop_reg);
+
+        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
+            define_one_arm_cp_reg(cpu, dcpodp_reg);
+        }
+    }
+#endif /*CONFIG_USER_ONLY*/
 #endif /*