/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
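    /*
     * Pointer masking (draft J extension): record in the TB flags whether a
     * non-trivial address mask or base is active, so translation only emits
     * the extra address-transformation code when it is actually needed.
     */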
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie;

    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie   = virt_enabled || sie;
    target_ulong vsie   = virt_enabled && sie;
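
    /*
     * Negating each 0/1 enable flag (-mie, -hsie, -vsie) widens it into an
     * all-zeros or all-ones mask, so each pending bit is routed to M, HS,
     * or VS mode according to the mideleg/hideleg delegation bits.
     */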
    target_ulong irqs =
            (pending & ~env->mideleg & -mie) |
            (pending &  env->mideleg & ~env->hideleg & -hsie) |
            (pending &  env->mideleg &  env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

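/*
 * Swap the HS-mode and VS-mode views of the supervisor CSRs on every
 * transition of the V bit, so S-level software always sees the copy of
 * the registers that belongs to its current world.
 */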
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;
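
    /*
     * cpu_interrupt() and cpu_reset_interrupt() must be called with the
     * iothread lock held; callers of this helper do not always hold it,
     * so take it here when necessary and release it again below.
     */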
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
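    /*
     * The effective XLEN depends on the new privilege level (MXL vs the
     * SXL/UXL fields of mstatus), so recompute it, and refresh the
     * pointer-masking state, whenever the mode changes.
     */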
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if no TLB page should be set for addr.
 * @addr: The physical address whose permission is to be checked
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored when executing with the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;
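
    /*
     * A virtual address is only valid when bits [XLEN-1 : va_bits-1] are all
     * equal, i.e. the address is sign-extended from bit va_bits-1; anything
     * else must fail translation rather than alias a legal address.
     */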
    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

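    /*
     * Only the failure of an active page-table walk becomes a page fault;
     * bare translation mode or a PMP violation is reported as an access
     * fault instead.
     */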
    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                    TARGET_FMT_plx " prot %d\n",
                    __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
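                /*
                 * Per the hypervisor extension, htval reports the faulting
                 * guest-physical address shifted right by 2 bits, hence the
                 * ">> 2" below.
                 */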
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /* cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;
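
    /*
     * Semihosting calls arrive as their own pseudo-exception: service them
     * inline when taken from S- or M-mode, otherwise demote the trap back
     * to an ordinary breakpoint.
     */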
    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = true;
            tval = env->badaddr;
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            tval = env->bins;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it's a VS mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
                write_gva = false;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
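
        /*
         * The low two bits of stvec select the trap entry mode: in vectored
         * mode (mode 1), asynchronous traps enter at base + 4 * cause;
         * everything else enters at the aligned base address.
         */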
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
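
        /*
         * ~(((target_ulong)-1) >> async) is 0 for an exception and sets only
         * the MSB for an interrupt, matching the mcause encoding.
         */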
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}