4923f672e3
zicfilp introduces a new state elp ("expected landing pad") in the CPU. During normal execution, elp is idle (NO_LP_EXPECTED), i.e. not expecting a landing pad. On an indirect call, elp moves to LP_EXPECTED. While elp is LP_EXPECTED, only a subsequent landing pad instruction can set the state back to NO_LP_EXPECTED. On reset, elp is set to NO_LP_EXPECTED. zicfilp is enabled via bit 2 in the *envcfg CSRs; the enabling control for M-mode is bit 10 of the mseccfg CSR. On a trap, the elp state is saved away in *status. This also adds elp to the migration state.

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Co-developed-by: Jim Shu <jim.shu@sifive.com>
Co-developed-by: Andy Chiu <andy.chiu@sifive.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20241008225010.1861630-4-debug@rivosinc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
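As a rough orientation, the elp state machine described above can be modelled in a few lines of C. This is an illustrative sketch only, not the QEMU implementation: the names (elp_state_t, hart_t, on_indirect_call, on_landing_pad, lp_violation) are hypothetical, and the real code keeps elp in the CPU state and gates it on the *envcfg/mseccfg enable bits mentioned in the commit message.

#include <stdbool.h>

typedef enum {
    NO_LP_EXPECTED, /* idle: not expecting a landing pad       */
    LP_EXPECTED     /* an indirect call was just executed      */
} elp_state_t;

typedef struct {
    elp_state_t elp;
    bool lp_enabled;   /* zicfilp enable, e.g. bit 2 of the relevant *envcfg */
} hart_t;

/* Reset: elp starts out idle. */
static void hart_reset(hart_t *h)
{
    h->elp = NO_LP_EXPECTED;
}

/* Indirect call: if zicfilp is enabled, the next instruction
 * executed must be a landing pad. */
static void on_indirect_call(hart_t *h)
{
    if (h->lp_enabled) {
        h->elp = LP_EXPECTED;
    }
}

/* Landing pad instruction: the only transition back to NO_LP_EXPECTED. */
static void on_landing_pad(hart_t *h)
{
    h->elp = NO_LP_EXPECTED;
}

/* Any other instruction while LP_EXPECTED is a control-flow violation;
 * on a trap, the real implementation saves the elp state into *status. */
static bool lp_violation(const hart_t *h)
{
    return h->elp == LP_EXPECTED;
}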
/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection interface
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_PMP_H
#define RISCV_PMP_H

#include "cpu.h"

typedef enum {
    PMP_READ   = 1 << 0,
    PMP_WRITE  = 1 << 1,
    PMP_EXEC   = 1 << 2,
    PMP_AMATCH = (3 << 3),
    PMP_LOCK   = 1 << 7
} pmp_priv_t;

typedef enum {
    PMP_AMATCH_OFF,  /* Null (off)                            */
    PMP_AMATCH_TOR,  /* Top of Range                          */
    PMP_AMATCH_NA4,  /* Naturally aligned four-byte region    */
    PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
} pmp_am_t;

typedef enum {
    MSECCFG_MML   = 1 << 0,
    MSECCFG_MMWP  = 1 << 1,
    MSECCFG_RLB   = 1 << 2,
    MSECCFG_USEED = 1 << 8,
    MSECCFG_SSEED = 1 << 9,
    MSECCFG_MLPE  = 1 << 10,
} mseccfg_field_t;

typedef struct {
    target_ulong addr_reg;
    uint8_t cfg_reg;
} pmp_entry_t;

typedef struct {
    hwaddr sa;
    hwaddr ea;
} pmp_addr_t;

typedef struct {
    pmp_entry_t pmp[MAX_RISCV_PMPS];
    pmp_addr_t addr[MAX_RISCV_PMPS];
    uint32_t num_rules;
} pmp_table_t;

void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val);
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);

void mseccfg_csr_write(CPURISCVState *env, target_ulong val);
target_ulong mseccfg_csr_read(CPURISCVState *env);

void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs,
                        target_ulong mode);
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);
void pmp_unlock_entries(CPURISCVState *env);

#define MSECCFG_MML_ISSET(env) get_field(env->mseccfg, MSECCFG_MML)
#define MSECCFG_MMWP_ISSET(env) get_field(env->mseccfg, MSECCFG_MMWP)
#define MSECCFG_RLB_ISSET(env) get_field(env->mseccfg, MSECCFG_RLB)

#endif
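For context, here is a hedged sketch of how a translation path might use the interface declared above. It is not lifted from QEMU: the function name check_pmp_example is hypothetical, and paddr, size and mode are assumed to come from the caller's address-translation logic; only pmp_hart_has_privs() and pmp_priv_to_page_prot() are taken from the header.

/* Assumes "pmp.h" above (and, through it, "cpu.h") is included. */
static int check_pmp_example(CPURISCVState *env, hwaddr paddr,
                             target_ulong size, target_ulong mode)
{
    pmp_priv_t allowed = 0;

    /* Ask for read + execute; the PMP unit reports what it actually allows. */
    if (!pmp_hart_has_privs(env, paddr, size, PMP_READ | PMP_EXEC,
                            &allowed, mode)) {
        return -1; /* treated as an access fault by the caller */
    }

    /* Convert the granted PMP privileges into TLB page-protection bits. */
    return pmp_priv_to_page_prot(allowed);
}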