Third RISC-V PR for 8.2
* Rename ext_icboz to ext_zicboz * Rename ext_icbom to ext_zicbom * Rename ext_icsr to ext_zicsr * Rename ext_ifencei to ext_zifencei * Add RISC-V Virtual IRQs and IRQ filtering support * Change default linux-user cpu to 'max' * Update 'virt' machine core limit * Add query-cpu-model-expansion API * Rename epmp to smepmp and expose the extension * Clear pmp/smepmp bits on reset * Ignore pmp writes when RW=01 * Support zicntr/zihpm flags and disable support * Correct CSR_MSECCFG operations * Update mail address for Weiwei Li * Update RISC-V vector crypto to ratified v1.0.0 * Clear the Ibex/OpenTitan SPI interrupts even if disabled * Set the OpenTitan priv to 1.12.0 * Support discontinuous PMU counters -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmVJoOEACgkQr3yVEwxT gBPwcw/5AXgSVu521IHpobofq4Skc2rpO9P0Hep3IniBuS+5+h2XM3fwWNBaeeGj LZgdXDrCfcCnPuFh2I5j1D885xJDncDF4LET9EFtxK+BTT8eC5JpaCnORdV3Zd2T C7qdq1r4J/wKBel3cAz1jlLXc2Pssle4NFaMZGmOGlNX/mLJUYkI6BwKG9wNiCI+ cCRQW5bEv9g8XzPYPsIKhX9aTegDKdV5x4Xj3YyVs8qkZTVM7Ona8GTpy6eShNfL h/RW+yvSxLwfKC9YJHesjI1oqhLsAuA7hFu5AVHiedFNAD5FevMZsZwrqjrmeBOG 5awBw9XgfXFFl7jQ0VQVRknt/PFANzTmGGbjLUkaXgJ6iTmH7oIMzwbkx2pM/0Qd HV2EboUPe5rJl0SNhcDMCJkYJYpt4z6TVXFpN5p10WU4K1AJXZf9P3YkChcxWiSK B4DlY4ax3W77voySwbKCvJRIRWCFQZmtl7doFY5dEQz2ERcNfI7VIB1GKIj7BlGm AVTCc5G9KghsaB8q0BzYbDplzCggdaaUBRgpIgLS/n22GKJlOisFwMCawWquPkEw i0t3ftt+Ket4Qnnq+dO4W3ehR4qW1/XatCWgQ3NCSgUeS4/9VK3h/nz5t+L7iKwp mjp86gNN11wcJRsBIIV7nOAmSAs9ybCm2F4J6YAyh3n1IlRVN0Q= =2A+W -----END PGP SIGNATURE----- Merge tag 'pull-riscv-to-apply-20231107' of https://github.com/alistair23/qemu into staging Third RISC-V PR for 8.2 * Rename ext_icboz to ext_zicboz * Rename ext_icbom to ext_zicbom * Rename ext_icsr to ext_zicsr * Rename ext_ifencei to ext_zifencei * Add RISC-V Virtual IRQs and IRQ filtering support * Change default linux-user cpu to 'max' * Update 'virt' machine core limit * Add query-cpu-model-expansion API * Rename epmp to smepmp and expose the extension * Clear pmp/smepmp bits on reset * Ignore pmp writes when RW=01 * Support zicntr/zihpm flags and disable support * Correct CSR_MSECCFG operations * Update mail address for Weiwei Li * Update RISC-V vector crypto to ratified v1.0.0 * Clear the Ibex/OpenTitan SPI interrupts even if disabled * Set the OpenTitan priv to 1.12.0 * Support discontinuous PMU counters # -----BEGIN PGP SIGNATURE----- # # iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmVJoOEACgkQr3yVEwxT # gBPwcw/5AXgSVu521IHpobofq4Skc2rpO9P0Hep3IniBuS+5+h2XM3fwWNBaeeGj # LZgdXDrCfcCnPuFh2I5j1D885xJDncDF4LET9EFtxK+BTT8eC5JpaCnORdV3Zd2T # C7qdq1r4J/wKBel3cAz1jlLXc2Pssle4NFaMZGmOGlNX/mLJUYkI6BwKG9wNiCI+ # cCRQW5bEv9g8XzPYPsIKhX9aTegDKdV5x4Xj3YyVs8qkZTVM7Ona8GTpy6eShNfL # h/RW+yvSxLwfKC9YJHesjI1oqhLsAuA7hFu5AVHiedFNAD5FevMZsZwrqjrmeBOG # 5awBw9XgfXFFl7jQ0VQVRknt/PFANzTmGGbjLUkaXgJ6iTmH7oIMzwbkx2pM/0Qd # HV2EboUPe5rJl0SNhcDMCJkYJYpt4z6TVXFpN5p10WU4K1AJXZf9P3YkChcxWiSK # B4DlY4ax3W77voySwbKCvJRIRWCFQZmtl7doFY5dEQz2ERcNfI7VIB1GKIj7BlGm # AVTCc5G9KghsaB8q0BzYbDplzCggdaaUBRgpIgLS/n22GKJlOisFwMCawWquPkEw # i0t3ftt+Ket4Qnnq+dO4W3ehR4qW1/XatCWgQ3NCSgUeS4/9VK3h/nz5t+L7iKwp # mjp86gNN11wcJRsBIIV7nOAmSAs9ybCm2F4J6YAyh3n1IlRVN0Q= # =2A+W # -----END PGP SIGNATURE----- # gpg: Signature made Tue 07 Nov 2023 10:28:49 HKT # gpg: using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013 # gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown] # gpg: WARNING: This key is not certified with a trusted signature! 
# gpg: There is no indication that the signature belongs to the owner. # Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013 * tag 'pull-riscv-to-apply-20231107' of https://github.com/alistair23/qemu: (49 commits) docs/about/deprecated: Document RISC-V "pmu-num" deprecation target/riscv: Add "pmu-mask" property to replace "pmu-num" target/riscv: Use existing PMU counter mask in FDT generation target/riscv: Don't assume PMU counters are continuous target/riscv: Propagate error from PMU setup target/riscv: cpu: Set the OpenTitan priv to 1.12.0 hw/ssi: ibex_spi_host: Clear the interrupt even if disabled disas/riscv: Replace TABs with space disas/riscv: Add support for vector crypto extensions disas/riscv: Add rv_codec_vror_vi for vror.vi disas/riscv: Add rv_fmt_vd_vs2_uimm format target/riscv: Move vector crypto extensions to riscv_cpu_extensions target/riscv: Expose Zvks[c|g] extnesion properties target/riscv: Add cfg properties for Zvks[c|g] extensions target/riscv: Expose Zvkn[c|g] extnesion properties target/riscv: Add cfg properties for Zvkn[c|g] extensions target/riscv: Expose Zvkb extension property target/riscv: Replace Zvbb checking by Zvkb target/riscv: Add cfg property for Zvkb extension target/riscv: Expose Zvkt extension property ... Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
commit
8aba939e77
@ -323,7 +323,7 @@ RISC-V TCG CPUs
|
|||||||
M: Palmer Dabbelt <palmer@dabbelt.com>
|
M: Palmer Dabbelt <palmer@dabbelt.com>
|
||||||
M: Alistair Francis <alistair.francis@wdc.com>
|
M: Alistair Francis <alistair.francis@wdc.com>
|
||||||
M: Bin Meng <bin.meng@windriver.com>
|
M: Bin Meng <bin.meng@windriver.com>
|
||||||
R: Weiwei Li <liweiwei@iscas.ac.cn>
|
R: Weiwei Li <liwei1518@gmail.com>
|
||||||
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
|
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
|
||||||
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
|
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
|
||||||
L: qemu-riscv@nongnu.org
|
L: qemu-riscv@nongnu.org
|
||||||
|
157
disas/riscv.c
157
disas/riscv.c
@ -862,6 +862,47 @@ typedef enum {
|
|||||||
rv_op_fltq_q = 831,
|
rv_op_fltq_q = 831,
|
||||||
rv_op_fleq_h = 832,
|
rv_op_fleq_h = 832,
|
||||||
rv_op_fltq_h = 833,
|
rv_op_fltq_h = 833,
|
||||||
|
rv_op_vaesdf_vv = 834,
|
||||||
|
rv_op_vaesdf_vs = 835,
|
||||||
|
rv_op_vaesdm_vv = 836,
|
||||||
|
rv_op_vaesdm_vs = 837,
|
||||||
|
rv_op_vaesef_vv = 838,
|
||||||
|
rv_op_vaesef_vs = 839,
|
||||||
|
rv_op_vaesem_vv = 840,
|
||||||
|
rv_op_vaesem_vs = 841,
|
||||||
|
rv_op_vaeskf1_vi = 842,
|
||||||
|
rv_op_vaeskf2_vi = 843,
|
||||||
|
rv_op_vaesz_vs = 844,
|
||||||
|
rv_op_vandn_vv = 845,
|
||||||
|
rv_op_vandn_vx = 846,
|
||||||
|
rv_op_vbrev_v = 847,
|
||||||
|
rv_op_vbrev8_v = 848,
|
||||||
|
rv_op_vclmul_vv = 849,
|
||||||
|
rv_op_vclmul_vx = 850,
|
||||||
|
rv_op_vclmulh_vv = 851,
|
||||||
|
rv_op_vclmulh_vx = 852,
|
||||||
|
rv_op_vclz_v = 853,
|
||||||
|
rv_op_vcpop_v = 854,
|
||||||
|
rv_op_vctz_v = 855,
|
||||||
|
rv_op_vghsh_vv = 856,
|
||||||
|
rv_op_vgmul_vv = 857,
|
||||||
|
rv_op_vrev8_v = 858,
|
||||||
|
rv_op_vrol_vv = 859,
|
||||||
|
rv_op_vrol_vx = 860,
|
||||||
|
rv_op_vror_vv = 861,
|
||||||
|
rv_op_vror_vx = 862,
|
||||||
|
rv_op_vror_vi = 863,
|
||||||
|
rv_op_vsha2ch_vv = 864,
|
||||||
|
rv_op_vsha2cl_vv = 865,
|
||||||
|
rv_op_vsha2ms_vv = 866,
|
||||||
|
rv_op_vsm3c_vi = 867,
|
||||||
|
rv_op_vsm3me_vv = 868,
|
||||||
|
rv_op_vsm4k_vi = 869,
|
||||||
|
rv_op_vsm4r_vv = 870,
|
||||||
|
rv_op_vsm4r_vs = 871,
|
||||||
|
rv_op_vwsll_vv = 872,
|
||||||
|
rv_op_vwsll_vx = 873,
|
||||||
|
rv_op_vwsll_vi = 874,
|
||||||
} rv_op;
|
} rv_op;
|
||||||
|
|
||||||
/* register names */
|
/* register names */
|
||||||
@ -2008,6 +2049,47 @@ const rv_opcode_data rvi_opcode_data[] = {
|
|||||||
{ "fltq.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
{ "fltq.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
||||||
{ "fleq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
{ "fleq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
||||||
{ "fltq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
{ "fltq.h", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesdf.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesdf.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesdm.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesdm.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesef.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesef.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesem.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesem.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vaeskf1.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm, NULL, 0, 0, 0 },
|
||||||
|
{ "vaeskf2.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm, NULL, 0, 0, 0 },
|
||||||
|
{ "vaesz.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vandn.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vandn.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vbrev.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vbrev8.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vclmul.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vclmul.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vclmulh.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vclmulh.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vclz.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vcpop.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vctz.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vghsh.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, 0, 0, 0 },
|
||||||
|
{ "vgmul.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vrev8.v", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vrol.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vrol.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vror.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vror.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vror.vi", rv_codec_vror_vi, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vsha2ch.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, 0, 0, 0 },
|
||||||
|
{ "vsha2cl.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, 0, 0, 0 },
|
||||||
|
{ "vsha2ms.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, 0, 0, 0 },
|
||||||
|
{ "vsm3c.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm, NULL, 0, 0, 0 },
|
||||||
|
{ "vsm3me.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1, NULL, 0, 0, 0 },
|
||||||
|
{ "vsm4k.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm, NULL, 0, 0, 0 },
|
||||||
|
{ "vsm4r.vv", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vsm4r.vs", rv_codec_v_r, rv_fmt_vd_vs2, NULL, 0, 0, 0 },
|
||||||
|
{ "vwsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vwsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
|
||||||
|
{ "vwsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 },
|
||||||
};
|
};
|
||||||
|
|
||||||
/* CSR names */
|
/* CSR names */
|
||||||
@ -3054,12 +3136,12 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 89:
|
case 89:
|
||||||
switch (((inst >> 12) & 0b111)) {
|
switch (((inst >> 12) & 0b111)) {
|
||||||
case 0: op = rv_op_fmvp_d_x; break;
|
case 0: op = rv_op_fmvp_d_x; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 91:
|
case 91:
|
||||||
switch (((inst >> 12) & 0b111)) {
|
switch (((inst >> 12) & 0b111)) {
|
||||||
case 0: op = rv_op_fmvp_q_x; break;
|
case 0: op = rv_op_fmvp_q_x; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@ -3176,6 +3258,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 0:
|
case 0:
|
||||||
switch ((inst >> 26) & 0b111111) {
|
switch ((inst >> 26) & 0b111111) {
|
||||||
case 0: op = rv_op_vadd_vv; break;
|
case 0: op = rv_op_vadd_vv; break;
|
||||||
|
case 1: op = rv_op_vandn_vv; break;
|
||||||
case 2: op = rv_op_vsub_vv; break;
|
case 2: op = rv_op_vsub_vv; break;
|
||||||
case 4: op = rv_op_vminu_vv; break;
|
case 4: op = rv_op_vminu_vv; break;
|
||||||
case 5: op = rv_op_vmin_vv; break;
|
case 5: op = rv_op_vmin_vv; break;
|
||||||
@ -3198,6 +3281,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 19: op = rv_op_vmsbc_vvm; break;
|
case 19: op = rv_op_vmsbc_vvm; break;
|
||||||
|
case 20: op = rv_op_vror_vv; break;
|
||||||
|
case 21: op = rv_op_vrol_vv; break;
|
||||||
case 23:
|
case 23:
|
||||||
if (((inst >> 20) & 0b111111) == 32)
|
if (((inst >> 20) & 0b111111) == 32)
|
||||||
op = rv_op_vmv_v_v;
|
op = rv_op_vmv_v_v;
|
||||||
@ -3226,6 +3311,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 47: op = rv_op_vnclip_wv; break;
|
case 47: op = rv_op_vnclip_wv; break;
|
||||||
case 48: op = rv_op_vwredsumu_vs; break;
|
case 48: op = rv_op_vwredsumu_vs; break;
|
||||||
case 49: op = rv_op_vwredsum_vs; break;
|
case 49: op = rv_op_vwredsum_vs; break;
|
||||||
|
case 53: op = rv_op_vwsll_vv; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 1:
|
case 1:
|
||||||
@ -3323,6 +3409,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 9: op = rv_op_vaadd_vv; break;
|
case 9: op = rv_op_vaadd_vv; break;
|
||||||
case 10: op = rv_op_vasubu_vv; break;
|
case 10: op = rv_op_vasubu_vv; break;
|
||||||
case 11: op = rv_op_vasub_vv; break;
|
case 11: op = rv_op_vasub_vv; break;
|
||||||
|
case 12: op = rv_op_vclmul_vv; break;
|
||||||
|
case 13: op = rv_op_vclmulh_vv; break;
|
||||||
case 16:
|
case 16:
|
||||||
switch ((inst >> 15) & 0b11111) {
|
switch ((inst >> 15) & 0b11111) {
|
||||||
case 0: if ((inst >> 25) & 1) op = rv_op_vmv_x_s; break;
|
case 0: if ((inst >> 25) & 1) op = rv_op_vmv_x_s; break;
|
||||||
@ -3338,6 +3426,12 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 5: op = rv_op_vsext_vf4; break;
|
case 5: op = rv_op_vsext_vf4; break;
|
||||||
case 6: op = rv_op_vzext_vf2; break;
|
case 6: op = rv_op_vzext_vf2; break;
|
||||||
case 7: op = rv_op_vsext_vf2; break;
|
case 7: op = rv_op_vsext_vf2; break;
|
||||||
|
case 8: op = rv_op_vbrev8_v; break;
|
||||||
|
case 9: op = rv_op_vrev8_v; break;
|
||||||
|
case 10: op = rv_op_vbrev_v; break;
|
||||||
|
case 12: op = rv_op_vclz_v; break;
|
||||||
|
case 13: op = rv_op_vctz_v; break;
|
||||||
|
case 14: op = rv_op_vcpop_v; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 20:
|
case 20:
|
||||||
@ -3406,6 +3500,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 17: op = rv_op_vmadc_vim; break;
|
case 17: op = rv_op_vmadc_vim; break;
|
||||||
|
case 20: case 21: op = rv_op_vror_vi; break;
|
||||||
case 23:
|
case 23:
|
||||||
if (((inst >> 20) & 0b111111) == 32)
|
if (((inst >> 20) & 0b111111) == 32)
|
||||||
op = rv_op_vmv_v_i;
|
op = rv_op_vmv_v_i;
|
||||||
@ -3437,11 +3532,13 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 45: op = rv_op_vnsra_wi; break;
|
case 45: op = rv_op_vnsra_wi; break;
|
||||||
case 46: op = rv_op_vnclipu_wi; break;
|
case 46: op = rv_op_vnclipu_wi; break;
|
||||||
case 47: op = rv_op_vnclip_wi; break;
|
case 47: op = rv_op_vnclip_wi; break;
|
||||||
|
case 53: op = rv_op_vwsll_vi; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 4:
|
case 4:
|
||||||
switch ((inst >> 26) & 0b111111) {
|
switch ((inst >> 26) & 0b111111) {
|
||||||
case 0: op = rv_op_vadd_vx; break;
|
case 0: op = rv_op_vadd_vx; break;
|
||||||
|
case 1: op = rv_op_vandn_vx; break;
|
||||||
case 2: op = rv_op_vsub_vx; break;
|
case 2: op = rv_op_vsub_vx; break;
|
||||||
case 3: op = rv_op_vrsub_vx; break;
|
case 3: op = rv_op_vrsub_vx; break;
|
||||||
case 4: op = rv_op_vminu_vx; break;
|
case 4: op = rv_op_vminu_vx; break;
|
||||||
@ -3466,6 +3563,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 19: op = rv_op_vmsbc_vxm; break;
|
case 19: op = rv_op_vmsbc_vxm; break;
|
||||||
|
case 20: op = rv_op_vror_vx; break;
|
||||||
|
case 21: op = rv_op_vrol_vx; break;
|
||||||
case 23:
|
case 23:
|
||||||
if (((inst >> 20) & 0b111111) == 32)
|
if (((inst >> 20) & 0b111111) == 32)
|
||||||
op = rv_op_vmv_v_x;
|
op = rv_op_vmv_v_x;
|
||||||
@ -3494,6 +3593,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 45: op = rv_op_vnsra_wx; break;
|
case 45: op = rv_op_vnsra_wx; break;
|
||||||
case 46: op = rv_op_vnclipu_wx; break;
|
case 46: op = rv_op_vnclipu_wx; break;
|
||||||
case 47: op = rv_op_vnclip_wx; break;
|
case 47: op = rv_op_vnclip_wx; break;
|
||||||
|
case 53: op = rv_op_vwsll_vx; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 5:
|
case 5:
|
||||||
@ -3554,6 +3654,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 9: op = rv_op_vaadd_vx; break;
|
case 9: op = rv_op_vaadd_vx; break;
|
||||||
case 10: op = rv_op_vasubu_vx; break;
|
case 10: op = rv_op_vasubu_vx; break;
|
||||||
case 11: op = rv_op_vasub_vx; break;
|
case 11: op = rv_op_vasub_vx; break;
|
||||||
|
case 12: op = rv_op_vclmul_vx; break;
|
||||||
|
case 13: op = rv_op_vclmulh_vx; break;
|
||||||
case 14: op = rv_op_vslide1up_vx; break;
|
case 14: op = rv_op_vslide1up_vx; break;
|
||||||
case 15: op = rv_op_vslide1down_vx; break;
|
case 15: op = rv_op_vslide1down_vx; break;
|
||||||
case 16:
|
case 16:
|
||||||
@ -3686,6 +3788,41 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
|
|||||||
case 7: op = rv_op_csrrci; break;
|
case 7: op = rv_op_csrrci; break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
case 29:
|
||||||
|
if (((inst >> 25) & 1) == 1 && ((inst >> 12) & 0b111) == 2) {
|
||||||
|
switch ((inst >> 26) & 0b111111) {
|
||||||
|
case 32: op = rv_op_vsm3me_vv; break;
|
||||||
|
case 33: op = rv_op_vsm4k_vi; break;
|
||||||
|
case 34: op = rv_op_vaeskf1_vi; break;
|
||||||
|
case 40:
|
||||||
|
switch ((inst >> 15) & 0b11111) {
|
||||||
|
case 0: op = rv_op_vaesdm_vv; break;
|
||||||
|
case 1: op = rv_op_vaesdf_vv; break;
|
||||||
|
case 2: op = rv_op_vaesem_vv; break;
|
||||||
|
case 3: op = rv_op_vaesef_vv; break;
|
||||||
|
case 16: op = rv_op_vsm4r_vv; break;
|
||||||
|
case 17: op = rv_op_vgmul_vv; break;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case 41:
|
||||||
|
switch ((inst >> 15) & 0b11111) {
|
||||||
|
case 0: op = rv_op_vaesdm_vs; break;
|
||||||
|
case 1: op = rv_op_vaesdf_vs; break;
|
||||||
|
case 2: op = rv_op_vaesem_vs; break;
|
||||||
|
case 3: op = rv_op_vaesef_vs; break;
|
||||||
|
case 7: op = rv_op_vaesz_vs; break;
|
||||||
|
case 16: op = rv_op_vsm4r_vs; break;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case 42: op = rv_op_vaeskf2_vi; break;
|
||||||
|
case 43: op = rv_op_vsm3c_vi; break;
|
||||||
|
case 44: op = rv_op_vghsh_vv; break;
|
||||||
|
case 45: op = rv_op_vsha2ms_vv; break;
|
||||||
|
case 46: op = rv_op_vsha2ch_vv; break;
|
||||||
|
case 47: op = rv_op_vsha2cl_vv; break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
case 30:
|
case 30:
|
||||||
switch (((inst >> 22) & 0b1111111000) |
|
switch (((inst >> 22) & 0b1111111000) |
|
||||||
((inst >> 12) & 0b0000000111)) {
|
((inst >> 12) & 0b0000000111)) {
|
||||||
@ -4011,6 +4148,12 @@ static uint32_t operand_vzimm10(rv_inst inst)
|
|||||||
return (inst << 34) >> 54;
|
return (inst << 34) >> 54;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static uint32_t operand_vzimm6(rv_inst inst)
|
||||||
|
{
|
||||||
|
return ((inst << 37) >> 63) << 5 |
|
||||||
|
((inst << 44) >> 59);
|
||||||
|
}
|
||||||
|
|
||||||
static uint32_t operand_bs(rv_inst inst)
|
static uint32_t operand_bs(rv_inst inst)
|
||||||
{
|
{
|
||||||
return (inst << 32) >> 62;
|
return (inst << 32) >> 62;
|
||||||
@ -4393,6 +4536,12 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa)
|
|||||||
dec->imm = operand_vimm(inst);
|
dec->imm = operand_vimm(inst);
|
||||||
dec->vm = operand_vm(inst);
|
dec->vm = operand_vm(inst);
|
||||||
break;
|
break;
|
||||||
|
case rv_codec_vror_vi:
|
||||||
|
dec->rd = operand_rd(inst);
|
||||||
|
dec->rs2 = operand_rs2(inst);
|
||||||
|
dec->imm = operand_vzimm6(inst);
|
||||||
|
dec->vm = operand_vm(inst);
|
||||||
|
break;
|
||||||
case rv_codec_vsetvli:
|
case rv_codec_vsetvli:
|
||||||
dec->rd = operand_rd(inst);
|
dec->rd = operand_rd(inst);
|
||||||
dec->rs1 = operand_rs1(inst);
|
dec->rs1 = operand_rs1(inst);
|
||||||
@ -4430,7 +4579,7 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa)
|
|||||||
break;
|
break;
|
||||||
case rv_codec_zcmt_jt:
|
case rv_codec_zcmt_jt:
|
||||||
dec->imm = operand_tbl_index(inst);
|
dec->imm = operand_tbl_index(inst);
|
||||||
break;
|
break;
|
||||||
case rv_codec_fli:
|
case rv_codec_fli:
|
||||||
dec->rd = operand_rd(inst);
|
dec->rd = operand_rd(inst);
|
||||||
dec->imm = operand_rs1(inst);
|
dec->imm = operand_rs1(inst);
|
||||||
@ -4677,7 +4826,7 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec)
|
|||||||
append(buf, tmp, buflen);
|
append(buf, tmp, buflen);
|
||||||
break;
|
break;
|
||||||
case 'u':
|
case 'u':
|
||||||
snprintf(tmp, sizeof(tmp), "%u", ((uint32_t)dec->imm & 0b11111));
|
snprintf(tmp, sizeof(tmp), "%u", ((uint32_t)dec->imm & 0b111111));
|
||||||
append(buf, tmp, buflen);
|
append(buf, tmp, buflen);
|
||||||
break;
|
break;
|
||||||
case 'j':
|
case 'j':
|
||||||
|
@ -152,6 +152,7 @@ typedef enum {
|
|||||||
rv_codec_v_i,
|
rv_codec_v_i,
|
||||||
rv_codec_vsetvli,
|
rv_codec_vsetvli,
|
||||||
rv_codec_vsetivli,
|
rv_codec_vsetivli,
|
||||||
|
rv_codec_vror_vi,
|
||||||
rv_codec_zcb_ext,
|
rv_codec_zcb_ext,
|
||||||
rv_codec_zcb_mul,
|
rv_codec_zcb_mul,
|
||||||
rv_codec_zcb_lb,
|
rv_codec_zcb_lb,
|
||||||
@ -274,6 +275,7 @@ enum {
|
|||||||
#define rv_fmt_vd_vs2_fs1_vm "O\tD,F,4m"
|
#define rv_fmt_vd_vs2_fs1_vm "O\tD,F,4m"
|
||||||
#define rv_fmt_vd_vs2_imm_vl "O\tD,F,il"
|
#define rv_fmt_vd_vs2_imm_vl "O\tD,F,il"
|
||||||
#define rv_fmt_vd_vs2_imm_vm "O\tD,F,im"
|
#define rv_fmt_vd_vs2_imm_vm "O\tD,F,im"
|
||||||
|
#define rv_fmt_vd_vs2_uimm "O\tD,F,u"
|
||||||
#define rv_fmt_vd_vs2_uimm_vm "O\tD,F,um"
|
#define rv_fmt_vd_vs2_uimm_vm "O\tD,F,um"
|
||||||
#define rv_fmt_vd_vs1_vs2_vm "O\tD,E,Fm"
|
#define rv_fmt_vd_vs1_vs2_vm "O\tD,E,Fm"
|
||||||
#define rv_fmt_vd_rs1_vs2_vm "O\tD,1,Fm"
|
#define rv_fmt_vd_rs1_vs2_vm "O\tD,1,Fm"
|
||||||
|
@ -413,6 +413,18 @@ Specifying the iSCSI password in plain text on the command line using the
|
|||||||
used instead, to refer to a ``--object secret...`` instance that provides
|
used instead, to refer to a ``--object secret...`` instance that provides
|
||||||
a password via a file, or encrypted.
|
a password via a file, or encrypted.
|
||||||
|
|
||||||
|
CPU device properties
|
||||||
|
'''''''''''''''''''''
|
||||||
|
|
||||||
|
``pmu-num=n`` on RISC-V CPUs (since 8.2)
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
In order to support more flexible counter configurations this has been replaced
|
||||||
|
by a ``pmu-mask`` property. If set of counters is continuous then the mask can
|
||||||
|
be calculated with ``((2 ^ n) - 1) << 3``. The least significant three bits
|
||||||
|
must be left clear.
|
||||||
|
|
||||||
|
|
||||||
Backwards compatibility
|
Backwards compatibility
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ Supported devices
|
|||||||
|
|
||||||
The ``virt`` machine supports the following devices:
|
The ``virt`` machine supports the following devices:
|
||||||
|
|
||||||
* Up to 8 generic RV32GC/RV64GC cores, with optional extensions
|
* Up to 512 generic RV32GC/RV64GC cores, with optional extensions
|
||||||
* Core Local Interruptor (CLINT)
|
* Core Local Interruptor (CLINT)
|
||||||
* Platform-Level Interrupt Controller (PLIC)
|
* Platform-Level Interrupt Controller (PLIC)
|
||||||
* CFI parallel NOR flash memory
|
* CFI parallel NOR flash memory
|
||||||
|
@ -414,7 +414,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
|
|||||||
reset_vec[4] = 0x0182b283; /* ld t0, 24(t0) */
|
reset_vec[4] = 0x0182b283; /* ld t0, 24(t0) */
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!harts->harts[0].cfg.ext_icsr) {
|
if (!harts->harts[0].cfg.ext_zicsr) {
|
||||||
/*
|
/*
|
||||||
* The Zicsr extension has been disabled, so let's ensure we don't
|
* The Zicsr extension has been disabled, so let's ensure we don't
|
||||||
* run the CSR instruction. Let's fill the address with a non
|
* run the CSR instruction. Let's fill the address with a non
|
||||||
|
@ -263,12 +263,12 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
|
|||||||
qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name);
|
qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name);
|
||||||
g_free(name);
|
g_free(name);
|
||||||
|
|
||||||
if (cpu_ptr->cfg.ext_icbom) {
|
if (cpu_ptr->cfg.ext_zicbom) {
|
||||||
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size",
|
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size",
|
||||||
cpu_ptr->cfg.cbom_blocksize);
|
cpu_ptr->cfg.cbom_blocksize);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cpu_ptr->cfg.ext_icboz) {
|
if (cpu_ptr->cfg.ext_zicboz) {
|
||||||
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cboz-block-size",
|
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cboz-block-size",
|
||||||
cpu_ptr->cfg.cboz_blocksize);
|
cpu_ptr->cfg.cboz_blocksize);
|
||||||
}
|
}
|
||||||
@ -722,7 +722,7 @@ static void create_fdt_pmu(RISCVVirtState *s)
|
|||||||
pmu_name = g_strdup_printf("/pmu");
|
pmu_name = g_strdup_printf("/pmu");
|
||||||
qemu_fdt_add_subnode(ms->fdt, pmu_name);
|
qemu_fdt_add_subnode(ms->fdt, pmu_name);
|
||||||
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
|
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
|
||||||
riscv_pmu_generate_fdt_node(ms->fdt, hart.cfg.pmu_num, pmu_name);
|
riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name);
|
||||||
|
|
||||||
g_free(pmu_name);
|
g_free(pmu_name);
|
||||||
}
|
}
|
||||||
|
@ -205,9 +205,10 @@ static void ibex_spi_host_irq(IbexSPIHostState *s)
|
|||||||
if (err_irq) {
|
if (err_irq) {
|
||||||
s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
|
s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
|
||||||
}
|
}
|
||||||
qemu_set_irq(s->host_err, err_irq);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
qemu_set_irq(s->host_err, err_irq);
|
||||||
|
|
||||||
/* Event IRQ Enabled and Event IRQ Cleared */
|
/* Event IRQ Enabled and Event IRQ Cleared */
|
||||||
if (event_en && !status_pending) {
|
if (event_en && !status_pending) {
|
||||||
if (FIELD_EX32(intr_test_reg, INTR_STATE, SPI_EVENT)) {
|
if (FIELD_EX32(intr_test_reg, INTR_STATE, SPI_EVENT)) {
|
||||||
@ -229,8 +230,9 @@ static void ibex_spi_host_irq(IbexSPIHostState *s)
|
|||||||
if (event_irq) {
|
if (event_irq) {
|
||||||
s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
|
s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
|
||||||
}
|
}
|
||||||
qemu_set_irq(s->event, event_irq);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
qemu_set_irq(s->event, event_irq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ibex_spi_host_transfer(IbexSPIHostState *s)
|
static void ibex_spi_host_transfer(IbexSPIHostState *s)
|
||||||
|
@ -9,7 +9,6 @@
|
|||||||
#define RISCV_TARGET_ELF_H
|
#define RISCV_TARGET_ELF_H
|
||||||
static inline const char *cpu_get_model(uint32_t eflags)
|
static inline const char *cpu_get_model(uint32_t eflags)
|
||||||
{
|
{
|
||||||
/* TYPE_RISCV_CPU_ANY */
|
return "max";
|
||||||
return "any";
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -231,7 +231,8 @@
|
|||||||
'if': { 'any': [ 'TARGET_S390X',
|
'if': { 'any': [ 'TARGET_S390X',
|
||||||
'TARGET_I386',
|
'TARGET_I386',
|
||||||
'TARGET_ARM',
|
'TARGET_ARM',
|
||||||
'TARGET_LOONGARCH64' ] } }
|
'TARGET_LOONGARCH64',
|
||||||
|
'TARGET_RISCV' ] } }
|
||||||
|
|
||||||
##
|
##
|
||||||
# @query-cpu-model-expansion:
|
# @query-cpu-model-expansion:
|
||||||
@ -277,7 +278,8 @@
|
|||||||
'if': { 'any': [ 'TARGET_S390X',
|
'if': { 'any': [ 'TARGET_S390X',
|
||||||
'TARGET_I386',
|
'TARGET_I386',
|
||||||
'TARGET_ARM',
|
'TARGET_ARM',
|
||||||
'TARGET_LOONGARCH64' ] } }
|
'TARGET_LOONGARCH64',
|
||||||
|
'TARGET_RISCV' ] } }
|
||||||
|
|
||||||
##
|
##
|
||||||
# @CpuDefinitionInfo:
|
# @CpuDefinitionInfo:
|
||||||
|
@ -34,6 +34,7 @@
|
|||||||
#include "sysemu/kvm.h"
|
#include "sysemu/kvm.h"
|
||||||
#include "sysemu/tcg.h"
|
#include "sysemu/tcg.h"
|
||||||
#include "kvm/kvm_riscv.h"
|
#include "kvm/kvm_riscv.h"
|
||||||
|
#include "tcg/tcg-cpu.h"
|
||||||
#include "tcg/tcg.h"
|
#include "tcg/tcg.h"
|
||||||
|
|
||||||
/* RISC-V CPU definitions */
|
/* RISC-V CPU definitions */
|
||||||
@ -76,13 +77,15 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
|
|||||||
* instead.
|
* instead.
|
||||||
*/
|
*/
|
||||||
const RISCVIsaExtData isa_edata_arr[] = {
|
const RISCVIsaExtData isa_edata_arr[] = {
|
||||||
ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
|
ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
|
||||||
ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
|
ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
|
||||||
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
|
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
|
||||||
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
|
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
|
||||||
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
|
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
|
||||||
|
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
|
||||||
ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
|
ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
|
||||||
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
|
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
|
||||||
|
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
|
||||||
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
|
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
|
||||||
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
|
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
|
||||||
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
|
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
|
||||||
@ -124,16 +127,24 @@ const RISCVIsaExtData isa_edata_arr[] = {
|
|||||||
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
|
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
|
||||||
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
|
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
|
||||||
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
|
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
|
||||||
ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
|
ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
|
||||||
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
|
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
|
||||||
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
|
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
|
||||||
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
|
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
|
||||||
ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
|
ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
|
||||||
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
|
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
|
||||||
|
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
|
||||||
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
|
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
|
||||||
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
|
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
|
||||||
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
|
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
|
||||||
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
|
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
|
||||||
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
|
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
|
||||||
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
|
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
|
||||||
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
|
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
|
||||||
@ -382,8 +393,8 @@ static void riscv_any_cpu_init(Object *obj)
|
|||||||
env->priv_ver = PRIV_VERSION_LATEST;
|
env->priv_ver = PRIV_VERSION_LATEST;
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.mmu = true;
|
cpu->cfg.mmu = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
@ -430,8 +441,8 @@ static void rv64_sifive_u_cpu_init(Object *obj)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.mmu = true;
|
cpu->cfg.mmu = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
@ -448,8 +459,8 @@ static void rv64_sifive_e_cpu_init(Object *obj)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -494,13 +505,13 @@ static void rv64_veyron_v1_cpu_init(Object *obj)
|
|||||||
|
|
||||||
/* Enable ISA extensions */
|
/* Enable ISA extensions */
|
||||||
cpu->cfg.mmu = true;
|
cpu->cfg.mmu = true;
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
cpu->cfg.ext_icbom = true;
|
cpu->cfg.ext_zicbom = true;
|
||||||
cpu->cfg.cbom_blocksize = 64;
|
cpu->cfg.cbom_blocksize = 64;
|
||||||
cpu->cfg.cboz_blocksize = 64;
|
cpu->cfg.cboz_blocksize = 64;
|
||||||
cpu->cfg.ext_icboz = true;
|
cpu->cfg.ext_zicboz = true;
|
||||||
cpu->cfg.ext_smaia = true;
|
cpu->cfg.ext_smaia = true;
|
||||||
cpu->cfg.ext_ssaia = true;
|
cpu->cfg.ext_ssaia = true;
|
||||||
cpu->cfg.ext_sscofpmf = true;
|
cpu->cfg.ext_sscofpmf = true;
|
||||||
@ -566,8 +577,8 @@ static void rv32_sifive_u_cpu_init(Object *obj)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.mmu = true;
|
cpu->cfg.mmu = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
@ -584,8 +595,8 @@ static void rv32_sifive_e_cpu_init(Object *obj)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -595,16 +606,15 @@ static void rv32_ibex_cpu_init(Object *obj)
|
|||||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||||
|
|
||||||
riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
|
riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
|
||||||
env->priv_ver = PRIV_VERSION_1_11_0;
|
env->priv_ver = PRIV_VERSION_1_12_0;
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
||||||
#endif
|
#endif
|
||||||
cpu->cfg.epmp = true;
|
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
|
cpu->cfg.ext_smepmp = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void rv32_imafcu_nommu_cpu_init(Object *obj)
|
static void rv32_imafcu_nommu_cpu_init(Object *obj)
|
||||||
@ -619,8 +629,8 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* inherited from parent obj via riscv_cpu_init() */
|
/* inherited from parent obj via riscv_cpu_init() */
|
||||||
cpu->cfg.ext_ifencei = true;
|
cpu->cfg.ext_zifencei = true;
|
||||||
cpu->cfg.ext_icsr = true;
|
cpu->cfg.ext_zicsr = true;
|
||||||
cpu->cfg.pmp = true;
|
cpu->cfg.pmp = true;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -813,7 +823,9 @@ static bool riscv_cpu_has_work(CPUState *cs)
|
|||||||
* Definition of the WFI instruction requires it to ignore the privilege
|
* Definition of the WFI instruction requires it to ignore the privilege
|
||||||
* mode and delegation registers, but respect individual enables
|
* mode and delegation registers, but respect individual enables
|
||||||
*/
|
*/
|
||||||
return riscv_cpu_all_pending(env) != 0;
|
return riscv_cpu_all_pending(env) != 0 ||
|
||||||
|
riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
|
||||||
|
riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
|
||||||
#else
|
#else
|
||||||
return true;
|
return true;
|
||||||
#endif
|
#endif
|
||||||
@ -882,6 +894,17 @@ static void riscv_cpu_reset_hold(Object *obj)
|
|||||||
}
|
}
|
||||||
/* mmte is supposed to have pm.current hardwired to 1 */
|
/* mmte is supposed to have pm.current hardwired to 1 */
|
||||||
env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
|
env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Clear mseccfg and unlock all the PMP entries upon reset.
|
||||||
|
* This is allowed as per the priv and smepmp specifications
|
||||||
|
* and is needed to clear stale entries across reboots.
|
||||||
|
*/
|
||||||
|
if (riscv_cpu_cfg(env)->ext_smepmp) {
|
||||||
|
env->mseccfg = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
pmp_unlock_entries(env);
|
||||||
#endif
|
#endif
|
||||||
env->xl = riscv_cpu_mxl(env);
|
env->xl = riscv_cpu_mxl(env);
|
||||||
riscv_cpu_update_mask(env);
|
riscv_cpu_update_mask(env);
|
||||||
@ -996,11 +1019,24 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
|
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
|
||||||
{
|
{
|
||||||
#ifndef CONFIG_USER_ONLY
|
|
||||||
Error *local_err = NULL;
|
Error *local_err = NULL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* KVM accel does not have a specialized finalize()
|
||||||
|
* callback because its extensions are validated
|
||||||
|
* in the get()/set() callbacks of each property.
|
||||||
|
*/
|
||||||
|
if (tcg_enabled()) {
|
||||||
|
riscv_tcg_cpu_finalize_features(cpu, &local_err);
|
||||||
|
if (local_err != NULL) {
|
||||||
|
error_propagate(errp, local_err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef CONFIG_USER_ONLY
|
||||||
riscv_cpu_satp_mode_finalize(cpu, &local_err);
|
riscv_cpu_satp_mode_finalize(cpu, &local_err);
|
||||||
if (local_err != NULL) {
|
if (local_err != NULL) {
|
||||||
error_propagate(errp, local_err);
|
error_propagate(errp, local_err);
|
||||||
@ -1047,6 +1083,15 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
|||||||
mcc->parent_realize(dev, errp);
|
mcc->parent_realize(dev, errp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
|
||||||
|
{
|
||||||
|
if (tcg_enabled()) {
|
||||||
|
return riscv_cpu_tcg_compatible(cpu);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
|
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
|
||||||
void *opaque, Error **errp)
|
void *opaque, Error **errp)
|
||||||
@ -1173,6 +1218,16 @@ static void riscv_cpu_init(Object *obj)
|
|||||||
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
|
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
|
||||||
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
|
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
|
||||||
#endif /* CONFIG_USER_ONLY */
|
#endif /* CONFIG_USER_ONLY */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The timer and performance counters extensions were supported
|
||||||
|
* in QEMU before they were added as discrete extensions in the
|
||||||
|
* ISA. To keep compatibility we'll always default them to 'true'
|
||||||
|
* for all CPUs. Each accelerator will decide what to do when
|
||||||
|
* users disable them.
|
||||||
|
*/
|
||||||
|
RISCV_CPU(obj)->cfg.ext_zicntr = true;
|
||||||
|
RISCV_CPU(obj)->cfg.ext_zihpm = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef struct misa_ext_info {
|
typedef struct misa_ext_info {
|
||||||
@ -1242,8 +1297,8 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
|
|||||||
const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
|
const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
|
||||||
/* Defaults for standard extensions */
|
/* Defaults for standard extensions */
|
||||||
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
|
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
|
||||||
MULTI_EXT_CFG_BOOL("zifencei", ext_ifencei, true),
|
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
|
||||||
MULTI_EXT_CFG_BOOL("zicsr", ext_icsr, true),
|
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
|
||||||
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
|
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
|
||||||
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
|
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
|
||||||
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
|
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
|
||||||
@ -1255,12 +1310,16 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
|
|||||||
MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
|
MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
|
||||||
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
|
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
|
||||||
|
|
||||||
|
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
|
||||||
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
|
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
|
||||||
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
|
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
|
||||||
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
|
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
|
||||||
MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
|
MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
|
||||||
MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
|
MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
|
||||||
|
|
||||||
|
MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
|
||||||
|
MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
|
||||||
|
|
||||||
MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
|
MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
|
||||||
MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
|
MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
|
||||||
MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
|
MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
|
||||||
@ -1284,8 +1343,8 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
|
|||||||
MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
|
MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
|
||||||
MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
|
MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
|
||||||
|
|
||||||
MULTI_EXT_CFG_BOOL("zicbom", ext_icbom, true),
|
MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
|
||||||
MULTI_EXT_CFG_BOOL("zicboz", ext_icboz, true),
|
MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
|
||||||
|
|
||||||
MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
|
MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
|
||||||
|
|
||||||
@ -1298,6 +1357,24 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
|
|||||||
MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
|
MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
|
||||||
MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
|
MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
|
||||||
|
|
||||||
|
/* Vector cryptography extensions */
|
||||||
|
MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
|
||||||
|
MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
|
||||||
|
|
||||||
DEFINE_PROP_END_OF_LIST(),
|
DEFINE_PROP_END_OF_LIST(),
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1320,8 +1397,6 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
|
|||||||
|
|
||||||
/* These are experimental so mark with 'x-' */
|
/* These are experimental so mark with 'x-' */
|
||||||
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
|
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
|
||||||
/* ePMP 0.9.3 */
|
|
||||||
MULTI_EXT_CFG_BOOL("x-epmp", epmp, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
|
MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
|
||||||
MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
|
MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
|
||||||
|
|
||||||
@ -1332,23 +1407,13 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
|
|||||||
MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
|
MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
|
||||||
MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
|
MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
|
||||||
|
|
||||||
/* Vector cryptography extensions */
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvbb", ext_zvbb, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvbc", ext_zvbc, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvkg", ext_zvkg, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvkned", ext_zvkned, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvknha", ext_zvknha, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvknhb", ext_zvknhb, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvksed", ext_zvksed, false),
|
|
||||||
MULTI_EXT_CFG_BOOL("x-zvksh", ext_zvksh, false),
|
|
||||||
|
|
||||||
DEFINE_PROP_END_OF_LIST(),
|
DEFINE_PROP_END_OF_LIST(),
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Deprecated entries marked for future removal */
|
/* Deprecated entries marked for future removal */
|
||||||
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
|
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
|
||||||
MULTI_EXT_CFG_BOOL("Zifencei", ext_ifencei, true),
|
MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
|
||||||
MULTI_EXT_CFG_BOOL("Zicsr", ext_icsr, true),
|
MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
|
||||||
MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
|
MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
|
||||||
MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
|
MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
|
||||||
MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
|
MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
|
||||||
@ -1362,8 +1427,46 @@ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
|
|||||||
DEFINE_PROP_END_OF_LIST(),
|
DEFINE_PROP_END_OF_LIST(),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
|
||||||
|
void *opaque, Error **errp)
|
||||||
|
{
|
||||||
|
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||||
|
uint8_t pmu_num;
|
||||||
|
|
||||||
|
visit_type_uint8(v, name, &pmu_num, errp);
|
||||||
|
|
||||||
|
if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
|
||||||
|
error_setg(errp, "Number of counters exceeds maximum available");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pmu_num == 0) {
|
||||||
|
cpu->cfg.pmu_mask = 0;
|
||||||
|
} else {
|
||||||
|
cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
|
||||||
|
}
|
||||||
|
|
||||||
|
warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
|
||||||
|
}
|
||||||
|
|
||||||
|
static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
|
||||||
|
void *opaque, Error **errp)
|
||||||
|
{
|
||||||
|
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||||
|
uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
|
||||||
|
|
||||||
|
visit_type_uint8(v, name, &pmu_num, errp);
|
||||||
|
}
|
||||||
|
|
||||||
|
const PropertyInfo prop_pmu_num = {
|
||||||
|
.name = "pmu-num",
|
||||||
|
.get = prop_pmu_num_get,
|
||||||
|
.set = prop_pmu_num_set,
|
||||||
|
};
|
||||||
|
|
||||||
Property riscv_cpu_options[] = {
|
Property riscv_cpu_options[] = {
|
||||||
DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
|
DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)),
|
||||||
|
{.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
|
||||||
|
|
||||||
DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
|
DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
|
||||||
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
|
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
|
||||||
|
@ -202,6 +202,18 @@ struct CPUArchState {
|
|||||||
uint64_t mie;
|
uint64_t mie;
|
||||||
uint64_t mideleg;
|
uint64_t mideleg;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* When mideleg[i]=0 and mvien[i]=1, sie[i] is no more
|
||||||
|
* alias of mie[i] and needs to be maintained separatly.
|
||||||
|
*/
|
||||||
|
uint64_t sie;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more
|
||||||
|
* alias of sie[i] (mie[i]) and needs to be maintained separatly.
|
||||||
|
*/
|
||||||
|
uint64_t vsie;
|
||||||
|
|
||||||
target_ulong satp; /* since: priv-1.10.0 */
|
target_ulong satp; /* since: priv-1.10.0 */
|
||||||
target_ulong stval;
|
target_ulong stval;
|
||||||
target_ulong medeleg;
|
target_ulong medeleg;
|
||||||
@ -222,6 +234,8 @@ struct CPUArchState {
|
|||||||
/* AIA CSRs */
|
/* AIA CSRs */
|
||||||
target_ulong miselect;
|
target_ulong miselect;
|
||||||
target_ulong siselect;
|
target_ulong siselect;
|
||||||
|
uint64_t mvien;
|
||||||
|
uint64_t mvip;
|
||||||
|
|
||||||
/* Hypervisor CSRs */
|
/* Hypervisor CSRs */
|
||||||
target_ulong hstatus;
|
target_ulong hstatus;
|
||||||
@ -234,6 +248,14 @@ struct CPUArchState {
|
|||||||
target_ulong hgeie;
|
target_ulong hgeie;
|
||||||
target_ulong hgeip;
|
target_ulong hgeip;
|
||||||
uint64_t htimedelta;
|
uint64_t htimedelta;
|
||||||
|
uint64_t hvien;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
|
||||||
|
* from 0:12 are reserved. Bits 13:63 are not aliased and must be separately
|
||||||
|
* maintain in hvip.
|
||||||
|
*/
|
||||||
|
uint64_t hvip;
|
||||||
|
|
||||||
/* Hypervisor controlled virtual interrupt priorities */
|
/* Hypervisor controlled virtual interrupt priorities */
|
||||||
target_ulong hvictl;
|
target_ulong hvictl;
|
||||||
@ -463,6 +485,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
|
|||||||
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
|
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
|
||||||
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
|
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
|
 uint64_t value);
+void riscv_cpu_interrupt(CPURISCVState *env);
 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                              void *arg);
@@ -733,7 +756,9 @@ typedef struct isa_ext_data {
 extern const RISCVIsaExtData isa_edata_arr[];
 char *riscv_cpu_get_name(RISCVCPU *cpu);
 
+void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
 void riscv_add_satp_mode_properties(Object *obj);
+bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
 
 /* CSR function table */
 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
@@ -735,6 +735,12 @@ typedef enum RISCVException {
 #define MIE_SSIE (1 << IRQ_S_SOFT)
 #define MIE_USIE (1 << IRQ_U_SOFT)
 
+/* Machine constants */
+#define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
+#define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
+#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
+#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
+
 /* General PointerMasking CSR bits */
 #define PM_ENABLE 0x00000001ULL
 #define PM_CURRENT 0x00000002ULL
@@ -61,13 +61,15 @@ struct RISCVCPUConfig {
     bool ext_zksed;
     bool ext_zksh;
     bool ext_zkt;
-    bool ext_ifencei;
-    bool ext_icsr;
-    bool ext_icbom;
-    bool ext_icboz;
+    bool ext_zifencei;
+    bool ext_zicntr;
+    bool ext_zicsr;
+    bool ext_zicbom;
+    bool ext_zicboz;
     bool ext_zicond;
     bool ext_zihintntl;
     bool ext_zihintpause;
+    bool ext_zihpm;
     bool ext_smstateen;
     bool ext_sstc;
     bool ext_svadu;
@@ -88,12 +90,20 @@ struct RISCVCPUConfig {
     bool ext_zve64d;
     bool ext_zvbb;
     bool ext_zvbc;
+    bool ext_zvkb;
     bool ext_zvkg;
     bool ext_zvkned;
     bool ext_zvknha;
     bool ext_zvknhb;
     bool ext_zvksed;
     bool ext_zvksh;
+    bool ext_zvkt;
+    bool ext_zvkn;
+    bool ext_zvknc;
+    bool ext_zvkng;
+    bool ext_zvks;
+    bool ext_zvksc;
+    bool ext_zvksg;
     bool ext_zmmul;
     bool ext_zvfbfmin;
     bool ext_zvfbfwma;
@@ -102,6 +112,7 @@ struct RISCVCPUConfig {
     bool ext_smaia;
     bool ext_ssaia;
     bool ext_sscofpmf;
+    bool ext_smepmp;
     bool rvv_ta_all_1s;
     bool rvv_ma_all_1s;
 
@@ -123,7 +134,7 @@ struct RISCVCPUConfig {
     bool ext_xtheadsync;
     bool ext_XVentanaCondOps;
 
-    uint8_t pmu_num;
+    uint32_t pmu_mask;
     char *priv_spec;
     char *user_spec;
     char *bext_spec;
@@ -134,7 +145,6 @@ struct RISCVCPUConfig {
    uint16_t cboz_blocksize;
    bool mmu;
    bool pmp;
-   bool epmp;
    bool debug;
    bool misa_w;

@@ -376,6 +376,11 @@ static int riscv_cpu_pending_to_irq(CPURISCVState *env,
     return best_irq;
 }
 
+/*
+ * Doesn't report interrupts inserted using mvip from M-mode firmware or
+ * using hvip bits 13:63 from HS-mode. Those are returned in
+ * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
+ */
 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
 {
     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
@@ -398,24 +403,32 @@ int riscv_cpu_sirq_pending(CPURISCVState *env)
 {
     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+    uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
 
     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
-                                    irqs, env->siprio);
+                                    irqs | irqs_f, env->siprio);
 }
 
 int riscv_cpu_vsirq_pending(CPURISCVState *env)
 {
-    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
-                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
+    uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
+    uint64_t vsbits;
+
+    /* Bring VS-level bits to correct position */
+    vsbits = irqs & VS_MODE_INTERRUPTS;
+    irqs &= ~VS_MODE_INTERRUPTS;
+    irqs |= vsbits >> 1;
 
     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
-                                    irqs >> 1, env->hviprio);
+                                    (irqs | irqs_f_vs), env->hviprio);
 }
 
 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
 {
+    uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
+    uint64_t vsbits, irq_delegated;
     int virq;
-    uint64_t irqs, pending, mie, hsie, vsie;
 
     /* Determine interrupt enable state of all privilege modes */
     if (env->virt_enabled) {
@@ -441,19 +454,36 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
                                         irqs, env->miprio);
     }
 
+    /* Check for virtual S-mode interrupts. */
+    irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
+
     /* Check HS-mode interrupts */
-    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
+    irqs = ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
     if (irqs) {
         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                         irqs, env->siprio);
     }
 
+    /* Check for virtual VS-mode interrupts. */
+    irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
+
     /* Check VS-mode interrupts */
-    irqs = pending & env->mideleg & env->hideleg & -vsie;
+    irq_delegated = pending & env->mideleg & env->hideleg;
+
+    /* Bring VS-level bits to correct position */
+    vsbits = irq_delegated & VS_MODE_INTERRUPTS;
+    irq_delegated &= ~VS_MODE_INTERRUPTS;
+    irq_delegated |= vsbits >> 1;
+
+    irqs = (irq_delegated | irqs_f_vs) & -vsie;
     if (irqs) {
         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
-                                        irqs >> 1, env->hviprio);
-        return (virq <= 0) ? virq : virq + 1;
+                                        irqs, env->hviprio);
+        if (virq <= 0 || (virq > 12 && virq <= 63)) {
+            return virq;
+        } else {
+            return virq + 1;
+        }
     }
 
     /* Indicate no pending interrupt */
@@ -620,28 +650,42 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
     }
 }
 
-uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
-                              uint64_t value)
+void riscv_cpu_interrupt(CPURISCVState *env)
 {
+    uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
     CPUState *cs = env_cpu(env);
-    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;
+
+    QEMU_IOTHREAD_LOCK_GUARD();
 
     if (env->virt_enabled) {
         gein = get_field(env->hstatus, HSTATUS_VGEIN);
         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
+        irqf = env->hvien & env->hvip & env->vsie;
+    } else {
+        irqf = env->mvien & env->mvip & env->sie;
     }
 
     vstip = env->vstime_irq ? MIP_VSTIP : 0;
 
+    if (env->mip | vsgein | vstip | irqf) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    }
+}
+
+uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
+{
+    uint64_t old = env->mip;
+
+    /* No need to update mip for VSTIP */
+    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
+
     QEMU_IOTHREAD_LOCK_GUARD();
 
     env->mip = (env->mip & ~mask) | (value & mask);
 
-    if (env->mip | vsgein | vstip) {
-        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
-    } else {
-        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
-    }
+    riscv_cpu_interrupt(env);
 
     return old;
 }
@@ -1600,20 +1644,22 @@ void riscv_cpu_do_interrupt(CPUState *cs)
     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
     uint64_t deleg = async ? env->mideleg : env->medeleg;
+    bool s_injected = env->mvip & (1 << cause) & env->mvien &&
+                      !(env->mip & (1 << cause));
+    bool vs_injected = env->hvip & (1 << cause) & env->hvien &&
+                       !(env->mip & (1 << cause));
     target_ulong tval = 0;
     target_ulong tinst = 0;
     target_ulong htval = 0;
     target_ulong mtval2 = 0;
 
-    if (cause == RISCV_EXCP_SEMIHOST) {
-        do_common_semihosting(cs);
-        env->pc += 4;
-        return;
-    }
-
     if (!async) {
         /* set tval to badaddr for traps with address information */
         switch (cause) {
+        case RISCV_EXCP_SEMIHOST:
+            do_common_semihosting(cs);
+            env->pc += 4;
+            return;
         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
         case RISCV_EXCP_LOAD_ADDR_MIS:
@@ -1690,13 +1736,14 @@ void riscv_cpu_do_interrupt(CPUState *cs)
               __func__, env->mhartid, async, cause, env->pc, tval,
               riscv_cpu_get_trap_name(cause, async));
 
-    if (env->priv <= PRV_S &&
-        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
+    if (env->priv <= PRV_S && cause < 64 &&
+        (((deleg >> cause) & 1) || s_injected || vs_injected)) {
         /* handle the trap in S-mode */
         if (riscv_has_ext(env, RVH)) {
             uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
 
-            if (env->virt_enabled && ((hdeleg >> cause) & 1)) {
+            if (env->virt_enabled &&
+                (((hdeleg >> cause) & 1) || vs_injected)) {
                 /* Trap to VS mode */
                 /*
                  * See if we need to adjust cause. Yes if its VS mode interrupt
@@ -30,6 +30,7 @@
 #include "qemu/guest-random.h"
 #include "qapi/error.h"
 
+
 /* CSR function table public API */
 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
 {
@@ -121,6 +122,10 @@ static RISCVException ctr(CPURISCVState *env, int csrno)
 
     if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
         (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
+        if (!riscv_cpu_cfg(env)->ext_zicntr) {
+            return RISCV_EXCP_ILLEGAL_INST;
+        }
+
         goto skip_ext_pmu_check;
     }
 
@@ -183,7 +188,8 @@ static RISCVException zcmt(CPURISCVState *env, int csrno)
 #if !defined(CONFIG_USER_ONLY)
 static RISCVException mctr(CPURISCVState *env, int csrno)
 {
-    int pmu_num = riscv_cpu_cfg(env)->pmu_num;
+    RISCVCPU *cpu = env_archcpu(env);
+    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
     int ctr_index;
     int base_csrno = CSR_MHPMCOUNTER3;
 
@@ -192,7 +198,7 @@ static RISCVException mctr(CPURISCVState *env, int csrno)
         base_csrno += 0x80;
     }
     ctr_index = csrno - base_csrno;
-    if (!pmu_num || ctr_index >= pmu_num) {
+    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
         /* The PMU is not enabled or counter is out of range */
         return RISCV_EXCP_ILLEGAL_INST;
     }
@@ -523,9 +529,12 @@ static RISCVException pmp(CPURISCVState *env, int csrno)
     return RISCV_EXCP_ILLEGAL_INST;
 }
 
-static RISCVException epmp(CPURISCVState *env, int csrno)
+static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
 {
-    if (riscv_cpu_cfg(env)->epmp) {
+    if (riscv_cpu_cfg(env)->ext_smepmp) {
+        return RISCV_EXCP_NONE;
+    }
+    if (riscv_cpu_cfg(env)->ext_zkr) {
         return RISCV_EXCP_NONE;
     }
 
@@ -1117,21 +1126,16 @@ static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
-/* Machine constants */
-
-#define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
-#define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
-                                      MIP_LCOFIP))
-#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
-#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
-
 #define VSTOPI_NUM_SRCS 5
 
-static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
-                                       VS_MODE_INTERRUPTS;
-static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
+#define LOCAL_INTERRUPTS (~0x1FFF)
+
+static const uint64_t delegable_ints =
+    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
+static const uint64_t vs_delegable_ints =
+    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
-                                 HS_MODE_INTERRUPTS;
+                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                          (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                          (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
@@ -1162,12 +1166,32 @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
     SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
     SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
-static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
-                                              SIP_LCOFIP;
+
+/*
+ * Spec allows for bits 13:63 to be either read-only or writable.
+ * So far we have interrupt LCOFIP in that region which is writable.
+ *
+ * Also, spec allows to inject virtual interrupts in this region even
+ * without any hardware interrupts for that interrupt number.
+ *
+ * For now interrupt in 13:63 region are all kept writable. 13 being
+ * LCOFIP and 14:63 being virtual only. Change this in future if we
+ * introduce more interrupts that are not writable.
+ */
+
+/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
+static const target_ulong mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
+                                               LOCAL_INTERRUPTS;
+static const target_ulong mvien_writable_mask = MIP_SSIP | MIP_SEIP |
+                                                LOCAL_INTERRUPTS;
+
+static const target_ulong sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
 static const target_ulong hip_writable_mask = MIP_VSSIP;
 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
-                                               MIP_VSEIP;
-static const target_ulong vsip_writable_mask = MIP_VSSIP;
+                                               MIP_VSEIP | LOCAL_INTERRUPTS;
+static const target_ulong hvien_writable_mask = LOCAL_INTERRUPTS;
+
+static const target_ulong vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
 
 const bool valid_vm_1_10_32[16] = {
     [VM_1_10_MBARE] = true,
@@ -1525,7 +1549,7 @@ static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
     env->mie = (env->mie & ~mask) | (new_val & mask);
 
     if (!riscv_has_ext(env, RVH)) {
-        env->mie &= ~((uint64_t)MIP_SGEIP);
+        env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
     }
 
     return RISCV_EXCP_NONE;
@@ -1562,6 +1586,52 @@ static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
     return ret;
 }
 
+static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
+                                  uint64_t *ret_val,
+                                  uint64_t new_val, uint64_t wr_mask)
+{
+    uint64_t mask = wr_mask & mvien_writable_mask;
+
+    if (ret_val) {
+        *ret_val = env->mvien;
+    }
+
+    env->mvien = (env->mvien & ~mask) | (new_val & mask);
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
+                                 target_ulong *ret_val,
+                                 target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvien64(env, csrno, &rval,
+                      ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
 {
     int irq;
@@ -1703,6 +1773,11 @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
         priv = PRV_M;
         break;
     case CSR_SIREG:
+        if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
+            env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
+            env->siselect <= ISELECT_IMSIC_EIE63) {
+            goto done;
+        }
         iprio = env->siprio;
         isel = env->siselect;
         priv = PRV_S;
@@ -1769,6 +1844,9 @@ static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
         priv = PRV_M;
         break;
     case CSR_STOPEI:
+        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
+            goto done;
+        }
         priv = PRV_S;
         break;
     case CSR_VSTOPEI:
@@ -2360,6 +2438,143 @@ static RISCVException rmw_miph(CPURISCVState *env, int csrno,
     return ret;
 }
 
+/*
+ * The function is written for two use-cases:
+ * 1- To access mvip csr as is for m-mode access.
+ * 2- To access sip as a combination of mip and mvip for s-mode.
+ *
+ * Both report bits 1, 5, 9 and 13:63 but with the exception of
+ * STIP being read-only zero in case of mvip when sstc extension
+ * is present.
+ * Also, sip needs to be read-only zero when both mideleg[i] and
+ * mvien[i] are zero but mvip needs to be an alias of mip.
+ */
+static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
+                                 uint64_t *ret_val,
+                                 uint64_t new_val, uint64_t wr_mask)
+{
+    RISCVCPU *cpu = env_archcpu(env);
+    target_ulong ret_mip = 0;
+    RISCVException ret;
+    uint64_t old_mvip;
+
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      No delegation. mvip[i] is alias of mip[i].
+     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
+     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
+     *                      mip[i].
+     *
+     *   So alias condition would be for bits:
+     *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
+     *          (!sstc & MIP_STIP)
+     *
+     *   Non-alias condition will be for bits:
+     *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
+     *
+     *  alias_mask denotes the bits that come from mip nalias_mask denotes bits
+     *  that come from hvip.
+     */
+    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (env->mideleg | ~env->mvien)) | MIP_STIP;
+    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (~env->mideleg & env->mvien);
+    uint64_t wr_mask_mvip;
+    uint64_t wr_mask_mip;
+
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      sip[i] read-only zero.
+     *   0           1      sip[i] alias of mvip[i].
+     *   1           X      sip[i] alias of mip[i].
+     *
+     * Both alias and non-alias mask remain same for sip except for bits
+     * which are zero in both mideleg and mvien.
+     */
+    if (csrno == CSR_SIP) {
+        /* Remove bits that are zero in both mideleg and mvien. */
+        alias_mask &= (env->mideleg | env->mvien);
+        nalias_mask &= (env->mideleg | env->mvien);
+    }
+
+    /*
+     * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
+     * that our in mip returned value.
+     */
+    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
+        get_field(env->menvcfg, MENVCFG_STCE)) {
+        alias_mask &= ~MIP_STIP;
+    }
+
+    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
+    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
+
+    /*
+     * For bits set in alias_mask, mvip needs to be alias of mip, so forward
+     * this to rmw_mip.
+     */
+    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    old_mvip = env->mvip;
+
+    /*
+     * Write to mvip. Update only non-alias bits. Alias bits were updated
+     * in mip in rmw_mip above.
+     */
+    if (wr_mask_mvip) {
+        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
+
+        /*
+         * Given mvip is separate source from mip, we need to trigger interrupt
+         * from here separately. Normally this happen from riscv_cpu_update_mip.
+         */
+        riscv_cpu_interrupt(env);
+    }
+
+    if (ret_val) {
+        ret_mip &= alias_mask;
+        old_mvip &= nalias_mask;
+
+        *ret_val = old_mvip | ret_mip;
+    }
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
+                               target_ulong *ret_val,
+                               target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvip64(env, csrno, &rval,
+                     ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 /* Supervisor Trap Setup */
 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
@@ -2404,16 +2619,36 @@ static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
+    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
+                          env->hideleg;
+    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
+    uint64_t rval, rval_vs, vsbits;
+    uint64_t wr_mask_vsie;
+    uint64_t wr_mask_mie;
     RISCVException ret;
-    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
 
     /* Bring VS-level bits to correct position */
-    new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
-    wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
+    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
+    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
+    new_val |= vsbits << 1;
+
+    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
+    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
+    wr_mask |= vsbits << 1;
+
+    wr_mask_mie = wr_mask & alias_mask;
+    wr_mask_vsie = wr_mask & nalias_mask;
+
+    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);
+
+    rval_vs = env->vsie & nalias_mask;
+    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);
 
-    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
     if (ret_val) {
-        *ret_val = (rval & mask) >> 1;
+        rval &= alias_mask;
+        vsbits = rval & VS_MODE_INTERRUPTS;
+        rval &= ~VS_MODE_INTERRUPTS;
+        *ret_val = rval | (vsbits >> 1) | rval_vs;
     }
 
     return ret;
@@ -2454,20 +2689,37 @@ static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
 {
+    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (~env->mideleg & env->mvien);
+    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
+    uint64_t sie_mask = wr_mask & nalias_mask;
     RISCVException ret;
-    uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
 
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      sie[i] read-only zero.
+     *   0           1      sie[i] is a separate writable bit.
+     *   1           X      sie[i] alias of mie[i].
+     *
+     * Both alias and non-alias mask remain same for sip except for bits
+     * which are zero in both mideleg and mvien.
+     */
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
         }
         ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
+        if (ret_val) {
+            *ret_val &= alias_mask;
+        }
     } else {
-        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
-    }
+        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
+        if (ret_val) {
+            *ret_val &= alias_mask;
+            *ret_val |= env->sie & nalias_mask;
+        }
 
-    if (ret_val) {
-        *ret_val &= mask;
+        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
     }
 
     return ret;
@@ -2609,21 +2861,36 @@ static RISCVException write_stval(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
+                                 uint64_t *ret_val,
+                                 uint64_t new_val, uint64_t wr_mask);
+
 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
     uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
+    uint64_t vsbits;
+
+    /* Add virtualized bits into vsip mask. */
+    mask |= env->hvien & ~env->hideleg;
 
     /* Bring VS-level bits to correct position */
-    new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
-    wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
+    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
+    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
+    new_val |= vsbits << 1;
+    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
+    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
+    wr_mask |= vsbits << 1;
 
-    ret = rmw_mip64(env, csrno, &rval, new_val,
+    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
     if (ret_val) {
-        *ret_val = (rval & mask) >> 1;
+        rval &= mask;
+        vsbits = rval & VS_MODE_INTERRUPTS;
+        rval &= ~VS_MODE_INTERRUPTS;
+        *ret_val = rval | (vsbits >> 1);
     }
 
     return ret;
@@ -2665,7 +2932,7 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                 uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
-    uint64_t mask = env->mideleg & sip_writable_mask;
+    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;
 
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
@@ -2673,11 +2940,12 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
         }
         ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
     } else {
-        ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
+        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
     }
 
     if (ret_val) {
-        *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
+        *ret_val &= (env->mideleg | env->mvien) &
+                    (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
     }
 
     return ret;
@@ -2842,6 +3110,7 @@ static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
 
     *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
     *val |= iprio;
+
     return RISCV_EXCP_NONE;
 }
 
@@ -2913,6 +3182,52 @@ static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
+                                  uint64_t *ret_val,
+                                  uint64_t new_val, uint64_t wr_mask)
+{
+    uint64_t mask = wr_mask & hvien_writable_mask;
+
+    if (ret_val) {
+        *ret_val = env->hvien;
+    }
+
+    env->hvien = (env->hvien & ~mask) | (new_val & mask);
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
+                                 target_ulong *ret_val,
+                                 target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_hvien64(env, csrno, &rval,
+                      ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
                                     uint64_t *ret_val,
                                     uint64_t new_val, uint64_t wr_mask)
@@ -2958,16 +3273,94 @@ static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
     return ret;
 }
 
+/*
+ * The function is written for two use-cases:
+ * 1- To access hvip csr as is for HS-mode access.
+ * 2- To access vsip as a combination of hvip, and mip for vs-mode.
+ *
+ * Both report bits 2, 6, 10 and 13:63.
+ * vsip needs to be read-only zero when both hideleg[i] and
+ * hvien[i] are zero.
+ */
 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
+    uint64_t old_hvip;
+    uint64_t ret_mip;
+
+    /*
+     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
+     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
+     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
+     * bits are actually being maintained in mip so we read them from there.
+     * This way we have a single source of truth and allows for easier
+     * implementation.
+     *
+     * For bits 13:63 we have:
+     *
+     * hideleg[i]  hvien[i]
+     *   0           0      No delegation. vsip[i] readonly zero.
+     *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
+     *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
+     *
+     * alias_mask denotes the bits that come from sip (mip here given we
+     * maintain all bits there). nalias_mask denotes bits that come from
+     * hvip.
+     */
+    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
+    uint64_t nalias_mask = (~env->hideleg & env->hvien);
+    uint64_t wr_mask_hvip;
+    uint64_t wr_mask_mip;
+
+    /*
+     * Both alias and non-alias mask remain same for vsip except:
+     * 1- For VS* bits if they are zero in hideleg.
+     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
+     */
+    if (csrno == CSR_VSIP) {
+        /* zero-out VS* bits that are not delegated to VS mode. */
+        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);
+
+        /*
+         * zero-out 13:63 bits that are zero in both hideleg and hvien.
+         * nalias_mask mask can not contain any VS* bits so only second
+         * condition applies on it.
+         */
+        nalias_mask &= (env->hideleg | env->hvien);
+        alias_mask &= (env->hideleg | env->hvien);
+    }
+
+    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
+    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;
+
+    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
+    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    old_hvip = env->hvip;
+
+    if (wr_mask_hvip) {
+        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);
+
+        /*
+         * Given hvip is separate source from mip, we need to trigger interrupt
+         * from here separately. Normally this happen from riscv_cpu_update_mip.
+         */
+        riscv_cpu_interrupt(env);
+    }
 
-    ret = rmw_mip64(env, csrno, ret_val, new_val,
-                    wr_mask & hvip_writable_mask);
     if (ret_val) {
-        *ret_val &= VS_MODE_INTERRUPTS;
+        /* Only take VS* bits from mip. */
+        ret_mip &= alias_mask;
+
+        /* Take in non-delegated 13:63 bits from hvip. */
+        old_hvip &= nalias_mask;
+
+        *ret_val = ret_mip | old_hvip;
     }
 
     return ret;
@@ -3858,7 +4251,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
     int csr_min_priv = csr_ops[csrno].min_priv_ver;
 
     /* ensure the CSR extension is enabled */
-    if (!riscv_cpu_cfg(env)->ext_icsr) {
+    if (!riscv_cpu_cfg(env)->ext_zicsr) {
         return RISCV_EXCP_ILLEGAL_INST;
     }
 
@@ -4165,14 +4558,14 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },

    /* Virtual Interrupts for Supervisor Level (AIA) */
-   [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
-   [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
+   [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
+   [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },

    /* Machine-Level High-Half CSRs (AIA) */
    [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
    [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
-   [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
-   [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
+   [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
+   [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
    [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },

    /* Execution environment configuration */
@@ -4346,14 +4739,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
                        .min_priv_ver = PRIV_VERSION_1_12_0 },

    /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
-   [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
+   [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
    [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
                     write_hvictl },
    [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
                       write_hviprio1 },
    [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
                       write_hviprio2 },

    /*
     * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
     */
@@ -4368,8 +4760,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
    [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
                       rmw_hidelegh },
-   [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero,
-                    write_ignore },
+   [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
    [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
    [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
                        write_hviprio1h },
@@ -4379,7 +4770,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },

    /* Physical Memory Protection */
-   [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
+   [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
                      .min_priv_ver = PRIV_VERSION_1_11_0 },
    [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
@@ -342,7 +342,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
         g_assert_not_reached();
     }
 
-    if (cpu->cfg.ext_icsr) {
+    if (cpu->cfg.ext_zicsr) {
         int base_reg = cs->gdb_num_regs;
         gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
                                  riscv_gen_dynamic_csr_xml(cs, base_reg),
@@ -799,7 +799,7 @@ static bool trans_fence(DisasContext *ctx, arg_fence *a)
 
 static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
 {
-    if (!ctx->cfg_ptr->ext_ifencei) {
+    if (!ctx->cfg_ptr->ext_zifencei) {
         return false;
     }
 
@@ -112,24 +112,27 @@ GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
         return false; \
     }
 
-static bool zvbb_vv_check(DisasContext *s, arg_rmrr *a)
+static bool zvkb_vv_check(DisasContext *s, arg_rmrr *a)
 {
-    return opivv_check(s, a) && s->cfg_ptr->ext_zvbb == true;
+    return opivv_check(s, a) &&
+           (s->cfg_ptr->ext_zvbb == true || s->cfg_ptr->ext_zvkb == true);
 }
 
-static bool zvbb_vx_check(DisasContext *s, arg_rmrr *a)
+static bool zvkb_vx_check(DisasContext *s, arg_rmrr *a)
 {
-    return opivx_check(s, a) && s->cfg_ptr->ext_zvbb == true;
+    return opivx_check(s, a) &&
+           (s->cfg_ptr->ext_zvbb == true || s->cfg_ptr->ext_zvkb == true);
 }
 
 /* vrol.v[vx] */
-GEN_OPIVV_GVEC_TRANS_CHECK(vrol_vv, rotlv, zvbb_vv_check)
-GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vrol_vx, rotls, zvbb_vx_check)
+GEN_OPIVV_GVEC_TRANS_CHECK(vrol_vv, rotlv, zvkb_vv_check)
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vrol_vx, rotls, zvkb_vx_check)
 
 /* vror.v[vxi] */
-GEN_OPIVV_GVEC_TRANS_CHECK(vror_vv, rotrv, zvbb_vv_check)
-GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vror_vx, rotrs, zvbb_vx_check)
-GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri, zvbb_vx_check)
+GEN_OPIVV_GVEC_TRANS_CHECK(vror_vv, rotrv, zvkb_vv_check)
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vror_vx, rotrs, zvkb_vx_check)
+GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri,
+                           zvkb_vx_check)
 
 #define GEN_OPIVX_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@@ -147,8 +150,8 @@ GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri, zvbb_vx_check
 }
 
 /* vandn.v[vx] */
-GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andc, zvbb_vv_check)
-GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvbb_vx_check)
+GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andc, zvkb_vv_check)
+GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvkb_vx_check)
 
 #define GEN_OPIV_TRANS(NAME, CHECK) \
 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
@@ -188,8 +191,16 @@ static bool zvbb_opiv_check(DisasContext *s, arg_rmr *a)
            vext_check_ss(s, a->rd, a->rs2, a->vm);
 }
 
-GEN_OPIV_TRANS(vbrev8_v, zvbb_opiv_check)
-GEN_OPIV_TRANS(vrev8_v, zvbb_opiv_check)
+static bool zvkb_opiv_check(DisasContext *s, arg_rmr *a)
+{
+    return (s->cfg_ptr->ext_zvbb == true || s->cfg_ptr->ext_zvkb == true) &&
+           require_rvv(s) &&
+           vext_check_isa_ill(s) &&
+           vext_check_ss(s, a->rd, a->rs2, a->vm);
+}
+
+GEN_OPIV_TRANS(vbrev8_v, zvkb_opiv_check)
+GEN_OPIV_TRANS(vrev8_v, zvkb_opiv_check)
 GEN_OPIV_TRANS(vbrev_v, zvbb_opiv_check)
 GEN_OPIV_TRANS(vclz_v, zvbb_opiv_check)
 GEN_OPIV_TRANS(vctz_v, zvbb_opiv_check)
@@ -16,16 +16,16 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #define REQUIRE_ZICBOM(ctx) do { \
-    if (!ctx->cfg_ptr->ext_icbom) { \
+    if (!ctx->cfg_ptr->ext_zicbom) { \
         return false; \
     } \
 } while (0)
 
 #define REQUIRE_ZICBOZ(ctx) do { \
-    if (!ctx->cfg_ptr->ext_icboz) { \
+    if (!ctx->cfg_ptr->ext_zicboz) { \
        return false; \
    } \
 } while (0)
 
 static bool trans_cbo_clean(DisasContext *ctx, arg_cbo_clean *a)
@@ -140,6 +140,19 @@ static KVMCPUConfig kvm_misa_ext_cfgs[] = {
     KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
 };
 
+static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
+                                     const char *name,
+                                     void *opaque, Error **errp)
+{
+    KVMCPUConfig *misa_ext_cfg = opaque;
+    target_ulong misa_bit = misa_ext_cfg->offset;
+    RISCVCPU *cpu = RISCV_CPU(obj);
+    CPURISCVState *env = &cpu->env;
+    bool value = env->misa_ext_mask & misa_bit;
+
+    visit_type_bool(v, name, &value, errp);
+}
+
 static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
@@ -213,13 +226,20 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
                              .kvm_reg_id = _reg_id}
 
 static KVMCPUConfig kvm_multi_ext_cfgs[] = {
-    KVM_EXT_CFG("zicbom", ext_icbom, KVM_RISCV_ISA_EXT_ZICBOM),
-    KVM_EXT_CFG("zicboz", ext_icboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+    KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
+    KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+    KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
+    KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
+    KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
     KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
+    KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
+    KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
     KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
+    KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
     KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
     KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
     KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
+    KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
     KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
 };
 
@@ -244,6 +264,17 @@ static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
     return *ext_enabled;
 }
 
+static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
+                                      const char *name,
+                                      void *opaque, Error **errp)
+{
+    KVMCPUConfig *multi_ext_cfg = opaque;
+    RISCVCPU *cpu = RISCV_CPU(obj);
+    bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
+
+    visit_type_bool(v, name, &value, errp);
+}
+
 static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
@@ -346,6 +377,15 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
     }
 }
 
+static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
+                                    const char *name,
+                                    void *opaque, Error **errp)
+{
+    bool value = false;
+
+    visit_type_bool(v, name, &value, errp);
+}
+
 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
@@ -376,7 +416,8 @@ static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
      * to enable any of them.
      */
     object_property_add(obj, prop_name, "bool",
-                        NULL, cpu_set_cfg_unavailable,
+                        cpu_get_cfg_unavailable,
+                        cpu_set_cfg_unavailable,
                         NULL, (void *)prop_name);
 }
 
@@ -406,7 +447,7 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
         misa_cfg->description = riscv_get_misa_ext_description(bit);
 
         object_property_add(cpu_obj, misa_cfg->name, "bool",
-                            NULL,
+                            kvm_cpu_get_misa_ext_cfg,
                             kvm_cpu_set_misa_ext_cfg,
                             NULL, misa_cfg);
         object_property_set_description(cpu_obj, misa_cfg->name,
@@ -422,7 +463,7 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
         KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];
 
         object_property_add(cpu_obj, multi_cfg->name, "bool",
-                            NULL,
+                            kvm_cpu_get_multi_ext_cfg,
                             kvm_cpu_set_multi_ext_cfg,
                             NULL, multi_cfg);
     }
@@ -804,11 +845,11 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
         kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
     }
 
-    if (cpu->cfg.ext_icbom) {
+    if (cpu->cfg.ext_zicbom) {
         kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
     }
 
-    if (cpu->cfg.ext_icboz) {
+    if (cpu->cfg.ext_zicboz) {
         kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
     }
 }
@@ -897,11 +938,11 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
         kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
     }
 
-    if (cpu->cfg.ext_icbom) {
+    if (cpu->cfg.ext_zicbom) {
         kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
     }
 
-    if (cpu->cfg.ext_icboz) {
+    if (cpu->cfg.ext_zicboz) {
         kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
     }
 }
@@ -79,8 +79,8 @@ static bool hyper_needed(void *opaque)
 
 static const VMStateDescription vmstate_hyper = {
     .name = "cpu/hyper",
-    .version_id = 2,
-    .minimum_version_id = 2,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .needed = hyper_needed,
     .fields = (VMStateField[]) {
         VMSTATE_UINTTL(env.hstatus, RISCVCPU),
@@ -92,6 +92,8 @@ static const VMStateDescription vmstate_hyper = {
         VMSTATE_UINTTL(env.hgatp, RISCVCPU),
         VMSTATE_UINTTL(env.hgeie, RISCVCPU),
         VMSTATE_UINTTL(env.hgeip, RISCVCPU),
+        VMSTATE_UINT64(env.hvien, RISCVCPU),
+        VMSTATE_UINT64(env.hvip, RISCVCPU),
         VMSTATE_UINT64(env.htimedelta, RISCVCPU),
         VMSTATE_UINT64(env.vstimecmp, RISCVCPU),
 
@@ -106,6 +108,7 @@ static const VMStateDescription vmstate_hyper = {
         VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),
+       VMSTATE_UINT64(env.vsie, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),
@@ -313,7 +316,7 @@ static bool pmu_needed(void *opaque)
 {
     RISCVCPU *cpu = opaque;
 
-    return cpu->cfg.pmu_num;
+    return (cpu->cfg.pmu_mask > 0);
 }
 
 static const VMStateDescription vmstate_pmu_ctr_state = {
@@ -351,8 +354,8 @@ static const VMStateDescription vmstate_jvt = {
 
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
-    .version_id = 8,
-    .minimum_version_id = 8,
+    .version_id = 9,
+    .minimum_version_id = 9,
     .post_load = riscv_cpu_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
@@ -379,6 +382,9 @@ const VMStateDescription vmstate_riscv_cpu = {
         VMSTATE_UINT64(env.mip, RISCVCPU),
         VMSTATE_UINT64(env.miclaim, RISCVCPU),
         VMSTATE_UINT64(env.mie, RISCVCPU),
+        VMSTATE_UINT64(env.mvien, RISCVCPU),
+        VMSTATE_UINT64(env.mvip, RISCVCPU),
+        VMSTATE_UINT64(env.sie, RISCVCPU),
         VMSTATE_UINT64(env.mideleg, RISCVCPU),
         VMSTATE_UINTTL(env.satp, RISCVCPU),
         VMSTATE_UINTTL(env.stval, RISCVCPU),
@@ -91,7 +91,7 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
     if (pmp_index < MAX_RISCV_PMPS) {
         bool locked = true;
 
-        if (riscv_cpu_cfg(env)->epmp) {
+        if (riscv_cpu_cfg(env)->ext_smepmp) {
             /* mseccfg.RLB is set */
             if (MSECCFG_RLB_ISSET(env)) {
                 locked = false;
@@ -123,6 +123,11 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
         if (locked) {
             qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
|
||||||
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
|
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
|
||||||
|
/* If !mseccfg.MML then ignore writes with encoding RW=01 */
|
||||||
|
if ((val & PMP_WRITE) && !(val & PMP_READ) &&
|
||||||
|
!MSECCFG_MML_ISSET(env)) {
|
||||||
|
val &= ~(PMP_WRITE | PMP_READ);
|
||||||
|
}
|
||||||
env->pmp_state.pmp[pmp_index].cfg_reg = val;
|
env->pmp_state.pmp[pmp_index].cfg_reg = val;
|
||||||
pmp_update_rule_addr(env, pmp_index);
|
pmp_update_rule_addr(env, pmp_index);
|
||||||
return true;
|
return true;
|
||||||
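The new branch above follows the comment it carries: while mseccfg.MML is clear, a pmpcfg write with the RW=01 encoding has its R and W bits dropped instead of being stored as-is. A minimal standalone sketch of that masking rule (the DEMO_* constants and helper are illustrative stand-ins, not the QEMU definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative pmpcfg permission bits: R is bit 0, W is bit 1. */
    #define DEMO_PMP_READ  (1u << 0)
    #define DEMO_PMP_WRITE (1u << 1)

    /*
     * Mirror of the check added above: with MML clear, RW=01 is not kept,
     * so both permission bits are cleared before the value is stored.
     */
    static uint8_t demo_sanitize_pmpcfg(uint8_t val, int mml_set)
    {
        if ((val & DEMO_PMP_WRITE) && !(val & DEMO_PMP_READ) && !mml_set) {
            val &= (uint8_t)~(DEMO_PMP_WRITE | DEMO_PMP_READ);
        }
        return val;
    }

    int main(void)
    {
        printf("0x%02x\n", demo_sanitize_pmpcfg(0x02, 0)); /* -> 0x00 */
        printf("0x%02x\n", demo_sanitize_pmpcfg(0x02, 1)); /* -> 0x02 */
        return 0;
    }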
@@ -135,6 +140,16 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
     return false;
 }
 
+void pmp_unlock_entries(CPURISCVState *env)
+{
+    uint32_t pmp_num = pmp_get_num_rules(env);
+    int i;
+
+    for (i = 0; i < pmp_num; i++) {
+        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
+    }
+}
+
 static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                              target_ulong *ea)
 {
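The pmp_unlock_entries() helper added above clears the L bit and the A (address-matching) field of every entry: permissions are left alone, but each entry goes back to an unlocked, OFF state. A self-contained sketch of the per-entry effect, again with illustrative DEMO_* constants rather than the QEMU ones:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PMP_AMATCH (3u << 3)   /* A field, bits 3-4 of pmpcfg */
    #define DEMO_PMP_LOCK   (1u << 7)   /* L bit */

    /* What the loop above does to a single pmpcfg byte. */
    static uint8_t demo_unlock_entry(uint8_t cfg)
    {
        return (uint8_t)(cfg & ~(DEMO_PMP_LOCK | DEMO_PMP_AMATCH));
    }

    int main(void)
    {
        /* A locked NAPOT RWX entry (0x9f) becomes a plain RWX, OFF entry (0x07). */
        printf("0x%02x\n", demo_unlock_entry(0x9f));
        return 0;
    }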
@@ -340,9 +355,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
 
             /*
              * Convert the PMP permissions to match the truth table in the
-             * ePMP spec.
+             * Smepmp spec.
              */
-            const uint8_t epmp_operation =
+            const uint8_t smepmp_operation =
                 ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
                 ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
                 (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
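The expression above packs the entry's L, R and W bits (the X term falls just outside the context shown) into one small index so the Smepmp truth table can be handled with a plain switch, now under its ratified name. A standalone sketch of that packing, assuming the usual pmpcfg bit positions (the DEMO_* names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PMP_READ  (1u << 0)
    #define DEMO_PMP_WRITE (1u << 1)
    #define DEMO_PMP_EXEC  (1u << 2)
    #define DEMO_PMP_LOCK  (1u << 7)

    /* Index layout: bit 3 = L, bit 2 = R, bit 1 = W, bit 0 = X. */
    static uint8_t demo_smepmp_operation(uint8_t cfg)
    {
        return (uint8_t)(((cfg & DEMO_PMP_LOCK) >> 4) |
                         ((cfg & DEMO_PMP_READ) << 2) |
                         (cfg & DEMO_PMP_WRITE) |
                         ((cfg & DEMO_PMP_EXEC) >> 2));
    }

    int main(void)
    {
        /* A locked read+execute entry maps to index 0b1101 == 13. */
        printf("%u\n", demo_smepmp_operation(DEMO_PMP_LOCK | DEMO_PMP_READ |
                                             DEMO_PMP_EXEC));
        return 0;
    }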
@@ -367,7 +382,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
              * If mseccfg.MML Bit set, do the enhanced pmp priv check
              */
             if (mode == PRV_M) {
-                switch (epmp_operation) {
+                switch (smepmp_operation) {
                 case 0:
                 case 1:
                 case 4:
@@ -398,7 +413,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                     g_assert_not_reached();
                 }
             } else {
-                switch (epmp_operation) {
+                switch (smepmp_operation) {
                 case 0:
                 case 8:
                 case 9:
@@ -574,7 +589,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
         }
     }
 
-    if (riscv_cpu_cfg(env)->epmp) {
+    if (riscv_cpu_cfg(env)->ext_smepmp) {
         /* Sticky bits */
         val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
         if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
@@ -28,6 +28,7 @@ typedef enum {
     PMP_READ = 1 << 0,
     PMP_WRITE = 1 << 1,
     PMP_EXEC = 1 << 2,
+    PMP_AMATCH = (3 << 3),
     PMP_LOCK = 1 << 7
 } pmp_priv_t;
 
@@ -81,6 +82,7 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
 void pmp_update_rule_nums(CPURISCVState *env);
 uint32_t pmp_get_num_rules(CPURISCVState *env);
 int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);
+void pmp_unlock_entries(CPURISCVState *env);
 
 #define MSECCFG_MML_ISSET(env) get_field(env->mseccfg, MSECCFG_MML)
 #define MSECCFG_MMWP_ISSET(env) get_field(env->mseccfg, MSECCFG_MMWP)
@@ -18,14 +18,13 @@
 
 #include "qemu/osdep.h"
 #include "qemu/log.h"
+#include "qemu/error-report.h"
 #include "cpu.h"
 #include "pmu.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/device_tree.h"
 
 #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */
-#define MAKE_32BIT_MASK(shift, length) \
-        (((uint32_t)(~0UL) >> (32 - (length))) << (shift))
 
 /*
  * To keep it simple, any event can be mapped to any programmable counters in
@@ -34,13 +33,9 @@
  * to provide the correct value as well. Heterogeneous PMU per hart is not
  * supported yet. Thus, number of counters are same across all harts.
  */
-void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name)
+void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
 {
     uint32_t fdt_event_ctr_map[15] = {};
-    uint32_t cmask;
 
-    /* All the programmable counters can map to any event */
-    cmask = MAKE_32BIT_MASK(3, num_ctrs);
-
     /*
      * The event encoding is specified in the SBI specification
@@ -188,7 +183,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
     CPURISCVState *env = &cpu->env;
     gpointer value;
 
-    if (!cpu->cfg.pmu_num) {
+    if (!cpu->cfg.pmu_mask) {
         return 0;
     }
     value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
@@ -434,22 +429,23 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
 }
 
 
-int riscv_pmu_init(RISCVCPU *cpu, int num_counters)
+void riscv_pmu_init(RISCVCPU *cpu, Error **errp)
 {
-    if (num_counters > (RV_MAX_MHPMCOUNTERS - 3)) {
-        return -1;
+    if (cpu->cfg.pmu_mask & (COUNTEREN_CY | COUNTEREN_TM | COUNTEREN_IR)) {
+        error_setg(errp, "\"pmu-mask\" contains invalid bits (0-2) set");
+        return;
+    }
+
+    if (ctpop32(cpu->cfg.pmu_mask) > (RV_MAX_MHPMCOUNTERS - 3)) {
+        error_setg(errp, "Number of counters exceeds maximum available");
+        return;
     }
 
     cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
     if (!cpu->pmu_event_ctr_map) {
-        /* PMU support can not be enabled */
-        qemu_log_mask(LOG_UNIMP, "PMU events can't be supported\n");
-        cpu->cfg.pmu_num = 0;
-        return -1;
+        error_setg(errp, "Unable to allocate PMU event hash table");
+        return;
     }
 
-    /* Create a bitmask of available programmable counters */
-    cpu->pmu_avail_ctrs = MAKE_32BIT_MASK(3, num_counters);
-
-    return 0;
+    cpu->pmu_avail_ctrs = cpu->cfg.pmu_mask;
 }
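riscv_pmu_init() now takes its counter selection from a mask rather than a plain count: bit N of cpu->cfg.pmu_mask enables counter N, the fixed cycle/time/instret positions (bits 0-2) are rejected, and the set bits no longer have to be contiguous. A small sketch of how the old count-style configuration maps onto an equivalent mask (helper names and the demo constant are illustrative, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the cycle/time/instret bits rejected by the new check. */
    #define DEMO_FIXED_CTR_BITS 0x7u

    /*
     * Equivalent of a count of N programmable counters for small N:
     * N contiguous bits starting at mhpmcounter3, i.e. bit 3.
     */
    static uint32_t demo_pmu_num_to_mask(unsigned num_ctrs)
    {
        return ((UINT32_C(1) << num_ctrs) - 1) << 3;
    }

    static int demo_mask_is_valid(uint32_t mask)
    {
        return (mask & DEMO_FIXED_CTR_BITS) == 0;
    }

    int main(void)
    {
        uint32_t mask = demo_pmu_num_to_mask(16);
        printf("16 counters ~ mask 0x%08x (valid=%d)\n",
               mask, demo_mask_is_valid(mask));
        printf("mask 0x7 valid=%d\n", demo_mask_is_valid(0x7));
        return 0;
    }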
@@ -17,16 +17,17 @@
  */
 
 #include "cpu.h"
+#include "qapi/error.h"
 
 bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                         uint32_t target_ctr);
 bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env,
                                   uint32_t target_ctr);
 void riscv_pmu_timer_cb(void *priv);
-int riscv_pmu_init(RISCVCPU *cpu, int num_counters);
+void riscv_pmu_init(RISCVCPU *cpu, Error **errp);
 int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                                uint32_t ctr_idx);
 int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
-void riscv_pmu_generate_fdt_node(void *fdt, int num_counters, char *pmu_name);
+void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
 int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
                           uint32_t ctr_idx);
@@ -24,8 +24,17 @@
 
 #include "qemu/osdep.h"
 
+#include "qapi/error.h"
 #include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/visitor.h"
+#include "qom/qom-qobject.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
 #include "cpu-qom.h"
+#include "cpu.h"
 
 static void riscv_cpu_add_definition(gpointer data, gpointer user_data)
 {
@@ -55,3 +64,154 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
 
     return cpu_list;
 }
+
+static void riscv_check_if_cpu_available(RISCVCPU *cpu, Error **errp)
+{
+    if (!riscv_cpu_accelerator_compatible(cpu)) {
+        g_autofree char *name = riscv_cpu_get_name(cpu);
+        const char *accel = kvm_enabled() ? "kvm" : "tcg";
+
+        error_setg(errp, "'%s' CPU not available with %s", name, accel);
+        return;
+    }
+}
+
+static void riscv_obj_add_qdict_prop(Object *obj, QDict *qdict_out,
+                                     const char *name)
+{
+    ObjectProperty *prop = object_property_find(obj, name);
+
+    if (prop) {
+        QObject *value;
+
+        assert(prop->get);
+        value = object_property_get_qobject(obj, name, &error_abort);
+
+        qdict_put_obj(qdict_out, name, value);
+    }
+}
+
+static void riscv_obj_add_multiext_props(Object *obj, QDict *qdict_out,
+                                         const RISCVCPUMultiExtConfig *arr)
+{
+    for (int i = 0; arr[i].name != NULL; i++) {
+        riscv_obj_add_qdict_prop(obj, qdict_out, arr[i].name);
+    }
+}
+
+static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props,
+                                           const QDict *qdict_in,
+                                           Error **errp)
+{
+    const QDictEntry *qe;
+    Visitor *visitor;
+    Error *local_err = NULL;
+
+    visitor = qobject_input_visitor_new(props);
+    if (!visit_start_struct(visitor, NULL, NULL, 0, &local_err)) {
+        goto err;
+    }
+
+    for (qe = qdict_first(qdict_in); qe; qe = qdict_next(qdict_in, qe)) {
+        object_property_find_err(obj, qe->key, &local_err);
+        if (local_err) {
+            goto err;
+        }
+
+        object_property_set(obj, qe->key, visitor, &local_err);
+        if (local_err) {
+            goto err;
+        }
+    }
+
+    visit_check_struct(visitor, &local_err);
+    if (local_err) {
+        goto err;
+    }
+
+    riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err);
+    if (local_err) {
+        goto err;
+    }
+
+    visit_end_struct(visitor, NULL);
+
+err:
+    error_propagate(errp, local_err);
+    visit_free(visitor);
+}
+
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+                                                     CpuModelInfo *model,
+                                                     Error **errp)
+{
+    CpuModelExpansionInfo *expansion_info;
+    const QDict *qdict_in = NULL;
+    QDict *qdict_out;
+    ObjectClass *oc;
+    Object *obj;
+    Error *local_err = NULL;
+
+    if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
+        error_setg(errp, "The requested expansion type is not supported");
+        return NULL;
+    }
+
+    oc = cpu_class_by_name(TYPE_RISCV_CPU, model->name);
+    if (!oc) {
+        error_setg(errp, "The CPU type '%s' is not a known RISC-V CPU type",
+                   model->name);
+        return NULL;
+    }
+
+    if (model->props) {
+        qdict_in = qobject_to(QDict, model->props);
+        if (!qdict_in) {
+            error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
+            return NULL;
+        }
+    }
+
+    obj = object_new(object_class_get_name(oc));
+
+    riscv_check_if_cpu_available(RISCV_CPU(obj), &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        object_unref(obj);
+        return NULL;
+    }
+
+    if (qdict_in) {
+        riscv_cpuobj_validate_qdict_in(obj, model->props, qdict_in,
+                                       &local_err);
+        if (local_err) {
+            error_propagate(errp, local_err);
+            object_unref(obj);
+            return NULL;
+        }
+    }
+
+    expansion_info = g_new0(CpuModelExpansionInfo, 1);
+    expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
+    expansion_info->model->name = g_strdup(model->name);
+
+    qdict_out = qdict_new();
+
+    riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_extensions);
+    riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_experimental_exts);
+    riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_vendor_exts);
+
+    /* Add our CPU boolean options too */
+    riscv_obj_add_qdict_prop(obj, qdict_out, "mmu");
+    riscv_obj_add_qdict_prop(obj, qdict_out, "pmp");
+
+    if (!qdict_size(qdict_out)) {
+        qobject_unref(qdict_out);
+    } else {
+        expansion_info->model->props = QOBJECT(qdict_out);
+    }
+
+    object_unref(obj);
+
+    return expansion_info;
+}
@@ -278,23 +278,23 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
           riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
           riscv_has_ext(env, RVD) &&
-          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
+          cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) {
 
-        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_icsr)) &&
-            !cpu->cfg.ext_icsr) {
+        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) &&
+            !cpu->cfg.ext_zicsr) {
            error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
            return;
        }
 
-        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ifencei)) &&
-            !cpu->cfg.ext_ifencei) {
+        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) &&
+            !cpu->cfg.ext_zifencei) {
            error_setg(errp, "RVG requires Zifencei but user set "
                       "Zifencei to false");
            return;
        }
 
-        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_icsr), true);
-        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_ifencei), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true);
 
         env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
         env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
@@ -329,7 +329,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
+    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
         error_setg(errp, "F extension requires Zicsr");
         return;
     }
@@ -434,7 +434,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
     }
 
     if (cpu->cfg.ext_zfinx) {
-        if (!cpu->cfg.ext_icsr) {
+        if (!cpu->cfg.ext_zicsr) {
             error_setg(errp, "Zfinx extension requires Zicsr");
             return;
         }
@@ -494,18 +494,60 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
+    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
         error_setg(errp, "Zcmt extension requires Zicsr extension");
         return;
     }
 
+    /*
+     * Shorthand vector crypto extensions
+     */
+    if (cpu->cfg.ext_zvknc) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
+    }
+
+    if (cpu->cfg.ext_zvkng) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
+    }
+
+    if (cpu->cfg.ext_zvkn) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
+    }
+
+    if (cpu->cfg.ext_zvksc) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
+    }
+
+    if (cpu->cfg.ext_zvksg) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
+    }
+
+    if (cpu->cfg.ext_zvks) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
+    }
+
+    if (cpu->cfg.ext_zvkt) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
+    }
+
     /*
      * In principle Zve*x would also suffice here, were they supported
      * in qemu
      */
-    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
-         cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
-        !cpu->cfg.ext_zve32f) {
+    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
+         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
+         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
         error_setg(errp,
                    "Vector crypto extensions require V or Zve* extensions");
         return;
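The block above expands each shorthand vector crypto extension into its members before the individual extensions are validated, so enabling one of the group names is the same as enabling its constituents. A compact, self-contained restatement of the groupings encoded in the hunk above (the struct and names below are purely illustrative):

    #include <stdio.h>

    struct demo_zvk_group {
        const char *name;
        const char *members[5];
    };

    /* Mirrors the expansion performed above. */
    static const struct demo_zvk_group demo_zvk_groups[] = {
        { "zvknc", { "zvkn", "zvbc" } },
        { "zvkng", { "zvkn", "zvkg" } },
        { "zvkn",  { "zvkned", "zvknhb", "zvkb", "zvkt" } },
        { "zvksc", { "zvks", "zvbc" } },
        { "zvksg", { "zvks", "zvkg" } },
        { "zvks",  { "zvksed", "zvksh", "zvkb", "zvkt" } },
        { "zvkt",  { "zvbb", "zvbc" } },
    };

    int main(void)
    {
        for (unsigned i = 0;
             i < sizeof(demo_zvk_groups) / sizeof(demo_zvk_groups[0]); i++) {
            printf("%s ->", demo_zvk_groups[i].name);
            for (unsigned j = 0; j < 5 && demo_zvk_groups[i].members[j]; j++) {
                printf(" %s", demo_zvk_groups[i].members[j]);
            }
            printf("\n");
        }
        return 0;
    }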
@@ -541,6 +583,27 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
     }
 
+    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
+        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
+            error_setg(errp, "zicntr requires zicsr");
+            return;
+        }
+        cpu->cfg.ext_zicntr = false;
+    }
+
+    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
+        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
+            error_setg(errp, "zihpm requires zicsr");
+            return;
+        }
+        cpu->cfg.ext_zihpm = false;
+    }
+
+    if (!cpu->cfg.ext_zihpm) {
+        cpu->cfg.pmu_mask = 0;
+        cpu->pmu_avail_ctrs = 0;
+    }
+
     /*
      * Disable isa extensions based on priv spec after we
      * validated and set everything we need.
@@ -548,6 +611,44 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
     riscv_cpu_disable_priv_spec_isa_exts(cpu);
 }
 
+void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
+{
+    CPURISCVState *env = &cpu->env;
+    Error *local_err = NULL;
+
+    riscv_cpu_validate_priv_spec(cpu, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    riscv_cpu_validate_misa_priv(env, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
+        /*
+         * Enhanced PMP should only be available
+         * on harts with PMP support
+         */
+        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
+        return;
+    }
+
+    riscv_cpu_validate_set_extensions(cpu, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+}
+
+bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
+{
+    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
+}
+
 static bool riscv_cpu_is_generic(Object *cpu_obj)
 {
     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
@@ -563,10 +664,9 @@ static bool riscv_cpu_is_generic(Object *cpu_obj)
 static bool tcg_cpu_realize(CPUState *cs, Error **errp)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
-    CPURISCVState *env = &cpu->env;
     Error *local_err = NULL;
 
-    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
+    if (!riscv_cpu_tcg_compatible(cpu)) {
         g_autofree char *name = riscv_cpu_get_name(cpu);
         error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                    name);
@@ -579,46 +679,32 @@ static bool tcg_cpu_realize(CPUState *cs, Error **errp)
         return false;
     }
 
-    riscv_cpu_validate_priv_spec(cpu, &local_err);
-    if (local_err != NULL) {
-        error_propagate(errp, local_err);
-        return false;
-    }
-
-    riscv_cpu_validate_misa_priv(env, &local_err);
-    if (local_err != NULL) {
-        error_propagate(errp, local_err);
-        return false;
-    }
-
-    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
-        /*
-         * Enhanced PMP should only be available
-         * on harts with PMP support
-         */
-        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
-        return false;
-    }
-
-    riscv_cpu_validate_set_extensions(cpu, &local_err);
-    if (local_err != NULL) {
-        error_propagate(errp, local_err);
-        return false;
-    }
-
 #ifndef CONFIG_USER_ONLY
+    CPURISCVState *env = &cpu->env;
+
     CPU(cs)->tcg_cflags |= CF_PCREL;
 
     if (cpu->cfg.ext_sstc) {
         riscv_timer_init(cpu);
     }
 
-    if (cpu->cfg.pmu_num) {
-        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
+    if (cpu->cfg.pmu_mask) {
+        riscv_pmu_init(cpu, &local_err);
+        if (local_err != NULL) {
+            error_propagate(errp, local_err);
+            return false;
+        }
+
+        if (cpu->cfg.ext_sscofpmf) {
             cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           riscv_pmu_timer_cb, cpu);
         }
     }
+
+    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
+    if (riscv_has_ext(env, RVH)) {
+        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
+    }
 #endif
 
     return true;
@@ -23,5 +23,7 @@
 #include "cpu.h"
 
 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
+void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
+bool riscv_cpu_tcg_compatible(RISCVCPU *cpu);
 
 #endif