Fourth RISC-V PR for QEMU 8.0, Attempt 2

 * A triplet of cleanups to the kernel/initrd loader that avoids
   duplication between the various boards.
 * Weiwei Li, Daniel Henrique Barboza, and Liu Zhiwei have been added as
   reviewers.  Thanks for the help!
 * A fix for PMP matching to avoid incorrectly applying the default
   permissions on PMP permission violations.
 * A cleanup to avoid an unnecessary env_archcpu() call in
   cpu_get_tb_cpu_state().
 * Fixes for the vector slide instructions to avoid truncating 64-bit
   values (such as doubles) on 32-bit targets.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmP5Br8THHBhbG1lckBk
 YWJiZWx0LmNvbQAKCRAuExnzX7sYiT4RD/9hdSlQlR1g/2h4fbCJ3U0GvyNH0T7N
 mt3AX8hFvmfR1O63qqVVebJSHM1dTm6WsA19vKE5tdtbjV5V8UZuBTSqYeRBSrLd
 LK9IHhwv3k9OQ/EG8CgRo7HEMxAurpC26zTf3chnfwa1Wyl5XxCXNx5hPbhu18G9
 oxw0sBi51T0Tb+N6lOVVSfmiEZWLXRq+lDCZdV0j864brsSjo4x8VEGrLaFTOJLf
 X4MW6vBI4Pcb7EGnHjj5WvRKsf8gdahdx8bSTjORIm8oGri9Iyw6Vrg2khuhjnuH
 99sD1O06cvrylp+sCOVei8H3S6/xCepQXUXnCBCd1/cetgV+olo+ZR78Z8ZjXPED
 jhZ23lsDcge+4W141lsCiwLgzI0YO3Ac+84zQLIvcx16c8zow3G9FO9sTlBSsgnW
 0XJrsUF7AZB6quUSMytG7WK+OBizzCRwj7ItC+Mty68wLrei5lDVj8b0t8hAQEdr
 dOb7jku+Dz8OspGZx1aDKKifGDO+Ppv4PjAM2G44OmkM824SvvFg8+FEr9NgbKbp
 VgTZDCeVC6IEpzthKsK8WeompLo7Sc33KITqwMbGiyGs+gsnmgKP2bcTLF8YTlFk
 dqFBWjo3tjH5oukgTLCSYY4xPaHR9q418vGAfRox15GtUVliQ9iL5oH47PVXg4U7
 YsNZ74nD1pUueg==
 =Umli
 -----END PGP SIGNATURE-----

Merge tag 'pull-riscv-to-apply-20230224' of github.com:palmer-dabbelt/qemu into staging

Fourth RISC-V PR for QEMU 8.0, Attempt 2

* A triplet of cleanups to the kernel/initrd loader that avoids
  duplication between the various boards.
* Weiwei Li, Daniel Henrique Barboza, and Liu Zhiwei have been added as
  reviewers.  Thanks for the help!
* A fix for PMP matching to avoid incorrectly applying the default
  permissions on PMP permission violations.
* A cleanup to avoid an unnecessary env_archcpu() call in
  cpu_get_tb_cpu_state().
* Fixes for the vector slide instructions to avoid truncating 64-bit
  values (such as doubles) on 32-bit targets (see the sketch after this
  list).
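
As a rough illustration of that last point, here is a minimal, standalone C
sketch (not QEMU code; the type alias and values are made up for the example)
of how passing a double's bit pattern through a 32-bit scalar type drops the
upper half, which is what switching the vslide1up.vf/vslide1down.vf helpers
from target_ulong to uint64_t avoids on riscv32:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for target_ulong on a 32-bit (riscv32) target. */
    typedef uint32_t target_ulong32;

    int main(void)
    {
        double d = 1.5;
        uint64_t bits;
        memcpy(&bits, &d, sizeof(bits));    /* raw IEEE-754 bit pattern   */

        target_ulong32 truncated = bits;    /* upper 32 bits are lost     */
        uint64_t preserved = bits;          /* full 64-bit value survives */

        printf("full: %016" PRIx64 "  truncated: %08" PRIx32 "\n",
               preserved, truncated);
        return 0;
    }

With a 32-bit target_ulong the scalar operand of those instructions behaves
like 'truncated' above, which is why the helpers now take a uint64_t.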

# -----BEGIN PGP SIGNATURE-----
#
# iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmP5Br8THHBhbG1lckBk
# YWJiZWx0LmNvbQAKCRAuExnzX7sYiT4RD/9hdSlQlR1g/2h4fbCJ3U0GvyNH0T7N
# mt3AX8hFvmfR1O63qqVVebJSHM1dTm6WsA19vKE5tdtbjV5V8UZuBTSqYeRBSrLd
# LK9IHhwv3k9OQ/EG8CgRo7HEMxAurpC26zTf3chnfwa1Wyl5XxCXNx5hPbhu18G9
# oxw0sBi51T0Tb+N6lOVVSfmiEZWLXRq+lDCZdV0j864brsSjo4x8VEGrLaFTOJLf
# X4MW6vBI4Pcb7EGnHjj5WvRKsf8gdahdx8bSTjORIm8oGri9Iyw6Vrg2khuhjnuH
# 99sD1O06cvrylp+sCOVei8H3S6/xCepQXUXnCBCd1/cetgV+olo+ZR78Z8ZjXPED
# jhZ23lsDcge+4W141lsCiwLgzI0YO3Ac+84zQLIvcx16c8zow3G9FO9sTlBSsgnW
# 0XJrsUF7AZB6quUSMytG7WK+OBizzCRwj7ItC+Mty68wLrei5lDVj8b0t8hAQEdr
# dOb7jku+Dz8OspGZx1aDKKifGDO+Ppv4PjAM2G44OmkM824SvvFg8+FEr9NgbKbp
# VgTZDCeVC6IEpzthKsK8WeompLo7Sc33KITqwMbGiyGs+gsnmgKP2bcTLF8YTlFk
# dqFBWjo3tjH5oukgTLCSYY4xPaHR9q418vGAfRox15GtUVliQ9iL5oH47PVXg4U7
# YsNZ74nD1pUueg==
# =Umli
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 24 Feb 2023 18:49:35 GMT
# gpg:                using RSA key 2B3C3747446843B24A943A7A2E1319F35FBB1889
# gpg:                issuer "palmer@dabbelt.com"
# gpg: Good signature from "Palmer Dabbelt <palmer@dabbelt.com>" [unknown]
# gpg:                 aka "Palmer Dabbelt <palmerdabbelt@google.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 00CE 76D1 8349 60DF CE88  6DF8 EF4C A150 2CCB AB41
#      Subkey fingerprint: 2B3C 3747 4468 43B2 4A94  3A7A 2E13 19F3 5FBB 1889

* tag 'pull-riscv-to-apply-20230224' of github.com:palmer-dabbelt/qemu:
  target/riscv: Fix vslide1up.vf and vslide1down.vf
  target/riscv: avoid env_archcpu() in cpu_get_tb_cpu_state()
  target/riscv: Smepmp: Skip applying default rules when address matches
  MAINTAINERS: Add some RISC-V reviewers
  target/riscv: Remove privileged spec version restriction for RVV
  hw/riscv/boot.c: make riscv_load_initrd() static
  hw/riscv/boot.c: consolidate all kernel init in riscv_load_kernel()
  hw/riscv: handle 32 bit CPUs kernel_entry in riscv_load_kernel()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2023-02-26 20:14:46 +00:00
commit b11728dc3a
14 changed files with 99 additions and 102 deletions

@@ -287,6 +287,9 @@ RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <alistair.francis@wdc.com>
M: Bin Meng <bin.meng@windriver.com>
R: Weiwei Li <liweiwei@iscas.ac.cn>
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
L: qemu-riscv@nongnu.org
S: Supported
F: target/riscv/

@@ -173,43 +173,7 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
exit(1);
}
target_ulong riscv_load_kernel(MachineState *machine,
target_ulong kernel_start_addr,
symbol_fn_t sym_cb)
{
const char *kernel_filename = machine->kernel_filename;
uint64_t kernel_load_base, kernel_entry;
g_assert(kernel_filename != NULL);
/*
* NB: Use low address not ELF entry point to ensure that the fw_dynamic
* behaviour when loading an ELF matches the fw_payload, fw_jump and BBL
* behaviour, as well as fw_dynamic with a raw binary, all of which jump to
* the (expected) load address. This allows kernels to have
* separate SBI and ELF entry points (used by FreeBSD, for example).
*/
if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
NULL, &kernel_load_base, NULL, NULL, 0,
EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
return kernel_load_base;
}
if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
NULL, NULL, NULL) > 0) {
return kernel_entry;
}
if (load_image_targphys_as(kernel_filename, kernel_start_addr,
current_machine->ram_size, NULL) > 0) {
return kernel_start_addr;
}
error_report("could not load kernel '%s'", kernel_filename);
exit(1);
}
void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
{
const char *filename = machine->initrd_filename;
uint64_t mem_size = machine->ram_size;
@@ -249,6 +213,67 @@ void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
}
}
target_ulong riscv_load_kernel(MachineState *machine,
RISCVHartArrayState *harts,
target_ulong kernel_start_addr,
bool load_initrd,
symbol_fn_t sym_cb)
{
const char *kernel_filename = machine->kernel_filename;
uint64_t kernel_load_base, kernel_entry;
void *fdt = machine->fdt;
g_assert(kernel_filename != NULL);
/*
* NB: Use low address not ELF entry point to ensure that the fw_dynamic
* behaviour when loading an ELF matches the fw_payload, fw_jump and BBL
* behaviour, as well as fw_dynamic with a raw binary, all of which jump to
* the (expected) load address. This allows kernels to have
* separate SBI and ELF entry points (used by FreeBSD, for example).
*/
if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
NULL, &kernel_load_base, NULL, NULL, 0,
EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
kernel_entry = kernel_load_base;
goto out;
}
if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
NULL, NULL, NULL) > 0) {
goto out;
}
if (load_image_targphys_as(kernel_filename, kernel_start_addr,
current_machine->ram_size, NULL) > 0) {
kernel_entry = kernel_start_addr;
goto out;
}
error_report("could not load kernel '%s'", kernel_filename);
exit(1);
out:
/*
* For 32 bit CPUs 'kernel_entry' can be sign-extended by
* load_elf_ram_sym().
*/
if (riscv_is_32bit(harts)) {
kernel_entry = extract64(kernel_entry, 0, 32);
}
if (load_initrd && machine->initrd_filename) {
riscv_load_initrd(machine, kernel_entry);
}
if (fdt && machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
machine->kernel_cmdline);
}
return kernel_entry;
}
/*
* This function makes an assumption that the DRAM interval
* 'dram_base' + 'dram_size' is contiguous.
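
The extract64() call in the new riscv_load_kernel() above is what strips that
sign extension for rv32 harts. Here is a small self-contained sketch of the
same idea (the helper below merely mimics QEMU's extract64() from
include/qemu/bitops.h, and the entry address is just an example value):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for QEMU's extract64(value, start, length). */
    static uint64_t extract64_sketch(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    int main(void)
    {
        /* A 32-bit entry point such as 0x80200000 can come back
         * sign-extended when it travels through a 64-bit ELF loader. */
        uint64_t kernel_entry = 0xffffffff80200000ULL;

        uint64_t masked = extract64_sketch(kernel_entry, 0, 32);
        printf("raw:    0x%016" PRIx64 "\n", kernel_entry);
        printf("masked: 0x%016" PRIx64 "\n", masked);  /* 0x0000000080200000 */
        return 0;
    }

Keeping only bits [31:0] in the common loader gives every rv32 board a
well-formed entry address in one place.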

@@ -629,16 +629,8 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
if (machine->initrd_filename) {
riscv_load_initrd(machine, kernel_entry);
}
if (machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(machine->fdt, "/chosen",
"bootargs", machine->kernel_cmdline);
}
kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
kernel_start_addr, true, NULL);
/* Compute the fdt load address in dram */
fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,

@@ -101,7 +101,9 @@ static void opentitan_board_init(MachineState *machine)
}
if (machine->kernel_filename) {
riscv_load_kernel(machine, memmap[IBEX_DEV_RAM].base, NULL);
riscv_load_kernel(machine, &s->soc.cpus,
memmap[IBEX_DEV_RAM].base,
false, NULL);
}
}

@@ -114,7 +114,9 @@ static void sifive_e_machine_init(MachineState *machine)
memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);
if (machine->kernel_filename) {
riscv_load_kernel(machine, memmap[SIFIVE_E_DEV_DTIM].base, NULL);
riscv_load_kernel(machine, &s->soc.cpus,
memmap[SIFIVE_E_DEV_DTIM].base,
false, NULL);
}
}

@@ -598,16 +598,8 @@ static void sifive_u_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
if (machine->initrd_filename) {
riscv_load_initrd(machine, kernel_entry);
}
if (machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
machine->kernel_cmdline);
}
kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode

@@ -305,17 +305,9 @@ static void spike_board_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
htif_symbol_callback);
if (machine->initrd_filename) {
riscv_load_initrd(machine, kernel_entry);
}
if (machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
machine->kernel_cmdline);
}
kernel_entry = riscv_load_kernel(machine, &s->soc[0],
kernel_start_addr,
true, htif_symbol_callback);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode

@@ -1277,16 +1277,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
if (machine->initrd_filename) {
riscv_load_initrd(machine, kernel_entry);
}
if (machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
machine->kernel_cmdline);
}
kernel_entry = riscv_load_kernel(machine, &s->soc[0],
kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode

@@ -44,9 +44,10 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb);
target_ulong riscv_load_kernel(MachineState *machine,
RISCVHartArrayState *harts,
target_ulong firmware_end_addr,
bool load_initrd,
symbol_fn_t sym_cb);
void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry);
uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
MachineState *ms);
void riscv_load_fdt(hwaddr fdt_addr, void *fdt);

@@ -73,7 +73,7 @@ struct isa_ext_data {
*/
static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v),
ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),

@@ -60,7 +60,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
* which is not supported by GVEC. So we set vl_eq_vlmax flag to true
* only when maxsz >= 8 bytes.
*/
uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
uint32_t maxsz = vlmax << sew;
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
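
For reference, a rough sketch of what an env_archcpu()-style helper boils down
to (illustrative struct names only, not QEMU's real definitions): it recovers
the containing CPU object from the embedded env pointer with container_of-style
arithmetic, so a function like cpu_get_tb_cpu_state() that already holds the
CPU pointer can simply pass it along instead of re-deriving it:

    #include <stddef.h>

    /* Illustrative layout only -- not QEMU's actual RISCVCPU definition. */
    typedef struct {
        unsigned long vtype;
    } EnvSketch;

    typedef struct {
        int other_cpu_state;
        EnvSketch env;          /* env is embedded inside the CPU object */
    } CpuSketch;

    /* What an env_archcpu()-style helper amounts to: step back from the
     * embedded member to the enclosing structure. */
    static CpuSketch *env_archcpu_sketch(EnvSketch *env)
    {
        return (CpuSketch *)((char *)env - offsetof(CpuSketch, env));
    }

    int main(void)
    {
        CpuSketch cpu = { .other_cpu_state = 0, .env = { .vtype = 7 } };
        /* Recovering the CPU from its embedded env yields the same object. */
        return env_archcpu_sketch(&cpu.env) == &cpu ? 0 : 1;
    }

Handing the existing cpu pointer straight to vext_get_vlmax() spares the extra
env_archcpu() call and reads more directly.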

@@ -3980,20 +3980,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_FRM] = { "frm", fs, read_frm, write_frm },
[CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
/* Vector CSRs */
[CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VL] = { "vl", vs, read_vl,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VTYPE] = { "vtype", vs, read_vtype,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VLENB] = { "vlenb", vs, read_vlenb,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
[CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
[CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
[CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
[CSR_VL] = { "vl", vs, read_vl },
[CSR_VTYPE] = { "vtype", vs, read_vtype },
[CSR_VLENB] = { "vlenb", vs, read_vlenb },
/* User Timers and Counters */
[CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
[CSR_INSTRET] = { "instret", ctr, read_hpmcounter },

@@ -441,9 +441,12 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
}
}
if ((privs & *allowed_privs) == privs) {
ret = i;
}
/*
* If matching address range was found, the protection bits
* defined with PMP must be used. We shouldn't fallback on
* finding default privileges.
*/
ret = i;
break;
}
}
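
To make the intent of this hunk concrete, here is a minimal sketch of the
matching policy it establishes (hypothetical types and helper name, not QEMU's
internal PMP API): the first PMP entry whose range covers the access decides
the allowed permissions and the search stops there, even when those permissions
are more restrictive than what was requested; the default rules apply only when
no entry matches at all:

    #include <stdint.h>

    typedef struct {
        uint64_t base, top;   /* address range covered by this PMP entry */
        int      perms;       /* R/W/X bits granted by this entry        */
    } PmpEntrySketch;

    static int pmp_allowed_perms_sketch(const PmpEntrySketch *entries, int n,
                                        uint64_t addr, int default_perms)
    {
        for (int i = 0; i < n; i++) {
            if (addr >= entries[i].base && addr < entries[i].top) {
                return entries[i].perms;   /* first match wins, stop here */
            }
        }
        return default_perms;       /* no match: fall back to the defaults */
    }

    int main(void)
    {
        const PmpEntrySketch pmp[] = {
            { .base = 0x80000000, .top = 0x80001000, .perms = 0 /* none */ },
        };
        /* An address inside the entry gets that entry's (empty) permissions,
         * not the permissive default. */
        return pmp_allowed_perms_sketch(pmp, 1, 0x80000800, 7 /* rwx */);
    }

Previously a matching entry that denied the access could fall through to the
default rules and be quietly granted anyway, which is the behaviour the change
above removes.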

@@ -5038,7 +5038,7 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
#define GEN_VEXT_VSLIE1UP(BITWIDTH, H) \
static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
typedef uint##BITWIDTH##_t ETYPE; \
@@ -5086,7 +5086,7 @@ GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
#define GEN_VEXT_VSLIDE1DOWN(BITWIDTH, H) \
static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
typedef uint##BITWIDTH##_t ETYPE; \