/*
 * QEMU RISC-V VirtIO Board
 *
 * Copyright (c) 2017 SiFive, Inc.
 *
 * RISC-V machine with 16550a UART and VirtIO MMIO
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/char/serial.h"
#include "target/riscv/cpu.h"
#include "hw/core/sysbus-fdt.h"
#include "target/riscv/pmu.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/virt.h"
#include "hw/riscv/boot.h"
#include "hw/riscv/numa.h"
#include "kvm/kvm_riscv.h"
#include "hw/firmware/smbios.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/riscv_aplic.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_test.h"
#include "hw/platform-bus.h"
#include "chardev/char.h"
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/tpm.h"
#include "sysemu/qtest.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"
#include "hw/display/ramfb.h"
#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
#include "hw/virtio/virtio-iommu.h"

/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
static bool virt_use_kvm_aia(RISCVVirtState *s)
{
    return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
}

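/* ACLINT devices are only modelled when running under TCG or qtest. */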
static bool virt_aclint_allowed(void)
{
    return tcg_enabled() || qtest_enabled();
}

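/* Fixed guest-physical memory layout of the virt machine. */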
static const MemMapEntry virt_memmap[] = {
    [VIRT_DEBUG] =        {        0x0,         0x100 },
    [VIRT_MROM] =         {     0x1000,        0xf000 },
    [VIRT_TEST] =         {   0x100000,        0x1000 },
    [VIRT_RTC] =          {   0x101000,        0x1000 },
    [VIRT_CLINT] =        {  0x2000000,       0x10000 },
    [VIRT_ACLINT_SSWI] =  {  0x2F00000,        0x4000 },
    [VIRT_PCIE_PIO] =     {  0x3000000,       0x10000 },
    [VIRT_PLATFORM_BUS] = {  0x4000000,     0x2000000 },
    [VIRT_PLIC] =         {  0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
    [VIRT_APLIC_M] =      {  0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) },
    [VIRT_APLIC_S] =      {  0xd000000, APLIC_SIZE(VIRT_CPUS_MAX) },
    [VIRT_UART0] =        { 0x10000000,         0x100 },
    [VIRT_VIRTIO] =       { 0x10001000,        0x1000 },
    [VIRT_FW_CFG] =       { 0x10100000,          0x18 },
    [VIRT_FLASH] =        { 0x20000000,     0x4000000 },
    [VIRT_IMSIC_M] =      { 0x24000000, VIRT_IMSIC_MAX_SIZE },
    [VIRT_IMSIC_S] =      { 0x28000000, VIRT_IMSIC_MAX_SIZE },
    [VIRT_PCIE_ECAM] =    { 0x30000000,    0x10000000 },
    [VIRT_PCIE_MMIO] =    { 0x40000000,    0x40000000 },
    [VIRT_DRAM] =         { 0x80000000,           0x0 },
};

/* PCIe high mmio is fixed for RV32 */
#define VIRT32_HIGH_PCIE_MMIO_BASE  0x300000000ULL
#define VIRT32_HIGH_PCIE_MMIO_SIZE  (4 * GiB)

/* PCIe high mmio for RV64, size is fixed but base depends on top of RAM */
#define VIRT64_HIGH_PCIE_MMIO_SIZE  (16 * GiB)

static MemMapEntry virt_high_pcie_memmap;

#define VIRT_FLASH_SECTOR_SIZE (256 * KiB)

static PFlashCFI01 *virt_flash_create1(RISCVVirtState *s,
                                       const char *name,
                                       const char *alias_prop_name)
{
    /*
     * Create a single flash device. We use the same parameters as
     * the flash devices on the ARM virt board.
     */
    DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 4);
    qdev_prop_set_uint8(dev, "device-width", 2);
    qdev_prop_set_bit(dev, "big-endian", false);
    qdev_prop_set_uint16(dev, "id0", 0x89);
    qdev_prop_set_uint16(dev, "id1", 0x18);
    qdev_prop_set_uint16(dev, "id2", 0x00);
    qdev_prop_set_uint16(dev, "id3", 0x00);
    qdev_prop_set_string(dev, "name", name);

    object_property_add_child(OBJECT(s), name, OBJECT(dev));
    object_property_add_alias(OBJECT(s), alias_prop_name,
                              OBJECT(dev), "drive");

    return PFLASH_CFI01(dev);
}

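/* Create the two CFI flash devices, exposed as "pflash0" and "pflash1". */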
static void virt_flash_create(RISCVVirtState *s)
{
    s->flash[0] = virt_flash_create1(s, "virt.flash0", "pflash0");
    s->flash[1] = virt_flash_create1(s, "virt.flash1", "pflash1");
}

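/* Realize one flash device and map its MMIO region at @base. */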
static void virt_flash_map1(PFlashCFI01 *flash,
                            hwaddr base, hwaddr size,
                            MemoryRegion *sysmem)
{
    DeviceState *dev = DEVICE(flash);

    assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE));
    assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX);
    qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    memory_region_add_subregion(sysmem, base,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
                                                       0));
}

static void virt_flash_map(RISCVVirtState *s,
                           MemoryRegion *sysmem)
{
    hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = virt_memmap[VIRT_FLASH].base;

    virt_flash_map1(s->flash[0], flashbase, flashsize,
                    sysmem);
    virt_flash_map1(s->flash[1], flashbase + flashsize, flashsize,
                    sysmem);
}

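/*
 * Generate the "interrupt-map" and "interrupt-map-mask" properties of the
 * PCIe host bridge node at @nodename.
 */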
static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
                                uint32_t irqchip_phandle)
{
    int pin, dev;
    uint32_t irq_map_stride = 0;
    uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS *
                          FDT_MAX_INT_MAP_WIDTH] = {};
    uint32_t *irq_map = full_irq_map;

    /*
     * This code creates a standard swizzle of interrupts such that
     * each device's first interrupt is based on its PCI_SLOT number.
     * (See pci_swizzle_map_irq_fn())
     *
     * We only need one entry per interrupt in the table (not one per
     * possible slot) since the interrupt-map-mask allows the table
     * to wrap to any number of devices.
     */
    for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
        int devfn = dev * 0x8;

        for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
            int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
            int i = 0;

            /* Fill PCI address cells */
            irq_map[i] = cpu_to_be32(devfn << 8);
            i += FDT_PCI_ADDR_CELLS;

            /* Fill PCI Interrupt cells */
            irq_map[i] = cpu_to_be32(pin + 1);
            i += FDT_PCI_INT_CELLS;

            /* Fill interrupt controller phandle and cells */
            irq_map[i++] = cpu_to_be32(irqchip_phandle);
            irq_map[i++] = cpu_to_be32(irq_nr);
            if (s->aia_type != VIRT_AIA_TYPE_NONE) {
                irq_map[i++] = cpu_to_be32(0x4);
            }

            if (!irq_map_stride) {
                irq_map_stride = i;
            }
            irq_map += irq_map_stride;
        }
    }

    qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
                     GPEX_NUM_IRQS * GPEX_NUM_IRQS *
                     irq_map_stride * sizeof(uint32_t));

    qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
                           0x1800, 0, 0, 0x7);
}

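/*
 * Add a /cpus/cpu@N node (with its interrupt controller and cpu-map core
 * entry) for every hart of @socket.
 */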
static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
                                   char *clust_name, uint32_t *phandle,
                                   uint32_t *intc_phandles)
{
    int cpu;
    uint32_t cpu_phandle;
    MachineState *ms = MACHINE(s);
    bool is_32_bit = riscv_is_32bit(&s->soc[0]);
    uint8_t satp_mode_max;

    for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
        RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu];
        g_autofree char *cpu_name = NULL;
        g_autofree char *core_name = NULL;
        g_autofree char *intc_name = NULL;
        g_autofree char *sv_name = NULL;

        cpu_phandle = (*phandle)++;

        cpu_name = g_strdup_printf("/cpus/cpu@%d",
                                   s->soc[socket].hartid_base + cpu);
        qemu_fdt_add_subnode(ms->fdt, cpu_name);

        if (cpu_ptr->cfg.satp_mode.supported != 0) {
            satp_mode_max = satp_mode_max_from_map(cpu_ptr->cfg.satp_mode.map);
            sv_name = g_strdup_printf("riscv,%s",
                                      satp_mode_str(satp_mode_max, is_32_bit));
            qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name);
        }

        riscv_isa_write_fdt(cpu_ptr, ms->fdt, cpu_name);

        if (cpu_ptr->cfg.ext_zicbom) {
            qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size",
                                  cpu_ptr->cfg.cbom_blocksize);
        }

        if (cpu_ptr->cfg.ext_zicboz) {
            qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cboz-block-size",
                                  cpu_ptr->cfg.cboz_blocksize);
        }

        if (cpu_ptr->cfg.ext_zicbop) {
            qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbop-block-size",
                                  cpu_ptr->cfg.cbop_blocksize);
        }

        qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv");
        qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay");
        qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg",
                              s->soc[socket].hartid_base + cpu);
        qemu_fdt_setprop_string(ms->fdt, cpu_name, "device_type", "cpu");
        riscv_socket_fdt_write_id(ms, cpu_name, socket);
        qemu_fdt_setprop_cell(ms->fdt, cpu_name, "phandle", cpu_phandle);

        intc_phandles[cpu] = (*phandle)++;

        intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
        qemu_fdt_add_subnode(ms->fdt, intc_name);
        qemu_fdt_setprop_cell(ms->fdt, intc_name, "phandle",
                              intc_phandles[cpu]);
        qemu_fdt_setprop_string(ms->fdt, intc_name, "compatible",
                                "riscv,cpu-intc");
        qemu_fdt_setprop(ms->fdt, intc_name, "interrupt-controller", NULL, 0);
        qemu_fdt_setprop_cell(ms->fdt, intc_name, "#interrupt-cells", 1);

        core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
        qemu_fdt_add_subnode(ms->fdt, core_name);
        qemu_fdt_setprop_cell(ms->fdt, core_name, "cpu", cpu_phandle);
    }
}

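/* Add the /memory@ADDR node describing the DRAM assigned to @socket. */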
static void create_fdt_socket_memory(RISCVVirtState *s,
                                     const MemMapEntry *memmap, int socket)
{
    g_autofree char *mem_name = NULL;
    uint64_t addr, size;
    MachineState *ms = MACHINE(s);

    addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket);
    size = riscv_socket_mem_size(ms, socket);
    mem_name = g_strdup_printf("/memory@%lx", (long)addr);
    qemu_fdt_add_subnode(ms->fdt, mem_name);
    qemu_fdt_setprop_cells(ms->fdt, mem_name, "reg",
                           addr >> 32, addr, size >> 32, size);
    qemu_fdt_setprop_string(ms->fdt, mem_name, "device_type", "memory");
    riscv_socket_fdt_write_id(ms, mem_name, socket);
}

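/*
 * Add the per-socket CLINT node: one (M-mode software, M-mode timer)
 * interrupt pair per hart.
 */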
static void create_fdt_socket_clint(RISCVVirtState *s,
                                    const MemMapEntry *memmap, int socket,
                                    uint32_t *intc_phandles)
{
    int cpu;
    g_autofree char *clint_name = NULL;
    g_autofree uint32_t *clint_cells = NULL;
    unsigned long clint_addr;
    MachineState *ms = MACHINE(s);
    static const char * const clint_compat[2] = {
        "sifive,clint0", "riscv,clint0"
    };

    clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);

    for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
        clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
        clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
        clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
        clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
    }

    clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
    clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
    qemu_fdt_add_subnode(ms->fdt, clint_name);
    qemu_fdt_setprop_string_array(ms->fdt, clint_name, "compatible",
                                  (char **)&clint_compat,
                                  ARRAY_SIZE(clint_compat));
    qemu_fdt_setprop_cells(ms->fdt, clint_name, "reg",
                           0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size);
    qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended",
                     clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
    riscv_socket_fdt_write_id(ms, clint_name, socket);
}

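/*
 * Add the per-socket ACLINT nodes: MSWI and SSWI (unless APLIC+IMSIC is
 * in use) plus the MTIMER.
 */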
static void create_fdt_socket_aclint(RISCVVirtState *s,
                                     const MemMapEntry *memmap, int socket,
                                     uint32_t *intc_phandles)
{
    int cpu;
    char *name;
    unsigned long addr, size;
    uint32_t aclint_cells_size;
    g_autofree uint32_t *aclint_mswi_cells = NULL;
    g_autofree uint32_t *aclint_sswi_cells = NULL;
    g_autofree uint32_t *aclint_mtimer_cells = NULL;
    MachineState *ms = MACHINE(s);

    aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
    aclint_mtimer_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
    aclint_sswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);

    for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
        aclint_mswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
        aclint_mswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_SOFT);
        aclint_mtimer_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
        aclint_mtimer_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_TIMER);
        aclint_sswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
        aclint_sswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_SOFT);
    }
    aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;

    if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
        addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
        name = g_strdup_printf("/soc/mswi@%lx", addr);
        qemu_fdt_add_subnode(ms->fdt, name);
        qemu_fdt_setprop_string(ms->fdt, name, "compatible",
                                "riscv,aclint-mswi");
        qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                               0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
        qemu_fdt_setprop(ms->fdt, name, "interrupts-extended",
                         aclint_mswi_cells, aclint_cells_size);
        qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0);
        qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells", 0);
        riscv_socket_fdt_write_id(ms, name, socket);
        g_free(name);
    }

    if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
        addr = memmap[VIRT_CLINT].base +
               (RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
        size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
    } else {
        addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
               (memmap[VIRT_CLINT].size * socket);
        size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
    }
    name = g_strdup_printf("/soc/mtimer@%lx", addr);
    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible",
                            "riscv,aclint-mtimer");
    qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                           0x0, addr + RISCV_ACLINT_DEFAULT_MTIME,
                           0x0, size - RISCV_ACLINT_DEFAULT_MTIME,
                           0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP,
                           0x0, RISCV_ACLINT_DEFAULT_MTIME);
    qemu_fdt_setprop(ms->fdt, name, "interrupts-extended",
                     aclint_mtimer_cells, aclint_cells_size);
    riscv_socket_fdt_write_id(ms, name, socket);
    g_free(name);

    if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
        addr = memmap[VIRT_ACLINT_SSWI].base +
               (memmap[VIRT_ACLINT_SSWI].size * socket);
        name = g_strdup_printf("/soc/sswi@%lx", addr);
        qemu_fdt_add_subnode(ms->fdt, name);
        qemu_fdt_setprop_string(ms->fdt, name, "compatible",
                                "riscv,aclint-sswi");
        qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                               0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
        qemu_fdt_setprop(ms->fdt, name, "interrupts-extended",
                         aclint_sswi_cells, aclint_cells_size);
        qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0);
        qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells", 0);
        riscv_socket_fdt_write_id(ms, name, socket);
        g_free(name);
    }
}

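/*
 * Add the per-socket PLIC node. Under KVM only the S-mode external
 * interrupt is wired; otherwise both M-mode and S-mode are. Platform-bus
 * devices hang off socket 0's PLIC.
 */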
static void create_fdt_socket_plic(RISCVVirtState *s,
                                   const MemMapEntry *memmap, int socket,
                                   uint32_t *phandle, uint32_t *intc_phandles,
                                   uint32_t *plic_phandles)
{
    int cpu;
    g_autofree char *plic_name = NULL;
    g_autofree uint32_t *plic_cells;
    unsigned long plic_addr;
    MachineState *ms = MACHINE(s);
    static const char * const plic_compat[2] = {
        "sifive,plic-1.0.0", "riscv,plic0"
    };

    plic_phandles[socket] = (*phandle)++;
    plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
    plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
    qemu_fdt_add_subnode(ms->fdt, plic_name);
    qemu_fdt_setprop_cell(ms->fdt, plic_name,
                          "#interrupt-cells", FDT_PLIC_INT_CELLS);
    qemu_fdt_setprop_cell(ms->fdt, plic_name,
                          "#address-cells", FDT_PLIC_ADDR_CELLS);
    qemu_fdt_setprop_string_array(ms->fdt, plic_name, "compatible",
                                  (char **)&plic_compat,
                                  ARRAY_SIZE(plic_compat));
    qemu_fdt_setprop(ms->fdt, plic_name, "interrupt-controller", NULL, 0);

    if (kvm_enabled()) {
        plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);

        for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
            plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
            plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
        }

        qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
                         plic_cells,
                         s->soc[socket].num_harts * sizeof(uint32_t) * 2);
    } else {
        plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);

        for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
            plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
            plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
            plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
            plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
        }

        qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
                         plic_cells,
                         s->soc[socket].num_harts * sizeof(uint32_t) * 4);
    }

    qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg",
                           0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
    qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev",
                          VIRT_IRQCHIP_NUM_SOURCES - 1);
    riscv_socket_fdt_write_id(ms, plic_name, socket);
    qemu_fdt_setprop_cell(ms->fdt, plic_name, "phandle",
                          plic_phandles[socket]);

    if (!socket) {
        platform_bus_add_all_fdt_nodes(ms->fdt, plic_name,
                                       memmap[VIRT_PLATFORM_BUS].base,
                                       memmap[VIRT_PLATFORM_BUS].size,
                                       VIRT_PLATFORM_BUS_IRQ);
    }
}

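/* Smallest number of bits needed to index @count values, i.e. ceil(log2). */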
uint32_t imsic_num_bits(uint32_t count)
{
    uint32_t ret = 0;

    while (BIT(ret) < count) {
        ret++;
    }

    return ret;
}

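/* Add a single IMSIC node (M-level or S-level) spanning all sockets. */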
static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
                                 uint32_t *intc_phandles, uint32_t msi_phandle,
                                 bool m_mode, uint32_t imsic_guest_bits)
{
    int cpu, socket;
    g_autofree char *imsic_name = NULL;
    MachineState *ms = MACHINE(s);
    int socket_count = riscv_socket_count(ms);
    uint32_t imsic_max_hart_per_socket, imsic_addr, imsic_size;
    g_autofree uint32_t *imsic_cells = NULL;
    g_autofree uint32_t *imsic_regs = NULL;
    static const char * const imsic_compat[2] = {
        "qemu,imsics", "riscv,imsics"
    };

    imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
    imsic_regs = g_new0(uint32_t, socket_count * 4);

    for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
        imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
        imsic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
    }

    imsic_max_hart_per_socket = 0;
    for (socket = 0; socket < socket_count; socket++) {
        imsic_addr = base_addr + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
        imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
                     s->soc[socket].num_harts;
        imsic_regs[socket * 4 + 0] = 0;
        imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
        imsic_regs[socket * 4 + 2] = 0;
        imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
        if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
            imsic_max_hart_per_socket = s->soc[socket].num_harts;
        }
    }

    imsic_name = g_strdup_printf("/soc/interrupt-controller@%lx",
                                 (unsigned long)base_addr);
    qemu_fdt_add_subnode(ms->fdt, imsic_name);
    qemu_fdt_setprop_string_array(ms->fdt, imsic_name, "compatible",
                                  (char **)&imsic_compat,
                                  ARRAY_SIZE(imsic_compat));

    qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
                          FDT_IMSIC_INT_CELLS);
    qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
    qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
    qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#msi-cells", 0);
    qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
                     imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
    qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
                     socket_count * sizeof(uint32_t) * 4);
    qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
                          VIRT_IRQCHIP_NUM_MSIS);

    if (imsic_guest_bits) {
        qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,guest-index-bits",
                              imsic_guest_bits);
    }

    if (socket_count > 1) {
        qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
                              imsic_num_bits(imsic_max_hart_per_socket));
        qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
                              imsic_num_bits(socket_count));
        qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
                              IMSIC_MMIO_GROUP_MIN_SHIFT);
    }
    qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
}

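/*
 * Add the M-level (skipped under KVM) and S-level IMSIC nodes and return
 * their MSI phandles.
 */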
static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
                             uint32_t *phandle, uint32_t *intc_phandles,
                             uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
{
    *msi_m_phandle = (*phandle)++;
    *msi_s_phandle = (*phandle)++;

    if (!kvm_enabled()) {
        /* M-level IMSIC node */
        create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
                             *msi_m_phandle, true, 0);
    }

    /* S-level IMSIC node */
    create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
                         *msi_s_phandle, false,
                         imsic_num_bits(s->aia_guests + 1));
}

/* Caller must free string after use */
static char *fdt_get_aplic_nodename(unsigned long aplic_addr)
{
    return g_strdup_printf("/soc/interrupt-controller@%lx", aplic_addr);
}

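/*
 * Add one APLIC node, either wired directly to the harts or routed
 * through an MSI parent, optionally delegating to a child APLIC.
 */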
static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
                                 unsigned long aplic_addr, uint32_t aplic_size,
                                 uint32_t msi_phandle,
                                 uint32_t *intc_phandles,
                                 uint32_t aplic_phandle,
                                 uint32_t aplic_child_phandle,
                                 bool m_mode, int num_harts)
{
    int cpu;
    g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
    g_autofree uint32_t *aplic_cells = g_new0(uint32_t, num_harts * 2);
    MachineState *ms = MACHINE(s);
    static const char * const aplic_compat[2] = {
        "qemu,aplic", "riscv,aplic"
    };

    for (cpu = 0; cpu < num_harts; cpu++) {
        aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
        aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
    }

    qemu_fdt_add_subnode(ms->fdt, aplic_name);
    qemu_fdt_setprop_string_array(ms->fdt, aplic_name, "compatible",
                                  (char **)&aplic_compat,
                                  ARRAY_SIZE(aplic_compat));
    qemu_fdt_setprop_cell(ms->fdt, aplic_name, "#address-cells",
                          FDT_APLIC_ADDR_CELLS);
    qemu_fdt_setprop_cell(ms->fdt, aplic_name,
                          "#interrupt-cells", FDT_APLIC_INT_CELLS);
    qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);

    if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
        qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
                         aplic_cells, num_harts * sizeof(uint32_t) * 2);
    } else {
        qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
    }

    qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
                           0x0, aplic_addr, 0x0, aplic_size);
    qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
                          VIRT_IRQCHIP_NUM_SOURCES);

    if (aplic_child_phandle) {
        qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
                              aplic_child_phandle);
        qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegation",
                               aplic_child_phandle, 0x1,
                               VIRT_IRQCHIP_NUM_SOURCES);
    }

    riscv_socket_fdt_write_id(ms, aplic_name, socket);
    qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle);
}

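/*
 * Add the M-level (skipped under KVM) and S-level APLIC nodes of @socket;
 * the M-level APLIC delegates all sources to the S-level one.
 */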
static void create_fdt_socket_aplic(RISCVVirtState *s,
                                    const MemMapEntry *memmap, int socket,
                                    uint32_t msi_m_phandle,
                                    uint32_t msi_s_phandle,
                                    uint32_t *phandle,
                                    uint32_t *intc_phandles,
                                    uint32_t *aplic_phandles,
                                    int num_harts)
{
    unsigned long aplic_addr;
    MachineState *ms = MACHINE(s);
    uint32_t aplic_m_phandle, aplic_s_phandle;

    aplic_m_phandle = (*phandle)++;
    aplic_s_phandle = (*phandle)++;

    if (!kvm_enabled()) {
        /* M-level APLIC node */
        aplic_addr = memmap[VIRT_APLIC_M].base +
                     (memmap[VIRT_APLIC_M].size * socket);
        create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
                             msi_m_phandle, intc_phandles,
                             aplic_m_phandle, aplic_s_phandle,
                             true, num_harts);
    }

    /* S-level APLIC node */
    aplic_addr = memmap[VIRT_APLIC_S].base +
                 (memmap[VIRT_APLIC_S].size * socket);
    create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
                         msi_s_phandle, intc_phandles,
                         aplic_s_phandle, 0,
                         false, num_harts);

    if (!socket) {
        g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
        platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
                                       memmap[VIRT_PLATFORM_BUS].base,
                                       memmap[VIRT_PLATFORM_BUS].size,
                                       VIRT_PLATFORM_BUS_IRQ);
    }

    aplic_phandles[socket] = aplic_s_phandle;
}

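/* Add the /pmu node advertising the available PMU counters of hart 0. */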
static void create_fdt_pmu(RISCVVirtState *s)
{
    g_autofree char *pmu_name = g_strdup_printf("/pmu");
    MachineState *ms = MACHINE(s);
    RISCVCPU hart = s->soc[0].harts[0];

    qemu_fdt_add_subnode(ms->fdt, pmu_name);
    qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
    riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name);
}

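/*
 * Build the per-socket portion of the device tree: CPU, memory and
 * interrupt controller nodes, returning the phandles that the MMIO,
 * virtio and PCIe nodes will reference.
 */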
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
                               uint32_t *phandle,
                               uint32_t *irq_mmio_phandle,
                               uint32_t *irq_pcie_phandle,
                               uint32_t *irq_virtio_phandle,
                               uint32_t *msi_pcie_phandle)
{
    int socket, phandle_pos;
    MachineState *ms = MACHINE(s);
    uint32_t msi_m_phandle = 0, msi_s_phandle = 0;
    uint32_t xplic_phandles[MAX_NODES];
    g_autofree uint32_t *intc_phandles = NULL;
    int socket_count = riscv_socket_count(ms);

    qemu_fdt_add_subnode(ms->fdt, "/cpus");
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency",
                          kvm_enabled() ?
                          kvm_riscv_get_timebase_frequency(first_cpu) :
                          RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ);
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
    qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");

    intc_phandles = g_new0(uint32_t, ms->smp.cpus);

    phandle_pos = ms->smp.cpus;
    for (socket = (socket_count - 1); socket >= 0; socket--) {
        g_autofree char *clust_name = NULL;
        phandle_pos -= s->soc[socket].num_harts;

        clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
        qemu_fdt_add_subnode(ms->fdt, clust_name);

        create_fdt_socket_cpus(s, socket, clust_name, phandle,
                               &intc_phandles[phandle_pos]);

        create_fdt_socket_memory(s, memmap, socket);

        if (virt_aclint_allowed() && s->have_aclint) {
            create_fdt_socket_aclint(s, memmap, socket,
                                     &intc_phandles[phandle_pos]);
        } else if (tcg_enabled()) {
            create_fdt_socket_clint(s, memmap, socket,
                                    &intc_phandles[phandle_pos]);
        }
    }

    if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
        create_fdt_imsic(s, memmap, phandle, intc_phandles,
                         &msi_m_phandle, &msi_s_phandle);
        *msi_pcie_phandle = msi_s_phandle;
    }

    /* KVM AIA only has one APLIC instance */
    if (kvm_enabled() && virt_use_kvm_aia(s)) {
        create_fdt_socket_aplic(s, memmap, 0,
                                msi_m_phandle, msi_s_phandle, phandle,
                                &intc_phandles[0], xplic_phandles,
                                ms->smp.cpus);
    } else {
        phandle_pos = ms->smp.cpus;
        for (socket = (socket_count - 1); socket >= 0; socket--) {
            phandle_pos -= s->soc[socket].num_harts;

            if (s->aia_type == VIRT_AIA_TYPE_NONE) {
                create_fdt_socket_plic(s, memmap, socket, phandle,
                                       &intc_phandles[phandle_pos],
                                       xplic_phandles);
            } else {
                create_fdt_socket_aplic(s, memmap, socket,
                                        msi_m_phandle, msi_s_phandle, phandle,
                                        &intc_phandles[phandle_pos],
                                        xplic_phandles,
                                        s->soc[socket].num_harts);
            }
        }
    }

    if (kvm_enabled() && virt_use_kvm_aia(s)) {
        *irq_mmio_phandle = xplic_phandles[0];
        *irq_virtio_phandle = xplic_phandles[0];
        *irq_pcie_phandle = xplic_phandles[0];
    } else {
        for (socket = 0; socket < socket_count; socket++) {
            if (socket == 0) {
                *irq_mmio_phandle = xplic_phandles[socket];
                *irq_virtio_phandle = xplic_phandles[socket];
                *irq_pcie_phandle = xplic_phandles[socket];
            }
            if (socket == 1) {
                *irq_virtio_phandle = xplic_phandles[socket];
                *irq_pcie_phandle = xplic_phandles[socket];
            }
            if (socket == 2) {
                *irq_pcie_phandle = xplic_phandles[socket];
            }
        }
    }

    riscv_socket_fdt_write_distance_matrix(ms);
}

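/* Add one virtio_mmio node per virtio MMIO transport slot. */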
static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
                              uint32_t irq_virtio_phandle)
{
    int i;
    MachineState *ms = MACHINE(s);

    for (i = 0; i < VIRTIO_COUNT; i++) {
        g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx",
            (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));

        qemu_fdt_add_subnode(ms->fdt, name);
        qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio");
        qemu_fdt_setprop_cells(ms->fdt, name, "reg",
            0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            0x0, memmap[VIRT_VIRTIO].size);
        qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
                              irq_virtio_phandle);
        if (s->aia_type == VIRT_AIA_TYPE_NONE) {
            qemu_fdt_setprop_cell(ms->fdt, name, "interrupts",
                                  VIRTIO_IRQ + i);
        } else {
            qemu_fdt_setprop_cells(ms->fdt, name, "interrupts",
                                   VIRTIO_IRQ + i, 0x4);
        }
    }
}

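/*
 * Describe the generic ECAM PCIe host controller. The "ranges"
 * property advertises three windows: a legacy I/O port range at
 * VIRT_PCIE_PIO (starting at PCI address 0), a 32-bit MMIO range at
 * VIRT_PCIE_MMIO, and a 64-bit MMIO range above 4 GiB at
 * virt_high_pcie_memmap. The two MMIO windows are identity-mapped
 * between PCI bus and CPU addresses.
 */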
static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
                            uint32_t irq_pcie_phandle,
                            uint32_t msi_pcie_phandle)
{
    g_autofree char *name = NULL;
    MachineState *ms = MACHINE(s);

    name = g_strdup_printf("/soc/pci@%lx",
                           (long) memmap[VIRT_PCIE_ECAM].base);
    qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells",
                          FDT_PCI_ADDR_CELLS);
    qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells",
                          FDT_PCI_INT_CELLS);
    qemu_fdt_setprop_cell(ms->fdt, name, "#size-cells", 0x2);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible",
                            "pci-host-ecam-generic");
    qemu_fdt_setprop_string(ms->fdt, name, "device_type", "pci");
    qemu_fdt_setprop_cell(ms->fdt, name, "linux,pci-domain", 0);
    qemu_fdt_setprop_cells(ms->fdt, name, "bus-range", 0,
                           memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
    qemu_fdt_setprop(ms->fdt, name, "dma-coherent", NULL, 0);
    if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
        qemu_fdt_setprop_cell(ms->fdt, name, "msi-parent", msi_pcie_phandle);
    }
    qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0,
                           memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
    qemu_fdt_setprop_sized_cells(ms->fdt, name, "ranges",
        1, FDT_PCI_RANGE_IOPORT, 2, 0,
        2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size,
        1, FDT_PCI_RANGE_MMIO,
        2, memmap[VIRT_PCIE_MMIO].base,
        2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size,
        1, FDT_PCI_RANGE_MMIO_64BIT,
        2, virt_high_pcie_memmap.base,
        2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);

    create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle);
}

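/*
 * Reset and poweroff are implemented on top of the sifive_test
 * device: the guest writes a magic value into a syscon regmap and
 * QEMU reacts. The Linux syscon-reboot/syscon-poweroff drivers pick
 * up the regmap phandle, offset and value from the properties set
 * below, e.g. writing FINISHER_RESET at offset 0 of the test device
 * triggers a system reset.
 */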
static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
                             uint32_t *phandle)
{
    char *name;
    uint32_t test_phandle;
    MachineState *ms = MACHINE(s);

    test_phandle = (*phandle)++;
    name = g_strdup_printf("/soc/test@%lx",
                           (long)memmap[VIRT_TEST].base);
    qemu_fdt_add_subnode(ms->fdt, name);
    {
        static const char * const compat[3] = {
            "sifive,test1", "sifive,test0", "syscon"
        };
        qemu_fdt_setprop_string_array(ms->fdt, name, "compatible",
                                      (char **)&compat, ARRAY_SIZE(compat));
    }
    qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                           0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size);
    qemu_fdt_setprop_cell(ms->fdt, name, "phandle", test_phandle);
    test_phandle = qemu_fdt_get_phandle(ms->fdt, name);
    g_free(name);

    name = g_strdup_printf("/reboot");
    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-reboot");
    qemu_fdt_setprop_cell(ms->fdt, name, "regmap", test_phandle);
    qemu_fdt_setprop_cell(ms->fdt, name, "offset", 0x0);
    qemu_fdt_setprop_cell(ms->fdt, name, "value", FINISHER_RESET);
    g_free(name);

    name = g_strdup_printf("/poweroff");
    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-poweroff");
    qemu_fdt_setprop_cell(ms->fdt, name, "regmap", test_phandle);
    qemu_fdt_setprop_cell(ms->fdt, name, "offset", 0x0);
    qemu_fdt_setprop_cell(ms->fdt, name, "value", FINISHER_PASS);
    g_free(name);
}

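/*
 * The 16550A UART node also becomes the default console via
 * "stdout-path". As an illustration, assuming the default memmap
 * (UART0 at 0x10000000, size 0x100, UART0_IRQ 10), the generated
 * node looks roughly like:
 *
 *   serial@10000000 {
 *       interrupts = <0x0a>;
 *       interrupt-parent = <&plic>;
 *       clock-frequency = <3686400>;
 *       reg = <0x0 0x10000000 0x0 0x100>;
 *       compatible = "ns16550a";
 *   };
 */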
static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
                            uint32_t irq_mmio_phandle)
{
    g_autofree char *name = NULL;
    MachineState *ms = MACHINE(s);

    name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base);
    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible", "ns16550a");
    qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                           0x0, memmap[VIRT_UART0].base,
                           0x0, memmap[VIRT_UART0].size);
    qemu_fdt_setprop_cell(ms->fdt, name, "clock-frequency", 3686400);
    qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_mmio_phandle);
    if (s->aia_type == VIRT_AIA_TYPE_NONE) {
        qemu_fdt_setprop_cell(ms->fdt, name, "interrupts", UART0_IRQ);
    } else {
        qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", UART0_IRQ, 0x4);
    }

    qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
}

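/*
 * Advertise the Goldfish RTC. In AIA mode the second interrupt cell
 * (0x4) marks the interrupt as level-triggered, active high, matching
 * the virtio and UART nodes above.
 */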
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
                           uint32_t irq_mmio_phandle)
{
    g_autofree char *name = NULL;
    MachineState *ms = MACHINE(s);

    name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base);
    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible",
                            "google,goldfish-rtc");
    qemu_fdt_setprop_cells(ms->fdt, name, "reg",
                           0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
    qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
                          irq_mmio_phandle);
    if (s->aia_type == VIRT_AIA_TYPE_NONE) {
        qemu_fdt_setprop_cell(ms->fdt, name, "interrupts", RTC_IRQ);
    } else {
        qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", RTC_IRQ, 0x4);
    }
}

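/*
 * The VIRT_FLASH region is split into two equally sized pflash banks,
 * exposed as a single "cfi-flash" node with two reg entries so that
 * firmware code and variables can live in separate banks (32 MiB each
 * with the default memmap; that size is an assumption of this comment,
 * not something enforced here).
 */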
static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
{
    MachineState *ms = MACHINE(s);
    hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
    g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase);

    qemu_fdt_add_subnode(ms->fdt, name);
    qemu_fdt_setprop_string(ms->fdt, name, "compatible", "cfi-flash");
    qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg",
                                 2, flashbase, 2, flashsize,
                                 2, flashbase + flashsize, 2, flashsize);
    qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4);
}

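/*
 * Expose the fw_cfg MMIO window so guest firmware can locate it by
 * its "qemu,fw-cfg-mmio" compatible string instead of hardcoding the
 * address.
 */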
static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap)
{
    MachineState *ms = MACHINE(s);
    hwaddr base = memmap[VIRT_FW_CFG].base;
    hwaddr size = memmap[VIRT_FW_CFG].size;
    g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);

    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename,
                            "compatible", "qemu,fw-cfg-mmio");
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
}

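/*
 * The "iommu-map" property routes every PCI requester ID through the
 * virtio-iommu except the IOMMU's own BDF: the first entry covers
 * RIDs [0, bdf) and the second covers (bdf, 0xffff], so the device
 * never translates its own traffic.
 */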
static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf)
{
    const char compat[] = "virtio,pci-iommu\0pci1af4,1057";
    void *fdt = MACHINE(s)->fdt;
    uint32_t iommu_phandle;
    g_autofree char *iommu_node = NULL;
    g_autofree char *pci_node = NULL;

    pci_node = g_strdup_printf("/soc/pci@%lx",
                               (long) virt_memmap[VIRT_PCIE_ECAM].base);
    iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node,
                                 PCI_SLOT(bdf), PCI_FUNC(bdf));
    iommu_phandle = qemu_fdt_alloc_phandle(fdt);

    qemu_fdt_add_subnode(fdt, iommu_node);

    qemu_fdt_setprop(fdt, iommu_node, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(fdt, iommu_node, "reg",
                                 1, bdf << 8, 1, 0, 1, 0,
                                 1, 0, 1, 0);
    qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1);
    qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle);

    qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map",
                           0, iommu_phandle, 0, bdf,
                           bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf);
}

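/*
 * FDT creation is split in two phases: create_fdt() runs at machine
 * init and builds the nodes that do not depend on dynamic sysbus
 * devices, while finalize_fdt() runs from the machine-done notifier
 * once those devices exist. create_fdt_sockets() allocates the
 * interrupt controller phandles that the virtio, PCIe, UART and RTC
 * nodes below then reference.
 */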
static void finalize_fdt(RISCVVirtState *s)
{
    uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
    uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;

    create_fdt_sockets(s, virt_memmap, &phandle, &irq_mmio_phandle,
                       &irq_pcie_phandle, &irq_virtio_phandle,
                       &msi_pcie_phandle);

    create_fdt_virtio(s, virt_memmap, irq_virtio_phandle);

    create_fdt_pcie(s, virt_memmap, irq_pcie_phandle, msi_pcie_phandle);

    create_fdt_reset(s, virt_memmap, &phandle);

    create_fdt_uart(s, virt_memmap, irq_mmio_phandle);

    create_fdt_rtc(s, virt_memmap, irq_mmio_phandle);
}

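/*
 * First phase of FDT creation: the root, /soc and /chosen nodes that
 * realize()-time consumers (such as the guest-loader) may need, plus
 * an empty /soc/pci node so PCIe hotplug code can attach entries
 * before finalize_fdt() fills in the controller properties.
 */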
static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
{
    MachineState *ms = MACHINE(s);
    uint8_t rng_seed[32];
    g_autofree char *name = NULL;

    ms->fdt = create_device_tree(&s->fdt_size);
    if (!ms->fdt) {
        error_report("create_device_tree() failed");
        exit(1);
    }

    qemu_fdt_setprop_string(ms->fdt, "/", "model", "riscv-virtio,qemu");
    qemu_fdt_setprop_string(ms->fdt, "/", "compatible", "riscv-virtio");
    qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2);

    qemu_fdt_add_subnode(ms->fdt, "/soc");
    qemu_fdt_setprop(ms->fdt, "/soc", "ranges", NULL, 0);
    qemu_fdt_setprop_string(ms->fdt, "/soc", "compatible", "simple-bus");
    qemu_fdt_setprop_cell(ms->fdt, "/soc", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(ms->fdt, "/soc", "#address-cells", 0x2);

    /*
     * The "/soc/pci@..." node is needed for PCIE hotplugs
     * that might happen before finalize_fdt().
     */
    name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base);
    qemu_fdt_add_subnode(ms->fdt, name);

    qemu_fdt_add_subnode(ms->fdt, "/chosen");

    /* Pass seed to RNG */
    qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed));
    qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed",
                     rng_seed, sizeof(rng_seed));

    create_fdt_flash(s, memmap);
    create_fdt_fw_cfg(s, memmap);
    create_fdt_pmu(s);
}

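/*
 * The GPEX host bridge exports its MMIO space as one large region;
 * aliases map only the configured ECAM, 32-bit and high (above 4 GiB)
 * MMIO slices into system memory at their memmap addresses, keeping
 * guest-physical and PCI bus addresses equal for the MMIO windows.
 */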
static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
                                          DeviceState *irqchip,
                                          RISCVVirtState *s)
{
    DeviceState *dev;
    MemoryRegion *ecam_alias, *ecam_reg;
    MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg;
    hwaddr ecam_base = s->memmap[VIRT_PCIE_ECAM].base;
    hwaddr ecam_size = s->memmap[VIRT_PCIE_ECAM].size;
    hwaddr mmio_base = s->memmap[VIRT_PCIE_MMIO].base;
    hwaddr mmio_size = s->memmap[VIRT_PCIE_MMIO].size;
    hwaddr high_mmio_base = virt_high_pcie_memmap.base;
    hwaddr high_mmio_size = virt_high_pcie_memmap.size;
    hwaddr pio_base = s->memmap[VIRT_PCIE_PIO].base;
    hwaddr pio_size = s->memmap[VIRT_PCIE_PIO].size;
    qemu_irq irq;
    int i;

    dev = qdev_new(TYPE_GPEX_HOST);

    /* Set GPEX object properties for the virt machine */
    object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE,
                             ecam_base, NULL);
    object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE,
                            ecam_size, NULL);
    object_property_set_uint(OBJECT(GPEX_HOST(dev)),
                             PCI_HOST_BELOW_4G_MMIO_BASE,
                             mmio_base, NULL);
    object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE,
                            mmio_size, NULL);
    object_property_set_uint(OBJECT(GPEX_HOST(dev)),
                             PCI_HOST_ABOVE_4G_MMIO_BASE,
                             high_mmio_base, NULL);
    object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE,
                            high_mmio_size, NULL);
    object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE,
                             pio_base, NULL);
    object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE,
                            pio_size, NULL);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    ecam_alias = g_new0(MemoryRegion, 1);
    ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
    memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
                             ecam_reg, 0, ecam_size);
    memory_region_add_subregion(get_system_memory(), ecam_base, ecam_alias);

    mmio_alias = g_new0(MemoryRegion, 1);
    mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
    memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
                             mmio_reg, mmio_base, mmio_size);
    memory_region_add_subregion(get_system_memory(), mmio_base, mmio_alias);

    /* Map high MMIO space */
    high_mmio_alias = g_new0(MemoryRegion, 1);
    memory_region_init_alias(high_mmio_alias, OBJECT(dev), "pcie-mmio-high",
                             mmio_reg, high_mmio_base, high_mmio_size);
    memory_region_add_subregion(get_system_memory(), high_mmio_base,
                                high_mmio_alias);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);

    for (i = 0; i < GPEX_NUM_IRQS; i++) {
        irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i);

        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
        gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
    }

    GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus;
    return dev;
}

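/*
 * fw_cfg register layout within the VIRT_FW_CFG region: the 8-byte
 * wide data register sits at the region base, the selector (control)
 * register at base + 8, and the DMA address register at base + 16,
 * matching the argument order of fw_cfg_init_mem_wide() below.
 */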
static FWCfgState *create_fw_cfg(const MachineState *ms)
{
    hwaddr base = virt_memmap[VIRT_FW_CFG].base;
    FWCfgState *fw_cfg;

    fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16,
                                  &address_space_memory);
    fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)ms->smp.cpus);

    return fw_cfg;
}

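/*
 * riscv_plic_hart_config_string() emits one "MS" entry per hart
 * (e.g. "MS,MS,MS,MS" for a four-hart socket), giving every hart
 * both an M-mode and an S-mode PLIC context. The exact string format
 * is an assumption about the helper, not something this function
 * checks.
 */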
static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
                                     int base_hartid, int hart_count)
{
    DeviceState *ret;
    g_autofree char *plic_hart_config = NULL;

    /* Per-socket PLIC hart topology configuration string */
    plic_hart_config = riscv_plic_hart_config_string(hart_count);

    /* Per-socket PLIC */
    ret = sifive_plic_create(
        memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
        plic_hart_config, hart_count, base_hartid,
        VIRT_IRQCHIP_NUM_SOURCES,
        ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
        VIRT_PLIC_PRIORITY_BASE,
        VIRT_PLIC_PENDING_BASE,
        VIRT_PLIC_ENABLE_BASE,
        VIRT_PLIC_ENABLE_STRIDE,
        VIRT_PLIC_CONTEXT_BASE,
        VIRT_PLIC_CONTEXT_STRIDE,
        memmap[VIRT_PLIC].size);

    return ret;
}

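/*
 * AIA wiring: in MSI mode each hart gets an IMSIC (M-level ones only
 * when not running under KVM) and the APLICs are created without
 * direct hart connections, since they forward interrupts as MSIs.
 * The M-level APLIC, when present, is the parent of the S-level one;
 * under KVM only the S-level controller is returned as the machine's
 * irqchip.
 */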
static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
                                    const MemMapEntry *memmap, int socket,
                                    int base_hartid, int hart_count)
{
    int i;
    hwaddr addr;
    uint32_t guest_bits;
    DeviceState *aplic_s = NULL;
    DeviceState *aplic_m = NULL;
    bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;

    if (msimode) {
        if (!kvm_enabled()) {
            /* Per-socket M-level IMSICs */
            addr = memmap[VIRT_IMSIC_M].base +
                   socket * VIRT_IMSIC_GROUP_MAX_SIZE;
            for (i = 0; i < hart_count; i++) {
                riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
                                   base_hartid + i, true, 1,
                                   VIRT_IRQCHIP_NUM_MSIS);
            }
        }

        /* Per-socket S-level IMSICs */
        guest_bits = imsic_num_bits(aia_guests + 1);
        addr = memmap[VIRT_IMSIC_S].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
        for (i = 0; i < hart_count; i++) {
            riscv_imsic_create(addr + i * IMSIC_HART_SIZE(guest_bits),
                               base_hartid + i, false, 1 + aia_guests,
                               VIRT_IRQCHIP_NUM_MSIS);
        }
    }

    if (!kvm_enabled()) {
        /* Per-socket M-level APLIC */
        aplic_m = riscv_aplic_create(memmap[VIRT_APLIC_M].base +
                                     socket * memmap[VIRT_APLIC_M].size,
                                     memmap[VIRT_APLIC_M].size,
                                     (msimode) ? 0 : base_hartid,
                                     (msimode) ? 0 : hart_count,
                                     VIRT_IRQCHIP_NUM_SOURCES,
                                     VIRT_IRQCHIP_NUM_PRIO_BITS,
                                     msimode, true, NULL);
    }

    /* Per-socket S-level APLIC */
    aplic_s = riscv_aplic_create(memmap[VIRT_APLIC_S].base +
                                 socket * memmap[VIRT_APLIC_S].size,
                                 memmap[VIRT_APLIC_S].size,
                                 (msimode) ? 0 : base_hartid,
                                 (msimode) ? 0 : hart_count,
                                 VIRT_IRQCHIP_NUM_SOURCES,
                                 VIRT_IRQCHIP_NUM_PRIO_BITS,
                                 msimode, false, aplic_m);

    return kvm_enabled() ? aplic_s : aplic_m;
}

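/*
 * The platform bus reserves a window of MMIO space and a contiguous
 * block of irqchip inputs (VIRT_PLATFORM_BUS_IRQ onwards) so that
 * dynamic sysbus devices created with -device can be mapped and wired
 * without needing a dedicated memmap entry per device.
 */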
static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
{
    DeviceState *dev;
    SysBusDevice *sysbus;
    const MemMapEntry *memmap = virt_memmap;
    int i;
    MemoryRegion *sysmem = get_system_memory();

    dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
    dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
    qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
    qdev_prop_set_uint32(dev, "mmio_size", memmap[VIRT_PLATFORM_BUS].size);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    s->platform_bus_dev = dev;

    sysbus = SYS_BUS_DEVICE(dev);
    for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) {
        int irq = VIRT_PLATFORM_BUS_IRQ + i;
        sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(irqchip, irq));
    }

    memory_region_add_subregion(sysmem,
                                memmap[VIRT_PLATFORM_BUS].base,
                                sysbus_mmio_get_region(sysbus, 0));
}

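/*
 * Processor family codes 0x200 and 0x201 are the values the SMBIOS
 * specification assigns to RISC-V RV32 and RV64 respectively.
 */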
static void virt_build_smbios(RISCVVirtState *s)
{
    MachineClass *mc = MACHINE_GET_CLASS(s);
    MachineState *ms = MACHINE(s);
    uint8_t *smbios_tables, *smbios_anchor;
    size_t smbios_tables_len, smbios_anchor_len;
    struct smbios_phys_mem_area mem_array;
    const char *product = "QEMU Virtual Machine";

    if (kvm_enabled()) {
        product = "KVM Virtual Machine";
    }

    smbios_set_defaults("QEMU", product, mc->name);

    if (riscv_is_32bit(&s->soc[0])) {
        smbios_set_default_processor_family(0x200);
    } else {
        smbios_set_default_processor_family(0x201);
    }

    /* build the array of physical mem area from base_memmap */
    mem_array.address = s->memmap[VIRT_DRAM].base;
    mem_array.length = ms->ram_size;

    smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64,
                      &mem_array, 1,
                      &smbios_tables, &smbios_tables_len,
                      &smbios_anchor, &smbios_anchor_len,
                      &error_fatal);

    if (smbios_anchor) {
        fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-tables",
                        smbios_tables, smbios_tables_len);
        fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-anchor",
                        smbios_anchor, smbios_anchor_len);
    }
}

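/*
 * Boot assembly at machine-done time: finalize the FDT (unless the
 * user supplied one), load firmware and kernel, place the FDT in
 * DRAM, then generate the reset vector ROM that passes the FDT
 * address along and jumps to the firmware or kernel entry point.
 */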
static void virt_machine_done(Notifier *notifier, void *data)
|
|
|
|
{
|
|
|
|
RISCVVirtState *s = container_of(notifier, RISCVVirtState,
|
|
|
|
machine_done);
|
|
|
|
const MemMapEntry *memmap = virt_memmap;
|
|
|
|
MachineState *machine = MACHINE(s);
|
|
|
|
target_ulong start_addr = memmap[VIRT_DRAM].base;
|
|
|
|
target_ulong firmware_end_addr, kernel_start_addr;
|
2022-12-29 12:18:26 +03:00
|
|
|
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
|
2023-06-20 16:50:06 +03:00
|
|
|
uint64_t fdt_load_addr;
|
hw/riscv: virt: Assume M-mode FW in pflash0 only when "-bios none"
Currently, virt machine supports two pflash instances each with
32MB size. However, the first pflash is always assumed to
contain M-mode firmware and reset vector is set to this if
enabled. Hence, for S-mode payloads like EDK2, only one pflash
instance is available for use. This means both code and NV variables
of EDK2 will need to use the same pflash.
The OS distros keep the EDK2 FW code as readonly. When non-volatile
variables also need to share the same pflash, it is not possible
to keep it as readonly since variables need write access.
To resolve this issue, the code and NV variables need to be separated.
But in that case we need an extra flash. Hence, modify the convention
for non-KVM guests such that, pflash0 will contain the M-mode FW
only when "-bios none" option is used. Otherwise, pflash0 will contain
the S-mode payload FW. This enables both pflash instances available
for EDK2 use.
When KVM is enabled, pflash0 is always assumed to contain the
S-mode payload firmware only.
Example usage:
1) pflash0 containing M-mode FW
qemu-system-riscv64 -bios none -pflash <mmode_fw> -machine virt
or
qemu-system-riscv64 -bios none \
-drive file=<mmode_fw>,if=pflash,format=raw,unit=0 -machine virt
2) pflash0 containing S-mode payload like EDK2
qemu-system-riscv64 -pflash <smode_fw_code> -pflash <smode_vars> -machine virt
or
qemu-system-riscv64 -bios <opensbi_fw> \
-pflash <smode_fw_code> \
-pflash <smode_vars> \
-machine virt
or
qemu-system-riscv64 -bios <opensbi_fw> \
-drive file=<smode_fw_code>,if=pflash,format=raw,unit=0,readonly=on \
-drive file=<smode_fw_vars>,if=pflash,format=raw,unit=1 \
-machine virt
Signed-off-by: Sunil V L <sunilvl@ventanamicro.com>
Reported-by: Heinrich Schuchardt <xypron.glpk@gmx.de>
Tested-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230601045910.18646-2-sunilvl@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2023-06-01 07:59:08 +03:00
|
|
|
uint64_t kernel_entry = 0;
|
2023-06-01 07:59:09 +03:00
|
|
|
BlockBackend *pflash_blk0;
|
2022-04-28 02:41:41 +03:00
|
|
|
|
hw/riscv/virt.c: do create_fdt() earlier, add finalize_fdt()
Commit 49554856f0 fixed a problem, where TPM devices were not appearing
in the FDT, by delaying the FDT creation up until virt_machine_done().
This create a side effect (see gitlab #1925) - devices that need access
to the '/chosen' FDT node during realize() stopped working because, at
that point, we don't have a FDT.
This happens because our FDT creation is monolithic, but it doesn't need
to be. We can add the needed FDT components for realize() time and, at
the same time, do another FDT round where we account for dynamic sysbus
devices. In other words, the problem fixed by 49554856f0 could also be
fixed by postponing only create_fdt_sockets() and its dependencies,
leaving everything else from create_fdt() to be done during init().
Split the FDT creation in two parts:
- create_fdt(), now moved back to virt_machine_init(), will create FDT
nodes that doesn't depend on additional (dynamic) devices from the
sysbus;
- a new finalize_fdt() step is added, where create_fdt_sockets() and
friends is executed, accounting for the dynamic sysbus devices that
were added during realize().
This will make both use cases happy: TPM devices are still working as
intended, and devices such as 'guest-loader' have a FDT to work on
during realize().
Fixes: 49554856f0 ("riscv: Generate devicetree only after machine initialization is complete")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1925
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20231110172559.73209-1-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2023-11-10 20:25:59 +03:00
|
|
|
/*
|
|
|
|
* An user provided dtb must include everything, including
|
|
|
|
* dynamic sysbus devices. Our FDT needs to be finalized.
|
|
|
|
*/
|
|
|
|
if (machine->dtb == NULL) {
|
|
|
|
finalize_fdt(s);
|
2023-07-06 06:59:37 +03:00
|
|
|
}
|
|
|
|
|
2022-04-28 02:41:41 +03:00
|
|
|
/*
|
|
|
|
* Only direct boot kernel is currently supported for KVM VM,
|
|
|
|
* so the "-bios" parameter is not supported when KVM is enabled.
|
|
|
|
*/
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
if (machine->firmware) {
|
|
|
|
if (strcmp(machine->firmware, "none")) {
|
|
|
|
error_report("Machine mode firmware is not supported in "
|
|
|
|
"combination with KVM.");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
machine->firmware = g_strdup("none");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-29 12:18:26 +03:00
|
|
|
firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
|
|
|
|
start_addr, NULL);
|
2022-04-28 02:41:41 +03:00
|
|
|
|
2023-06-01 07:59:09 +03:00
|
|
|
pflash_blk0 = pflash_cfi01_get_blk(s->flash[0]);
|
|
|
|
if (pflash_blk0) {
|
hw/riscv: virt: Assume M-mode FW in pflash0 only when "-bios none"
Currently, virt machine supports two pflash instances each with
32MB size. However, the first pflash is always assumed to
contain M-mode firmware and reset vector is set to this if
enabled. Hence, for S-mode payloads like EDK2, only one pflash
instance is available for use. This means both code and NV variables
of EDK2 will need to use the same pflash.
The OS distros keep the EDK2 FW code as readonly. When non-volatile
variables also need to share the same pflash, it is not possible
to keep it as readonly since variables need write access.
To resolve this issue, the code and NV variables need to be separated.
But in that case we need an extra flash. Hence, modify the convention
for non-KVM guests such that, pflash0 will contain the M-mode FW
only when "-bios none" option is used. Otherwise, pflash0 will contain
the S-mode payload FW. This enables both pflash instances available
for EDK2 use.
When KVM is enabled, pflash0 is always assumed to contain the
S-mode payload firmware only.
Example usage:
1) pflash0 containing M-mode FW
qemu-system-riscv64 -bios none -pflash <mmode_fw> -machine virt
or
qemu-system-riscv64 -bios none \
-drive file=<mmode_fw>,if=pflash,format=raw,unit=0 -machine virt
2) pflash0 containing S-mode payload like EDK2
qemu-system-riscv64 -pflash <smode_fw_code> -pflash <smode_vars> -machine virt
or
qemu-system-riscv64 -bios <opensbi_fw> \
-pflash <smode_fw_code> \
-pflash <smode_vars> \
-machine virt
or
qemu-system-riscv64 -bios <opensbi_fw> \
-drive file=<smode_fw_code>,if=pflash,format=raw,unit=0,readonly=on \
-drive file=<smode_fw_vars>,if=pflash,format=raw,unit=1 \
-machine virt
Signed-off-by: Sunil V L <sunilvl@ventanamicro.com>
Reported-by: Heinrich Schuchardt <xypron.glpk@gmx.de>
Tested-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230601045910.18646-2-sunilvl@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2023-06-01 07:59:08 +03:00
|
|
|
if (machine->firmware && !strcmp(machine->firmware, "none") &&
|
|
|
|
!kvm_enabled()) {
|
|
|
|
/*
|
|
|
|
* Pflash was supplied but bios is none and not KVM guest,
|
|
|
|
* let's overwrite the address we jump to after reset to
|
|
|
|
* the base of the flash.
|
|
|
|
*/
|
|
|
|
start_addr = virt_memmap[VIRT_FLASH].base;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Pflash was supplied but either KVM guest or bios is not none.
|
|
|
|
* In this case, base of the flash would contain S-mode payload.
|
|
|
|
*/
|
|
|
|
riscv_setup_firmware_boot(machine);
|
|
|
|
kernel_entry = virt_memmap[VIRT_FLASH].base;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (machine->kernel_filename && !kernel_entry) {
|
2022-04-28 02:41:41 +03:00
|
|
|
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
|
|
|
|
firmware_end_addr);
|
|
|
|
|
2023-02-06 17:00:20 +03:00
|
|
|
kernel_entry = riscv_load_kernel(machine, &s->soc[0],
|
2023-02-06 17:00:21 +03:00
|
|
|
kernel_start_addr, true, NULL);
|
2022-04-28 02:41:41 +03:00
|
|
|
}
|
|
|
|
|

hw/riscv: change riscv_compute_fdt_addr() semantics
As it stands, riscv_compute_fdt_addr() receives a dram_base, a mem_size
(defaulted to MachineState::ram_size in all boards) and the FDT pointer,
and it makes a very important assumption: that the DRAM interval
dram_base + mem_size is contiguous. This is indeed the case for most
boards that use an FDT.
The Icicle Kit board works with 2 distinct RAM banks that are separated
by a gap. We have a lower bank of 1 GiB, a gap, and then high memory
starting at 64 GiB. MachineClass::default_ram_size for this board is set
to 1.5 GiB, and machine_init() enforces it as the minimal RAM size,
meaning that we'll always have at least 512 MiB in the Hi RAM area.
Using riscv_compute_fdt_addr() on this board is odd: not only does the
board have sparse RAM, and not only is the function called with the base
address of the Lo RAM area, but it is also given a mem_size that is
guaranteed to reach into the Hi RAM. None of the function's assumptions
hold for this board.
In fact, what makes the function work at all in this case is a
coincidence. Commit 1a475d39ef54 introduced a 3 GiB boundary for the FDT,
down from 4 GiB, that is enforced if dram_base is lower than 3072 MiB.
For the Icicle Kit board, memmap[MICROCHIP_PFSOC_DRAM_LO].base is
0x80000000 (2 GiB) and the bank is 1 GiB in size, so it falls under the
condition that puts the FDT below 3 GiB, which happens to be exactly the
end of DRAM_LO. If the base address of the Lo area started later than
3 GiB, this function would be unusable by the board. Changing any
assumption inside riscv_compute_fdt_addr() could likewise break it by
accident.
Let's change riscv_compute_fdt_addr() semantics to be appropriate for the
Icicle Kit board and for future boards that might have sparse RAM
topologies to worry about:
- drop the requirement that the dram_base + mem_size area be contiguous,
since this is already not the case today;
- receive an extra 'dram_size' attribute that refers to a contiguous RAM
block in which the board wants the FDT to reside.
Together with 'mem_size' and 'fdt', which are now consumed via a
MachineState pointer, we can make clear assumptions based on the DRAM
block and the total mem_size available, ensuring that the FDT is placed
at a valid RAM address.
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230201171212.1219375-4-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>

    fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base,
                                           memmap[VIRT_DRAM].size,
                                           machine);
    riscv_load_fdt(fdt_load_addr, machine->fdt);
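
    /*
     * For a board with sparse RAM such as the Icicle Kit described above,
     * the call would instead name the contiguous Lo bank. A minimal
     * sketch, reusing the MICROCHIP_PFSOC_DRAM_LO name from the commit
     * message (illustrative, not that board's actual code):
     *
     *   fdt_load_addr =
     *       riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
     *                              memmap[MICROCHIP_PFSOC_DRAM_LO].size,
     *                              machine);
     *
     * dram_size names the contiguous block the FDT must land in; the
     * total mem_size comes from the MachineState.
     */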

    /* load the reset vector */
    riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr,
                              virt_memmap[VIRT_MROM].base,
                              virt_memmap[VIRT_MROM].size, kernel_entry,
                              fdt_load_addr);

    /*
     * Only direct kernel boot is currently supported for KVM VMs, so set
     * up the kernel start address and FDT address here.
     * TODO: Support firmware loading and integrate with the TCG start path.
     */
    if (kvm_enabled()) {
        riscv_setup_direct_kernel(kernel_entry, fdt_load_addr);
    }
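
    /*
     * An illustrative KVM direct-boot invocation, with placeholders in the
     * style of the pflash examples above (not taken from the source):
     *
     *   qemu-system-riscv64 -machine virt -accel kvm \
     *       -kernel <Image> -append <cmdline>
     */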

    virt_build_smbios(s);

    if (virt_is_acpi_enabled(s)) {
        virt_acpi_setup(s);
    }
}

static void virt_machine_init(MachineState *machine)
{
    const MemMapEntry *memmap = virt_memmap;
    RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
    MemoryRegion *system_memory = get_system_memory();
    MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
    DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
    int i, base_hartid, hart_count;
    int socket_count = riscv_socket_count(machine);

    /* Check socket count limit */
    if (VIRT_SOCKETS_MAX < socket_count) {
        error_report("number of sockets/nodes should be less than %d",
                     VIRT_SOCKETS_MAX);
        exit(1);
    }

    if (!virt_aclint_allowed() && s->have_aclint) {
        error_report("'aclint' is only available with TCG acceleration");
        exit(1);
    }

    /* Initialize sockets */
    mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
    for (i = 0; i < socket_count; i++) {
        g_autofree char *soc_name = g_strdup_printf("soc%d", i);

        if (!riscv_socket_check_hartids(machine, i)) {
            error_report("discontinuous hartids in socket%d", i);
            exit(1);
        }

        base_hartid = riscv_socket_first_hartid(machine, i);
        if (base_hartid < 0) {
            error_report("can't find hartid base for socket%d", i);
            exit(1);
        }

        hart_count = riscv_socket_hart_count(machine, i);
        if (hart_count < 0) {
            error_report("can't find hart count for socket%d", i);
            exit(1);
        }

        object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
                                TYPE_RISCV_HART_ARRAY);
        object_property_set_str(OBJECT(&s->soc[i]), "cpu-type",
                                machine->cpu_type, &error_abort);
        object_property_set_int(OBJECT(&s->soc[i]), "hartid-base",
                                base_hartid, &error_abort);
        object_property_set_int(OBJECT(&s->soc[i]), "num-harts",
                                hart_count, &error_abort);
        sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal);

        if (virt_aclint_allowed() && s->have_aclint) {
            if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
                /* Per-socket ACLINT MTIMER */
                riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
                        i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                    RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                    base_hartid, hart_count,
                    RISCV_ACLINT_DEFAULT_MTIMECMP,
                    RISCV_ACLINT_DEFAULT_MTIME,
                    RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
            } else {
                /* Per-socket ACLINT MSWI, MTIMER, and SSWI */
                riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
                        i * memmap[VIRT_CLINT].size,
                    base_hartid, hart_count, false);
                riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
                        i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
                    RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                    base_hartid, hart_count,
                    RISCV_ACLINT_DEFAULT_MTIMECMP,
                    RISCV_ACLINT_DEFAULT_MTIME,
                    RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
                riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
                        i * memmap[VIRT_ACLINT_SSWI].size,
                    base_hartid, hart_count, true);
            }
        } else if (tcg_enabled()) {
            /* Per-socket SiFive CLINT */
            riscv_aclint_swi_create(
                memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
                base_hartid, hart_count, false);
            riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
                    i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
                RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
                RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
                RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
        }
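
        /*
         * Worked example of the per-socket layout in the MSWI+MTIMER+SSWI
         * branch, assuming the customary virt memmap values of base
         * 0x2000000 and size 0x10000 for VIRT_CLINT (see the memmap table
         * earlier in this file):
         *
         *   socket 0: MSWI at 0x2000000, MTIMER at 0x2000000 + SWI_SIZE
         *   socket 1: MSWI at 0x2010000, MTIMER at 0x2010000 + SWI_SIZE
         */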

        /* Per-socket interrupt controller */
        if (s->aia_type == VIRT_AIA_TYPE_NONE) {
            s->irqchip[i] = virt_create_plic(memmap, i,
                                             base_hartid, hart_count);
        } else {
            s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
                                            memmap, i, base_hartid,
                                            hart_count);
        }

        /* Try to use a different IRQCHIP instance for each device type */
        if (i == 0) {
            mmio_irqchip = s->irqchip[i];
            virtio_irqchip = s->irqchip[i];
            pcie_irqchip = s->irqchip[i];
        }
        if (i == 1) {
            virtio_irqchip = s->irqchip[i];
            pcie_irqchip = s->irqchip[i];
        }
        if (i == 2) {
            pcie_irqchip = s->irqchip[i];
        }
    }
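
    /*
     * Net effect of the cascade above: with three or more sockets, MMIO,
     * VirtIO and PCIe devices take their interrupts from sockets 0, 1 and
     * 2 respectively; with fewer sockets, the later device classes fall
     * back to the highest socket that exists.
     */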

    if (kvm_enabled() && virt_use_kvm_aia(s)) {
        kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
                             VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
                             memmap[VIRT_APLIC_S].base,
                             memmap[VIRT_IMSIC_S].base,
                             s->aia_guests);
    }

    if (riscv_is_32bit(&s->soc[0])) {
#if HOST_LONG_BITS == 64
        /* limit RAM size in a 32-bit system */
        if (machine->ram_size > 10 * GiB) {
            machine->ram_size = 10 * GiB;
            error_report("Limiting RAM size to 10 GiB");
        }
#endif
        virt_high_pcie_memmap.base = VIRT32_HIGH_PCIE_MMIO_BASE;
        virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
    } else {
        virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
        virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size;
        virt_high_pcie_memmap.base =
            ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
    }

    s->memmap = virt_memmap;
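
    /*
     * Worked example for the 64-bit branch, assuming the customary values
     * of 0x80000000 for memmap[VIRT_DRAM].base and 16 GiB for
     * VIRT64_HIGH_PCIE_MMIO_SIZE: with 4 GiB of RAM the base starts at
     * 0x180000000, and ROUND_UP() to the 16 GiB region size moves it to
     * 0x400000000, keeping the high MMIO window naturally aligned.
     */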

    /* register system main memory (actual RAM) */
    memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
        machine->ram);

    /* boot rom */
    memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
                           memmap[VIRT_MROM].size, &error_fatal);
    memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
                                mask_rom);

    /*
     * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the
     * device tree cannot be altered and we get FDT_ERR_NOSPACE.
     */
    s->fw_cfg = create_fw_cfg(machine);
    rom_set_fw(s->fw_cfg);

    /* SiFive Test MMIO device */
    sifive_test_create(memmap[VIRT_TEST].base);

    /* VirtIO MMIO devices */
    for (i = 0; i < VIRTIO_COUNT; i++) {
        sysbus_create_simple("virtio-mmio",
            memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
    }
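
    /*
     * With the customary virt memmap (VIRT_VIRTIO base 0x10001000, size
     * 0x1000) and a VIRTIO_COUNT of 8, this places the transports at
     * 0x10001000, 0x10002000, ..., 0x10008000, wired to consecutive
     * interrupt lines starting at VIRTIO_IRQ.
     */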

    gpex_pcie_init(system_memory, pcie_irqchip, s);

    create_platform_bus(s, mmio_irqchip);

    serial_mm_init(system_memory, memmap[VIRT_UART0].base,
        0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
        serial_hd(0), DEVICE_LITTLE_ENDIAN);

    sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
        qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));

    for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
        /* Map legacy -drive if=pflash to machine properties */
        pflash_cfi01_legacy_drive(s->flash[i],
                                  drive_get(IF_PFLASH, 0, i));
    }
    virt_flash_map(s, system_memory);

hw/riscv/virt.c: do create_fdt() earlier, add finalize_fdt()
Commit 49554856f0 fixed a problem, where TPM devices were not appearing
in the FDT, by delaying the FDT creation until virt_machine_done(). This
created a side effect (see gitlab #1925): devices that need access to
the '/chosen' FDT node during realize() stopped working because, at that
point, we don't have an FDT.
This happens because our FDT creation is monolithic, but it doesn't need
to be. We can add the FDT components needed at realize() time and, in a
second round, account for the dynamic sysbus devices. In other words,
the problem fixed by 49554856f0 could also have been fixed by postponing
only create_fdt_sockets() and its dependencies, leaving everything else
in create_fdt() to be done during init().
Split the FDT creation in two parts:
- create_fdt(), now moved back to virt_machine_init(), creates the FDT
nodes that don't depend on additional (dynamic) sysbus devices;
- a new finalize_fdt() step runs create_fdt_sockets() and friends,
accounting for the dynamic sysbus devices that were added during
realize().
This makes both use cases happy: TPM devices still work as intended, and
devices such as 'guest-loader' have an FDT to work on during realize().
Fixes: 49554856f0 ("riscv: Generate devicetree only after machine initialization is complete")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1925
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20231110172559.73209-1-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>

    /* load/create device tree */
    if (machine->dtb) {
        machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
        if (!machine->fdt) {
            error_report("load_device_tree() failed");
            exit(1);
        }
    } else {
        create_fdt(s, memmap);
    }
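
    /*
     * A compressed sketch of the two-phase flow the commit message above
     * describes, with bodies elided (finalize_fdt() itself lives outside
     * this excerpt):
     *
     *   virt_machine_init():  create_fdt(s, memmap);
     *       - static nodes only; '/chosen' exists for realize()-time users
     *   virt_machine_done():  finalize_fdt(s);
     *       - create_fdt_sockets() and friends, after the dynamic sysbus
     *         devices have been realized
     */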

    s->machine_done.notify = virt_machine_done;
    qemu_add_machine_init_done_notifier(&s->machine_done);
}

static void virt_machine_instance_init(Object *obj)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    virt_flash_create(s);

    s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
    s->acpi = ON_OFF_AUTO_AUTO;
}

static char *virt_get_aia_guests(Object *obj, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    return g_strdup_printf("%d", s->aia_guests);
}

static void virt_set_aia_guests(Object *obj, const char *val, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    s->aia_guests = atoi(val);
    if (s->aia_guests < 0 || s->aia_guests > VIRT_IRQCHIP_MAX_GUESTS) {
        error_setg(errp, "Invalid number of AIA IMSIC guests");
        error_append_hint(errp, "Valid values should be between 0 and %d.\n",
                          VIRT_IRQCHIP_MAX_GUESTS);
    }
}

static char *virt_get_aia(Object *obj, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
    const char *val;

    switch (s->aia_type) {
    case VIRT_AIA_TYPE_APLIC:
        val = "aplic";
        break;
    case VIRT_AIA_TYPE_APLIC_IMSIC:
        val = "aplic-imsic";
        break;
    default:
        val = "none";
        break;
    }

    return g_strdup(val);
}

static void virt_set_aia(Object *obj, const char *val, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    if (!strcmp(val, "none")) {
        s->aia_type = VIRT_AIA_TYPE_NONE;
    } else if (!strcmp(val, "aplic")) {
        s->aia_type = VIRT_AIA_TYPE_APLIC;
    } else if (!strcmp(val, "aplic-imsic")) {
        s->aia_type = VIRT_AIA_TYPE_APLIC_IMSIC;
    } else {
        error_setg(errp, "Invalid AIA interrupt controller type");
        error_append_hint(errp, "Valid values are none, aplic, and "
                          "aplic-imsic.\n");
    }
}

static bool virt_get_aclint(Object *obj, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    return s->have_aclint;
}

static void virt_set_aclint(Object *obj, bool value, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    s->have_aclint = value;
}

bool virt_is_acpi_enabled(RISCVVirtState *s)
{
    return s->acpi != ON_OFF_AUTO_OFF;
}

static void virt_get_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
    OnOffAuto acpi = s->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void virt_set_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &s->acpi, errp);
}

static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
                                                        DeviceState *dev)
{
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (device_is_dynamic_sysbus(mc, dev) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}

static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    RISCVVirtState *s = RISCV_VIRT_MACHINE(hotplug_dev);

    if (s->platform_bus_dev) {
        MachineClass *mc = MACHINE_GET_CLASS(s);

        if (device_is_dynamic_sysbus(mc, dev)) {
            platform_bus_link_device(PLATFORM_BUS_DEVICE(s->platform_bus_dev),
                                     SYS_BUS_DEVICE(dev));
        }
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev)));
    }
}

static void virt_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

    mc->desc = "RISC-V VirtIO board";
    mc->init = virt_machine_init;
    mc->max_cpus = VIRT_CPUS_MAX;
    mc->default_cpu_type = TYPE_RISCV_CPU_BASE;
    mc->block_default_type = IF_VIRTIO;
    mc->no_cdrom = 1;
    mc->pci_allow_0_address = true;
    mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids;
    mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
    mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
    mc->numa_mem_supported = true;
    /* platform instead of architectural choice */
    mc->cpu_cluster_has_numa_boundary = true;
    mc->default_ram_id = "riscv_virt_board.ram";
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = virt_machine_get_hotplug_handler;

    hc->plug = virt_machine_device_plug_cb;

    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
#ifdef CONFIG_TPM
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif

    object_class_property_add_bool(oc, "aclint", virt_get_aclint,
                                   virt_set_aclint);
    object_class_property_set_description(oc, "aclint",
                                          "(TCG only) Set on/off to "
                                          "enable/disable emulating "
                                          "ACLINT devices");

    object_class_property_add_str(oc, "aia", virt_get_aia,
                                  virt_set_aia);
    object_class_property_set_description(oc, "aia",
                                          "Set type of AIA interrupt "
                                          "controller. Valid values are "
                                          "none, aplic, and aplic-imsic.");

    object_class_property_add_str(oc, "aia-guests",
                                  virt_get_aia_guests,
                                  virt_set_aia_guests);
    {
        g_autofree char *str =
            g_strdup_printf("Set number of guest MMIO pages for AIA IMSIC. "
                            "Valid values are between 0 and %d.",
                            VIRT_IRQCHIP_MAX_GUESTS);
        object_class_property_set_description(oc, "aia-guests", str);
    }

    object_class_property_add(oc, "acpi", "OnOffAuto",
                              virt_get_acpi, virt_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, "acpi",
                                          "Enable ACPI");
}
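
/*
 * The class properties registered above surface as -machine options; an
 * illustrative invocation (not taken from the source) exercising them:
 *
 *   qemu-system-riscv64 -machine virt,aclint=on,aia=aplic-imsic,aia-guests=2,acpi=off
 */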

static const TypeInfo virt_machine_typeinfo = {
    .name       = MACHINE_TYPE_NAME("virt"),
    .parent     = TYPE_MACHINE,
    .class_init = virt_machine_class_init,
    .instance_init = virt_machine_instance_init,
    .instance_size = sizeof(RISCVVirtState),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    },
};

static void virt_machine_init_register_types(void)
{
    type_register_static(&virt_machine_typeinfo);
}

type_init(virt_machine_init_register_types)