hw/arm/virt: Remove virt machine state 'smp_cpus'

virt machine's 'smp_cpus' and machine->smp.cpus must always have the
same value. And, anywhere we have virt machine state we have machine
state. So let's remove the redundancy. Also, to make it easier to see
that machine->smp is the true source for "smp_cpus" and "max_cpus",
avoid passing them in function parameters, preferring instead to get
them from the state.

No functional change intended.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
Reviewed-by: Ying Fang <fangying1@huawei.com>
Message-id: 20201215174815.51520-1-drjones@redhat.com
[PMM: minor formatting tweak to smp_cpus variable declaration]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Author: Andrew Jones, 2020-12-15 18:48:15 +01:00 (committed by Peter Maydell)
Commit: 9cd07db94b (parent: 4663b72a48)
3 changed files with 16 additions and 17 deletions
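To make the intent concrete, here is a minimal, self-contained C sketch of the pattern (hypothetical MachineConf/VirtConf types standing in for QEMU's MachineState/VirtMachineState, not the actual QOM code): the SMP configuration lives only in the parent machine state, and helpers derive the CPU count from it instead of receiving it as a parameter or caching a second copy.

    #include <stdio.h>

    /* Hypothetical stand-ins for QEMU's MachineState / VirtMachineState. */
    typedef struct MachineConf {
        struct {
            int cpus;       /* single source of truth for -smp */
            int max_cpus;
        } smp;
    } MachineConf;

    typedef struct VirtConf {
        MachineConf parent; /* note: no cached smp_cpus field here */
    } VirtConf;

    /* Derive the value from the parent state rather than taking a parameter. */
    static int redist_region_count(const VirtConf *vc, int redist0_capacity)
    {
        return vc->parent.smp.cpus > redist0_capacity ? 2 : 1;
    }

    int main(void)
    {
        VirtConf vc = { .parent = { .smp = { .cpus = 4, .max_cpus = 8 } } };

        printf("redistributor regions: %d\n", redist_region_count(&vc, 2));
        return 0;
    }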

--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c

@@ -59,11 +59,12 @@
 
 #define ACPI_BUILD_TABLE_SIZE 0x20000
 
-static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
+static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms)
 {
+    MachineState *ms = MACHINE(vms);
     uint16_t i;
 
-    for (i = 0; i < smp_cpus; i++) {
+    for (i = 0; i < ms->smp.cpus; i++) {
         Aml *dev = aml_device("C%.03X", i);
         aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
         aml_append(dev, aml_name_decl("_UID", aml_int(i)));
@@ -484,7 +485,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
     gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
     gicd->version = vms->gic_version;
 
-    for (i = 0; i < vms->smp_cpus; i++) {
+    for (i = 0; i < MACHINE(vms)->smp.cpus; i++) {
         AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                            sizeof(*gicc));
         ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
@@ -603,7 +604,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
      * the RTC ACPI device at all when using UEFI.
      */
     scope = aml_scope("\\_SB");
-    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
+    acpi_dsdt_add_cpus(scope, vms);
     acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                        (irqmap[VIRT_UART] + ARM_SPI_BASE));
     if (vmc->acpi_expose_flash) {

--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c

@@ -323,7 +323,7 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
     if (vms->gic_version == VIRT_GIC_VERSION_2) {
         irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                              GIC_FDT_IRQ_PPI_CPU_WIDTH,
-                             (1 << vms->smp_cpus) - 1);
+                             (1 << MACHINE(vms)->smp.cpus) - 1);
     }
 
     qemu_fdt_add_subnode(vms->fdt, "/timer");
@@ -350,6 +350,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
     int cpu;
     int addr_cells = 1;
     const MachineState *ms = MACHINE(vms);
+    int smp_cpus = ms->smp.cpus;
 
     /*
      * From Documentation/devicetree/bindings/arm/cpus.txt
@@ -364,7 +365,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
      * The simplest way to go is to examine affinity IDs of all our CPUs. If
      * at least one of them has Aff3 populated, we set #address-cells to 2.
      */
-    for (cpu = 0; cpu < vms->smp_cpus; cpu++) {
+    for (cpu = 0; cpu < smp_cpus; cpu++) {
         ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
 
         if (armcpu->mp_affinity & ARM_AFF3_MASK) {
@@ -377,7 +378,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
     qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#address-cells", addr_cells);
     qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#size-cells", 0x0);
 
-    for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
+    for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
         char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
         ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
         CPUState *cs = CPU(armcpu);
@@ -387,8 +388,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
         qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
                                     armcpu->dtb_compatible);
 
-        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED
-            && vms->smp_cpus > 1) {
+        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED && smp_cpus > 1) {
             qemu_fdt_setprop_string(vms->fdt, nodename,
                                         "enable-method", "psci");
         }
@@ -534,7 +534,7 @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
     if (vms->gic_version == VIRT_GIC_VERSION_2) {
         irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                              GIC_FDT_IRQ_PPI_CPU_WIDTH,
-                             (1 << vms->smp_cpus) - 1);
+                             (1 << MACHINE(vms)->smp.cpus) - 1);
     }
 
     qemu_fdt_add_subnode(vms->fdt, "/pmu");
@@ -1674,9 +1674,9 @@ static void finalize_gic_version(VirtMachineState *vms)
  * virt_cpu_post_init() must be called after the CPUs have
  * been realized and the GIC has been created.
  */
-static void virt_cpu_post_init(VirtMachineState *vms, int max_cpus,
-                               MemoryRegion *sysmem)
+static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
 {
+    int max_cpus = MACHINE(vms)->smp.max_cpus;
     bool aarch64, pmu, steal_time;
     CPUState *cpu;
 
@@ -1829,8 +1829,6 @@ static void machvirt_init(MachineState *machine)
         exit(1);
     }
 
-    vms->smp_cpus = smp_cpus;
-
     if (vms->virt && kvm_enabled()) {
         error_report("mach-virt: KVM does not support providing "
                      "Virtualization extensions to the guest CPU");
@@ -1846,6 +1844,7 @@ static void machvirt_init(MachineState *machine)
     create_fdt(vms);
 
     possible_cpus = mc->possible_cpu_arch_ids(machine);
+    assert(possible_cpus->len == max_cpus);
     for (n = 0; n < possible_cpus->len; n++) {
         Object *cpuobj;
         CPUState *cs;
@@ -1966,7 +1965,7 @@ static void machvirt_init(MachineState *machine)
     create_gic(vms);
 
-    virt_cpu_post_init(vms, possible_cpus->len, sysmem);
+    virt_cpu_post_init(vms, sysmem);
 
     fdt_add_pmu_nodes(vms);

--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h

@@ -151,7 +151,6 @@ struct VirtMachineState {
     MemMapEntry *memmap;
     char *pciehb_nodename;
     const int *irqmap;
-    int smp_cpus;
     void *fdt;
     int fdt_size;
     uint32_t clock_phandle;
@@ -182,7 +181,7 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
 
     assert(vms->gic_version == VIRT_GIC_VERSION_3);
 
-    return vms->smp_cpus > redist0_capacity ? 2 : 1;
+    return MACHINE(vms)->smp.cpus > redist0_capacity ? 2 : 1;
 }
 
 #endif /* QEMU_ARM_VIRT_H */