qemu/hw/core/machine.c


/*
* QEMU Machine
*
* Copyright (C) 2014 Red Hat Inc
*
* Authors:
* Marcel Apfelbaum <marcel.a@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "sysemu/replay.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qemu/madvise.h"
#include "qom/object_interfaces.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "sysemu/xen.h"
#include "sysemu/qtest.h"
#include "hw/pci/pci_bridge.h"
#include "hw/mem/nvdimm.h"
#include "migration/global_state.h"
#include "exec/confidential-guest-support.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-iommu.h"
#include "audio/audio.h"
GlobalProperty hw_compat_9_0[] = {
{"arm-cpu", "backcompat-cntfrq", "true" },
{"scsi-disk-base", "migrate-emulated-scsi-request", "false" },
{"vfio-pci", "skip-vsc-check", "false" },
{ "virtio-pci", "x-pcie-pm-no-soft-reset", "off" },
};
const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0);
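/*
 * Rough sketch of how a board wires these tables up (illustrative only, not
 * part of this file; the function names below are hypothetical stand-ins for
 * the per-version *_machine_options() callbacks found in board code):
 *
 *     static void example_9_0_machine_options(MachineClass *m)
 *     {
 *         example_9_1_machine_options(m);
 *         compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len);
 *     }
 *
 * compat_props_add() appends the array to mc->compat_props, and the entries
 * are applied as global property defaults for that machine version.
 */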
GlobalProperty hw_compat_8_2[] = {
{ "migration", "zero-page-detection", "legacy"},
{ TYPE_VIRTIO_IOMMU_PCI, "granule", "4k" },
{ TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "64" },
{ "virtio-gpu-device", "x-scanout-vmstate-version", "1" },
};
const size_t hw_compat_8_2_len = G_N_ELEMENTS(hw_compat_8_2);
GlobalProperty hw_compat_8_1[] = {
{ TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
{ "ramfb", "x-migrate", "off" },
{ "vfio-pci-nohotplug", "x-ramfb-migrate", "off" },
{ "igb", "x-pcie-flr-init", "off" },
{ TYPE_VIRTIO_NET, "host_uso", "off"},
{ TYPE_VIRTIO_NET, "guest_uso4", "off"},
{ TYPE_VIRTIO_NET, "guest_uso6", "off"},
};
const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1);
GlobalProperty hw_compat_8_0[] = {
{ "migration", "multifd-flush-after-each-section", "on"},
{ TYPE_PCI_DEVICE, "x-pcie-ari-nextfn-1", "on" },
};
const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0);
GlobalProperty hw_compat_7_2[] = {
{ "e1000e", "migrate-timadj", "off" },
{ "virtio-mem", "x-early-migration", "false" },
{ "migration", "x-preempt-pre-7-2", "true" },
{ TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" },
};
const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2);
GlobalProperty hw_compat_7_1[] = {
{ "virtio-device", "queue_reset", "false" },
{ "virtio-rng-pci", "vectors", "0" },
{ "virtio-rng-pci-transitional", "vectors", "0" },
{ "virtio-rng-pci-non-transitional", "vectors", "0" },
};
const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1);
GlobalProperty hw_compat_7_0[] = {
{ "arm-gicv3-common", "force-8-bit-prio", "on" },
{ "nvme-ns", "eui64-default", "on"},
};
const size_t hw_compat_7_0_len = G_N_ELEMENTS(hw_compat_7_0);
GlobalProperty hw_compat_6_2[] = {
{ "PIIX4_PM", "x-not-migrate-acpi-index", "on"},
};
const size_t hw_compat_6_2_len = G_N_ELEMENTS(hw_compat_6_2);
GlobalProperty hw_compat_6_1[] = {
{ "vhost-user-vsock-device", "seqpacket", "off" },
{ "nvme-ns", "shared", "off" },
};
const size_t hw_compat_6_1_len = G_N_ELEMENTS(hw_compat_6_1);
GlobalProperty hw_compat_6_0[] = {
{ "gpex-pcihost", "allow-unmapped-accesses", "false" },
{ "i8042", "extended-state", "false"},
{ "nvme-ns", "eui64-default", "off"},
{ "e1000", "init-vet", "off" },
{ "e1000e", "init-vet", "off" },
{ "vhost-vsock-device", "seqpacket", "off" },
};
const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
GlobalProperty hw_compat_5_2[] = {
{ "ICH9-LPC", "smm-compat", "on"},
{ "PIIX4_PM", "smm-compat", "on"},
{ "virtio-blk-device", "report-discard-granularity", "off" },
{ "virtio-net-pci-base", "vectors", "3"},
{ "nvme", "msix-exclusive-bar", "on"},
};
const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2);
GlobalProperty hw_compat_5_1[] = {
{ "vhost-scsi", "num_queues", "1"},
{ "vhost-user-blk", "num-queues", "1"},
{ "vhost-user-scsi", "num_queues", "1"},
{ "virtio-blk-device", "num-queues", "1"},
{ "virtio-scsi-device", "num_queues", "1"},
{ "nvme", "use-intel-id", "on"},
{ "pvpanic", "events", "1"}, /* PVPANIC_PANICKED */
{ "pl011", "migrate-clk", "off" },
{ "virtio-pci", "x-ats-page-aligned", "off"},
};
const size_t hw_compat_5_1_len = G_N_ELEMENTS(hw_compat_5_1);
GlobalProperty hw_compat_5_0[] = {
{ "pci-host-bridge", "x-config-reg-migration-enabled", "off" },
{ "virtio-balloon-device", "page-poison", "false" },
{ "vmport", "x-read-set-eax", "off" },
{ "vmport", "x-signal-unsupported-cmd", "off" },
{ "vmport", "x-report-vmx-type", "off" },
{ "vmport", "x-cmds-v2", "off" },
{ "virtio-device", "x-disable-legacy-check", "true" },
};
const size_t hw_compat_5_0_len = G_N_ELEMENTS(hw_compat_5_0);
GlobalProperty hw_compat_4_2[] = {
{ "virtio-blk-device", "queue-size", "128"},
{ "virtio-scsi-device", "virtqueue_size", "128"},
{ "virtio-blk-device", "x-enable-wce-if-config-wce", "off" },
{ "virtio-blk-device", "seg-max-adjust", "off"},
{ "virtio-scsi-device", "seg_max_adjust", "off"},
{ "vhost-blk-device", "seg_max_adjust", "off"},
{ "usb-host", "suppress-remote-wake", "off" },
{ "usb-redir", "suppress-remote-wake", "off" },
{ "qxl", "revision", "4" },
{ "qxl-vga", "revision", "4" },
{ "fw_cfg", "acpi-mr-restore", "false" },
{ "virtio-device", "use-disabled-flag", "false" },
};
const size_t hw_compat_4_2_len = G_N_ELEMENTS(hw_compat_4_2);
GlobalProperty hw_compat_4_1[] = {
{ "virtio-pci", "x-pcie-flr-init", "off" },
};
const size_t hw_compat_4_1_len = G_N_ELEMENTS(hw_compat_4_1);
GlobalProperty hw_compat_4_0[] = {
{ "VGA", "edid", "false" },
{ "secondary-vga", "edid", "false" },
{ "bochs-display", "edid", "false" },
{ "virtio-vga", "edid", "false" },
{ "virtio-gpu-device", "edid", "false" },
{ "virtio-device", "use-started", "false" },
{ "virtio-balloon-device", "qemu-4-0-config-size", "true" },
{ "pl031", "migrate-tick-offset", "false" },
};
const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0);
GlobalProperty hw_compat_3_1[] = {
{ "pcie-root-port", "x-speed", "2_5" },
{ "pcie-root-port", "x-width", "1" },
{ "memory-backend-file", "x-use-canonical-path-for-ramblock-id", "true" },
{ "memory-backend-memfd", "x-use-canonical-path-for-ramblock-id", "true" },
{ "tpm-crb", "ppi", "false" },
{ "tpm-tis", "ppi", "false" },
{ "usb-kbd", "serial", "42" },
{ "usb-mouse", "serial", "42" },
{ "usb-tablet", "serial", "42" },
{ "virtio-blk-device", "discard", "false" },
{ "virtio-blk-device", "write-zeroes", "false" },
{ "virtio-balloon-device", "qemu-4-0-config-size", "false" },
{ "pcie-root-port-base", "disable-acs", "true" }, /* Added in 4.1 */
};
const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1);
GlobalProperty hw_compat_3_0[] = {};
const size_t hw_compat_3_0_len = G_N_ELEMENTS(hw_compat_3_0);
GlobalProperty hw_compat_2_12[] = {
{ "hda-audio", "use-timer", "false" },
{ "cirrus-vga", "global-vmstate", "true" },
{ "VGA", "global-vmstate", "true" },
{ "vmware-svga", "global-vmstate", "true" },
{ "qxl-vga", "global-vmstate", "true" },
};
const size_t hw_compat_2_12_len = G_N_ELEMENTS(hw_compat_2_12);
GlobalProperty hw_compat_2_11[] = {
{ "hpet", "hpet-offset-saved", "false" },
{ "virtio-blk-pci", "vectors", "2" },
{ "vhost-user-blk-pci", "vectors", "2" },
{ "e1000", "migrate_tso_props", "off" },
};
const size_t hw_compat_2_11_len = G_N_ELEMENTS(hw_compat_2_11);
GlobalProperty hw_compat_2_10[] = {
{ "virtio-mouse-device", "wheel-axis", "false" },
{ "virtio-tablet-device", "wheel-axis", "false" },
};
const size_t hw_compat_2_10_len = G_N_ELEMENTS(hw_compat_2_10);
GlobalProperty hw_compat_2_9[] = {
{ "pci-bridge", "shpc", "off" },
{ "intel-iommu", "pt", "off" },
{ "virtio-net-device", "x-mtu-bypass-backend", "off" },
{ "pcie-root-port", "x-migrate-msix", "false" },
};
const size_t hw_compat_2_9_len = G_N_ELEMENTS(hw_compat_2_9);
GlobalProperty hw_compat_2_8[] = {
{ "fw_cfg_mem", "x-file-slots", "0x10" },
{ "fw_cfg_io", "x-file-slots", "0x10" },
{ "pflash_cfi01", "old-multiple-chip-handling", "on" },
{ "pci-bridge", "shpc", "on" },
{ TYPE_PCI_DEVICE, "x-pcie-extcap-init", "off" },
{ "virtio-pci", "x-pcie-deverr-init", "off" },
{ "virtio-pci", "x-pcie-lnkctl-init", "off" },
{ "virtio-pci", "x-pcie-pm-init", "off" },
{ "cirrus-vga", "vgamem_mb", "8" },
{ "isa-cirrus-vga", "vgamem_mb", "8" },
};
const size_t hw_compat_2_8_len = G_N_ELEMENTS(hw_compat_2_8);
GlobalProperty hw_compat_2_7[] = {
{ "virtio-pci", "page-per-vq", "on" },
{ "virtio-serial-device", "emergency-write", "off" },
{ "ioapic", "version", "0x11" },
{ "intel-iommu", "x-buggy-eim", "true" },
{ "virtio-pci", "x-ignore-backend-features", "on" },
};
const size_t hw_compat_2_7_len = G_N_ELEMENTS(hw_compat_2_7);
GlobalProperty hw_compat_2_6[] = {
{ "virtio-mmio", "format_transport_address", "off" },
/* Optional because not all virtio-pci devices support legacy mode */
{ "virtio-pci", "disable-modern", "on", .optional = true },
{ "virtio-pci", "disable-legacy", "off", .optional = true },
};
const size_t hw_compat_2_6_len = G_N_ELEMENTS(hw_compat_2_6);
GlobalProperty hw_compat_2_5[] = {
{ "isa-fdc", "fallback", "144" },
{ "pvscsi", "x-old-pci-configuration", "on" },
{ "pvscsi", "x-disable-pcie", "on" },
{ "vmxnet3", "x-old-msi-offsets", "on" },
{ "vmxnet3", "x-disable-pcie", "on" },
};
const size_t hw_compat_2_5_len = G_N_ELEMENTS(hw_compat_2_5);
GlobalProperty hw_compat_2_4[] = {
{ "e1000", "extra_mac_registers", "off" },
{ "virtio-pci", "x-disable-pcie", "on" },
{ "virtio-pci", "migrate-extra", "off" },
{ "fw_cfg_mem", "dma_enabled", "off" },
{ "fw_cfg_io", "dma_enabled", "off" }
};
const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4);
GlobalProperty hw_compat_2_3[] = {
{ "virtio-blk-pci", "any_layout", "off" },
{ "virtio-balloon-pci", "any_layout", "off" },
{ "virtio-serial-pci", "any_layout", "off" },
{ "virtio-9p-pci", "any_layout", "off" },
{ "virtio-rng-pci", "any_layout", "off" },
{ TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" },
{ "migration", "send-configuration", "off" },
{ "migration", "send-section-footer", "off" },
{ "migration", "store-global-state", "off" },
};
const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3);
GlobalProperty hw_compat_2_2[] = {};
const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2);
GlobalProperty hw_compat_2_1[] = {
{ "intel-hda", "old_msi_addr", "on" },
{ "VGA", "qemu-extended-regs", "off" },
{ "secondary-vga", "qemu-extended-regs", "off" },
{ "virtio-scsi-pci", "any_layout", "off" },
{ "usb-mouse", "usb_version", "1" },
{ "usb-kbd", "usb_version", "1" },
{ "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" },
};
const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1);
MachineState *current_machine;
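/*
 * QOM property accessors for the -machine options.  The getters/setters
 * below back properties such as "kernel", "initrd", "append", "dtb" and
 * friends; they are registered on TYPE_MACHINE elsewhere in this file
 * (class/instance init, not shown in this excerpt).
 */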
static char *machine_get_kernel(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->kernel_filename);
}
static void machine_set_kernel(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->kernel_filename);
ms->kernel_filename = g_strdup(value);
}
static char *machine_get_initrd(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->initrd_filename);
}
static void machine_set_initrd(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->initrd_filename);
ms->initrd_filename = g_strdup(value);
}
static char *machine_get_append(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->kernel_cmdline);
}
static void machine_set_append(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->kernel_cmdline);
ms->kernel_cmdline = g_strdup(value);
}
static char *machine_get_dtb(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->dtb);
}
static void machine_set_dtb(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->dtb);
ms->dtb = g_strdup(value);
}
static char *machine_get_dumpdtb(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->dumpdtb);
}
static void machine_set_dumpdtb(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->dumpdtb);
ms->dumpdtb = g_strdup(value);
}
static void machine_get_phandle_start(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
MachineState *ms = MACHINE(obj);
int64_t value = ms->phandle_start;
visit_type_int(v, name, &value, errp);
}
static void machine_set_phandle_start(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
MachineState *ms = MACHINE(obj);
int64_t value;
if (!visit_type_int(v, name, &value, errp)) {
return;
}
ms->phandle_start = value;
}
static char *machine_get_dt_compatible(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->dt_compatible);
}
static void machine_set_dt_compatible(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->dt_compatible);
ms->dt_compatible = g_strdup(value);
}
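/*
 * "dump-guest-core": when disabled, guest RAM is marked with
 * QEMU_MADV_DONTDUMP so it is excluded from host core dumps; the setter
 * below rejects disabling it on hosts without MADV_DONTDUMP support.
 */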
static bool machine_get_dump_guest_core(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->dump_guest_core;
}
static void machine_set_dump_guest_core(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
if (!value && QEMU_MADV_DONTDUMP == QEMU_MADV_INVALID) {
error_setg(errp, "Dumping guest memory cannot be disabled on this host");
return;
}
ms->dump_guest_core = value;
}
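/*
 * "mem-merge": when enabled, guest RAM is marked with QEMU_MADV_MERGEABLE
 * so the host can deduplicate identical pages (KSM on Linux); the setter
 * below rejects enabling it on hosts without madvise merge support.
 */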
static bool machine_get_mem_merge(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->mem_merge;
}
static void machine_set_mem_merge(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
if (value && QEMU_MADV_MERGEABLE == QEMU_MADV_INVALID) {
error_setg(errp, "Memory merging is not supported on this host");
return;
}
ms->mem_merge = value;
}
static bool machine_get_usb(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->usb;
}
static void machine_set_usb(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->usb = value;
ms->usb_disabled = !value;
}
static bool machine_get_graphics(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->enable_graphics;
}
static void machine_set_graphics(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->enable_graphics = value;
}
static char *machine_get_firmware(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->firmware);
}
static void machine_set_firmware(Object *obj, const char *value, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_free(ms->firmware);
ms->firmware = g_strdup(value);
}
static void machine_set_suppress_vmdesc(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->suppress_vmdesc = value;
}
static bool machine_get_suppress_vmdesc(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->suppress_vmdesc;
}
static char *machine_get_memory_encryption(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
if (ms->cgs) {
return g_strdup(object_get_canonical_path_component(OBJECT(ms->cgs)));
}
return NULL;
}
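/*
 * Legacy "memory-encryption" property: the setter resolves the named object
 * under /objects and links it as this machine's confidential-guest-support
 * object, so older command lines keep working.
 */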
static void machine_set_memory_encryption(Object *obj, const char *value,
Error **errp)
{
Object *cgs =
object_resolve_path_component(object_get_objects_root(), value);
if (!cgs) {
error_setg(errp, "No such memory encryption object '%s'", value);
return;
}
object_property_set_link(obj, "confidential-guest-support", cgs, errp);
}
static void machine_check_confidential_guest_support(const Object *obj,
const char *name,
Object *new_target,
Error **errp)
{
/*
* So far the only constraint is that the target has the
* TYPE_CONFIDENTIAL_GUEST_SUPPORT interface, and that's checked
* by the QOM core
*/
}
static bool machine_get_nvdimm(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->nvdimms_state->is_enabled;
}
static void machine_set_nvdimm(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->nvdimms_state->is_enabled = value;
}
static bool machine_get_hmat(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return ms->numa_state->hmat_enabled;
}
static void machine_set_hmat(Object *obj, bool value, Error **errp)
{
MachineState *ms = MACHINE(obj);
ms->numa_state->hmat_enabled = value;
}
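/*
 * "mem" property: exposes the initial RAM size, the maximum (hotpluggable)
 * RAM size and the number of memory slots as a MemorySizeConfiguration.
 * The setter below validates the combination and rounds the initial size
 * up to an 8 KiB multiple.
 */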
static void machine_get_mem(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
MachineState *ms = MACHINE(obj);
MemorySizeConfiguration mem = {
.has_size = true,
.size = ms->ram_size,
.has_max_size = !!ms->ram_slots,
.max_size = ms->maxram_size,
.has_slots = !!ms->ram_slots,
.slots = ms->ram_slots,
};
MemorySizeConfiguration *p_mem = &mem;
visit_type_MemorySizeConfiguration(v, name, &p_mem, &error_abort);
}
static void machine_set_mem(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ERRP_GUARD();
MachineState *ms = MACHINE(obj);
MachineClass *mc = MACHINE_GET_CLASS(obj);
MemorySizeConfiguration *mem;
if (!visit_type_MemorySizeConfiguration(v, name, &mem, errp)) {
return;
}
if (!mem->has_size) {
mem->has_size = true;
mem->size = mc->default_ram_size;
}
mem->size = QEMU_ALIGN_UP(mem->size, 8192);
if (mc->fixup_ram_size) {
mem->size = mc->fixup_ram_size(mem->size);
}
if ((ram_addr_t)mem->size != mem->size) {
error_setg(errp, "ram size too large");
goto out_free;
}
if (mem->has_max_size) {
if (mem->max_size < mem->size) {
error_setg(errp, "invalid value of maxmem: "
"maximum memory size (0x%" PRIx64 ") must be at least "
"the initial memory size (0x%" PRIx64 ")",
mem->max_size, mem->size);
goto out_free;
}
if (mem->has_slots && mem->slots && mem->max_size == mem->size) {
error_setg(errp, "invalid value of maxmem: "
"memory slots were specified but maximum memory size "
"(0x%" PRIx64 ") is equal to the initial memory size "
"(0x%" PRIx64 ")", mem->max_size, mem->size);
goto out_free;
}
ms->maxram_size = mem->max_size;
} else {
if (mem->has_slots) {
error_setg(errp, "slots specified but no max-size");
goto out_free;
}
ms->maxram_size = mem->size;
}
ms->ram_size = mem->size;
ms->ram_slots = mem->has_slots ? mem->slots : 0;
out_free:
qapi_free_MemorySizeConfiguration(mem);
}
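/*
 * On the command line this corresponds to -m, e.g. (illustrative):
 *
 *     -m size=4G,maxmem=16G,slots=4
 *
 * which sets ram_size to 4 GiB, maxram_size to 16 GiB and ram_slots to 4.
 */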
static char *machine_get_nvdimm_persistence(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->nvdimms_state->persistence_string);
}
static void machine_set_nvdimm_persistence(Object *obj, const char *value,
Error **errp)
{
MachineState *ms = MACHINE(obj);
NVDIMMState *nvdimms_state = ms->nvdimms_state;
if (strcmp(value, "cpu") == 0) {
nvdimms_state->persistence = 3;
} else if (strcmp(value, "mem-ctrl") == 0) {
nvdimms_state->persistence = 2;
} else {
error_setg(errp, "-machine nvdimm-persistence=%s: unsupported option",
value);
return;
}
g_free(nvdimms_state->persistence_string);
nvdimms_state->persistence_string = g_strdup(value);
}
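/*
 * Dynamic sysbus device allow-list: sysbus devices cannot normally be
 * created with -device/device_add, so boards that do support this opt in
 * per type, e.g. (illustrative call from board code):
 *
 *     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
 *
 * device_is_dynamic_sysbus() and device_type_is_dynamic_sysbus() then check
 * a candidate device (or type name) against that list.
 */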
void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type)
{
QAPI_LIST_PREPEND(mc->allowed_dynamic_sysbus_devices, g_strdup(type));
}
bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev)
{
Object *obj = OBJECT(dev);
if (!object_dynamic_cast(obj, TYPE_SYS_BUS_DEVICE)) {
return false;
}
return device_type_is_dynamic_sysbus(mc, object_get_typename(obj));
}
bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type)
{
bool allowed = false;
strList *wl;
ObjectClass *klass = object_class_by_name(type);
for (wl = mc->allowed_dynamic_sysbus_devices;
!allowed && wl;
wl = wl->next) {
allowed |= !!object_class_dynamic_cast(klass, wl->value);
}
return allowed;
}
static char *machine_get_audiodev(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
return g_strdup(ms->audiodev);
}
static void machine_set_audiodev(Object *obj, const char *value,
Error **errp)
{
MachineState *ms = MACHINE(obj);
if (!audio_state_by_name(value, errp)) {
return;
}
g_free(ms->audiodev);
ms->audiodev = g_strdup(value);
}
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine)
{
int i;
HotpluggableCPUList *head = NULL;
MachineClass *mc = MACHINE_GET_CLASS(machine);
/* force board to initialize possible_cpus if it hasn't been done yet */
mc->possible_cpu_arch_ids(machine);
for (i = 0; i < machine->possible_cpus->len; i++) {
CPUState *cpu;
HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
cpu_item->type = g_strdup(machine->possible_cpus->cpus[i].type);
cpu_item->vcpus_count = machine->possible_cpus->cpus[i].vcpus_count;
cpu_item->props = g_memdup(&machine->possible_cpus->cpus[i].props,
sizeof(*cpu_item->props));
cpu = machine->possible_cpus->cpus[i].cpu;
if (cpu) {
cpu_item->qom_path = object_get_canonical_path(OBJECT(cpu));
}
QAPI_LIST_PREPEND(head, cpu_item);
}
return head;
}
/**
* machine_set_cpu_numa_node:
* @machine: machine object to modify
* @props: specifies which cpu objects to assign to
* numa node specified by @props.node_id
* @errp: if an error occurs, a pointer to an area to store the error
*
* Associate NUMA node specified by @props.node_id with cpu slots that
* match the socket/core/thread-ids specified by @props. It's recommended to use
* query-hotpluggable-cpus.props values to specify the affected cpu slots,
* which leads to an exact 1:1 mapping of cpu slots to NUMA nodes.
*
* However, for CLI convenience it's possible to pass in a subset of properties,
* which will affect all cpu slots that match it.
* Example for the pc machine:
* -smp 4,cores=2,sockets=2 -numa node,nodeid=0 -numa node,nodeid=1 \
* -numa cpu,node-id=0,socket_id=0 \
* -numa cpu,node-id=1,socket_id=1
* will assign all child cores of socket 0 to node 0 and
* of socket 1 to node 1.
*
* An attempt to reassign an already assigned cpu slot to another NUMA node
* returns an error.
* An empty subset is disallowed and also results in an error.
*/
void machine_set_cpu_numa_node(MachineState *machine,
const CpuInstanceProperties *props, Error **errp)
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
NodeInfo *numa_info = machine->numa_state->nodes;
bool match = false;
int i;
if (!mc->possible_cpu_arch_ids) {
error_setg(errp, "mapping of CPUs to NUMA node is not supported");
return;
}
/* disabling node mapping is not supported, forbid it */
assert(props->has_node_id);
/* force board to initialize possible_cpus if it hasn't been done yet */
mc->possible_cpu_arch_ids(machine);
for (i = 0; i < machine->possible_cpus->len; i++) {
CPUArchId *slot = &machine->possible_cpus->cpus[i];
        /* reject properties that the board does not support */
if (props->has_thread_id && !slot->props.has_thread_id) {
error_setg(errp, "thread-id is not supported");
return;
}
if (props->has_core_id && !slot->props.has_core_id) {
error_setg(errp, "core-id is not supported");
return;
}
if (props->has_module_id && !slot->props.has_module_id) {
error_setg(errp, "module-id is not supported");
return;
}
if (props->has_cluster_id && !slot->props.has_cluster_id) {
error_setg(errp, "cluster-id is not supported");
return;
}
if (props->has_socket_id && !slot->props.has_socket_id) {
error_setg(errp, "socket-id is not supported");
return;
}
if (props->has_die_id && !slot->props.has_die_id) {
error_setg(errp, "die-id is not supported");
return;
}
/* skip slots with explicit mismatch */
if (props->has_thread_id && props->thread_id != slot->props.thread_id) {
continue;
}
if (props->has_core_id && props->core_id != slot->props.core_id) {
continue;
}
if (props->has_module_id &&
props->module_id != slot->props.module_id) {
continue;
}
if (props->has_cluster_id &&
props->cluster_id != slot->props.cluster_id) {
continue;
}
if (props->has_die_id && props->die_id != slot->props.die_id) {
continue;
}
if (props->has_socket_id && props->socket_id != slot->props.socket_id) {
continue;
}
        /* Reject the assignment if the slot is already assigned to a
         * different node; for compatibility of the legacy cpu_index mapping
         * with the SPAPR core-based mapping, do not error out if a cpu
         * thread and its matching core have the same node-id. */
if (slot->props.has_node_id &&
slot->props.node_id != props->node_id) {
error_setg(errp, "CPU is already assigned to node-id: %" PRId64,
slot->props.node_id);
return;
}
        /* assign the slot to the node since it matched the '-numa cpu' key */
match = true;
slot->props.node_id = props->node_id;
slot->props.has_node_id = props->has_node_id;
if (machine->numa_state->hmat_enabled) {
if ((numa_info[props->node_id].initiator < MAX_NODES) &&
(props->node_id != numa_info[props->node_id].initiator)) {
error_setg(errp, "The initiator of CPU NUMA node %" PRId64
" should be itself (got %" PRIu16 ")",
props->node_id, numa_info[props->node_id].initiator);
return;
}
numa_info[props->node_id].has_cpu = true;
numa_info[props->node_id].initiator = props->node_id;
}
}
if (!match) {
error_setg(errp, "no match found");
}
}
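/*
 * Visitor-based getter for the "smp" machine property: report the fully
 * expanded topology currently stored in ms->smp as an SMPConfiguration.
 */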
static void machine_get_smp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
MachineState *ms = MACHINE(obj);
SMPConfiguration *config = &(SMPConfiguration){
.has_cpus = true, .cpus = ms->smp.cpus,
.has_drawers = true, .drawers = ms->smp.drawers,
.has_books = true, .books = ms->smp.books,
.has_sockets = true, .sockets = ms->smp.sockets,
.has_dies = true, .dies = ms->smp.dies,
.has_clusters = true, .clusters = ms->smp.clusters,
.has_modules = true, .modules = ms->smp.modules,
.has_cores = true, .cores = ms->smp.cores,
.has_threads = true, .threads = ms->smp.threads,
.has_maxcpus = true, .maxcpus = ms->smp.max_cpus,
};
if (!visit_type_SMPConfiguration(v, name, &config, &error_abort)) {
return;
}
}
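/*
 * Visitor-based setter for the "smp" machine property (e.g. set from the
 * command line via "-smp 8,sockets=2,cores=2,threads=2"); the parsed
 * SMPConfiguration is validated and completed by machine_parse_smp_config().
 */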
static void machine_set_smp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
MachineState *ms = MACHINE(obj);
g_autoptr(SMPConfiguration) config = NULL;
if (!visit_type_SMPConfiguration(v, name, &config, errp)) {
return;
}
machine_parse_smp_config(ms, config, errp);
}
static void machine_get_boot(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
MachineState *ms = MACHINE(obj);
BootConfiguration *config = &ms->boot_config;
visit_type_BootConfiguration(v, name, &config, &error_abort);
}
static void machine_free_boot_config(MachineState *ms)
{
g_free(ms->boot_config.order);
g_free(ms->boot_config.once);
g_free(ms->boot_config.splash);
}
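/*
 * Shallow-copy @config into ms->boot_config, taking ownership of its strings
 * after releasing the previous ones; fall back to the machine class default
 * boot order when no explicit order was given.
 */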
static void machine_copy_boot_config(MachineState *ms, BootConfiguration *config)
{
MachineClass *machine_class = MACHINE_GET_CLASS(ms);
machine_free_boot_config(ms);
ms->boot_config = *config;
if (!config->order) {
ms->boot_config.order = g_strdup(machine_class->default_boot_order);
}
}
static void machine_set_boot(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ERRP_GUARD();
MachineState *ms = MACHINE(obj);
BootConfiguration *config = NULL;
if (!visit_type_BootConfiguration(v, name, &config, errp)) {
return;
}
if (config->order) {
validate_bootdevices(config->order, errp);
if (*errp) {
goto out_free;
}
}
if (config->once) {
validate_bootdevices(config->once, errp);
if (*errp) {
goto out_free;
}
}
machine_copy_boot_config(ms, config);
/* Strings live in ms->boot_config. */
free(config);
return;
out_free:
qapi_free_BootConfiguration(config);
}
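/*
 * Boards that instantiate default audio hardware call this from their class
 * init so that the "audiodev" machine property is available to users.
 */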
void machine_add_audiodev_property(MachineClass *mc)
{
ObjectClass *oc = OBJECT_CLASS(mc);
object_class_property_add_str(oc, "audiodev",
machine_get_audiodev,
machine_set_audiodev);
object_class_property_set_description(oc, "audiodev",
"Audiodev to use for default machine devices");
}
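/*
 * Class init for the abstract TYPE_MACHINE: establish baseline defaults and
 * register the machine properties shared by all boards ("kernel", "boot",
 * "smp", "memory-backend", "memory", ...).
 */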
static void machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
/* Default 128 MB as guest ram size */
mc->default_ram_size = 128 * MiB;
mc->rom_file_has_mr = true;
    /* NUMA node memory size is aligned to 8 MiB by default.
     * On Linux, each node's boundary has to be 8 MiB aligned.
     */
mc->numa_mem_align_shift = 23;
object_class_property_add_str(oc, "kernel",
machine_get_kernel, machine_set_kernel);
object_class_property_set_description(oc, "kernel",
"Linux kernel image file");
object_class_property_add_str(oc, "initrd",
machine_get_initrd, machine_set_initrd);
object_class_property_set_description(oc, "initrd",
"Linux initial ramdisk file");
object_class_property_add_str(oc, "append",
machine_get_append, machine_set_append);
object_class_property_set_description(oc, "append",
"Linux kernel command line");
object_class_property_add_str(oc, "dtb",
machine_get_dtb, machine_set_dtb);
object_class_property_set_description(oc, "dtb",
"Linux kernel device tree file");
object_class_property_add_str(oc, "dumpdtb",
machine_get_dumpdtb, machine_set_dumpdtb);
object_class_property_set_description(oc, "dumpdtb",
"Dump current dtb to a file and quit");
object_class_property_add(oc, "boot", "BootConfiguration",
machine_get_boot, machine_set_boot,
NULL, NULL);
object_class_property_set_description(oc, "boot",
"Boot configuration");
object_class_property_add(oc, "smp", "SMPConfiguration",
machine_get_smp, machine_set_smp,
NULL, NULL);
object_class_property_set_description(oc, "smp",
"CPU topology");
object_class_property_add(oc, "phandle-start", "int",
machine_get_phandle_start, machine_set_phandle_start,
NULL, NULL);
object_class_property_set_description(oc, "phandle-start",
"The first phandle ID we may generate dynamically");
object_class_property_add_str(oc, "dt-compatible",
machine_get_dt_compatible, machine_set_dt_compatible);
object_class_property_set_description(oc, "dt-compatible",
"Overrides the \"compatible\" property of the dt root node");
object_class_property_add_bool(oc, "dump-guest-core",
machine_get_dump_guest_core, machine_set_dump_guest_core);
object_class_property_set_description(oc, "dump-guest-core",
"Include guest memory in a core dump");
object_class_property_add_bool(oc, "mem-merge",
machine_get_mem_merge, machine_set_mem_merge);
object_class_property_set_description(oc, "mem-merge",
"Enable/disable memory merge support");
object_class_property_add_bool(oc, "usb",
machine_get_usb, machine_set_usb);
object_class_property_set_description(oc, "usb",
"Set on/off to enable/disable usb");
object_class_property_add_bool(oc, "graphics",
machine_get_graphics, machine_set_graphics);
object_class_property_set_description(oc, "graphics",
"Set on/off to enable/disable graphics emulation");
object_class_property_add_str(oc, "firmware",
machine_get_firmware, machine_set_firmware);
object_class_property_set_description(oc, "firmware",
"Firmware image");
object_class_property_add_bool(oc, "suppress-vmdesc",
machine_get_suppress_vmdesc, machine_set_suppress_vmdesc);
object_class_property_set_description(oc, "suppress-vmdesc",
"Set on to disable self-describing migration");
object_class_property_add_link(oc, "confidential-guest-support",
TYPE_CONFIDENTIAL_GUEST_SUPPORT,
offsetof(MachineState, cgs),
machine_check_confidential_guest_support,
OBJ_PROP_LINK_STRONG);
object_class_property_set_description(oc, "confidential-guest-support",
"Set confidential guest scheme to support");
/* For compatibility */
object_class_property_add_str(oc, "memory-encryption",
machine_get_memory_encryption, machine_set_memory_encryption);
object_class_property_set_description(oc, "memory-encryption",
"Set memory encryption object to use");
object_class_property_add_link(oc, "memory-backend", TYPE_MEMORY_BACKEND,
offsetof(MachineState, memdev), object_property_allow_set_link,
OBJ_PROP_LINK_STRONG);
object_class_property_set_description(oc, "memory-backend",
"Set RAM backend"
"Valid value is ID of hostmem based backend");
object_class_property_add(oc, "memory", "MemorySizeConfiguration",
machine_get_mem, machine_set_mem,
NULL, NULL);
object_class_property_set_description(oc, "memory",
"Memory size configuration");
}
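/*
 * Runs for every machine subclass as it is registered: concrete classes get
 * their user-visible name by stripping TYPE_MACHINE_SUFFIX from the QOM type
 * name, plus their own compat_props array so it is never shared with the
 * parent class.
 */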
static void machine_class_base_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->max_cpus = mc->max_cpus ?: 1;
mc->min_cpus = mc->min_cpus ?: 1;
mc->default_cpus = mc->default_cpus ?: 1;
if (!object_class_is_abstract(oc)) {
const char *cname = object_class_get_name(oc);
assert(g_str_has_suffix(cname, TYPE_MACHINE_SUFFIX));
mc->name = g_strndup(cname,
strlen(cname) - strlen(TYPE_MACHINE_SUFFIX));
mc->compat_props = g_ptr_array_new();
}
}
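/*
 * Per-instance init: create the /peripheral and /peripheral-anon containers,
 * seed defaults from the machine class, and register the optional "nvdimm"
 * and "hmat" properties when the board supports them.
 */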
static void machine_initfn(Object *obj)
{
MachineState *ms = MACHINE(obj);
MachineClass *mc = MACHINE_GET_CLASS(obj);
container_get(obj, "/peripheral");
container_get(obj, "/peripheral-anon");
ms->dump_guest_core = true;
ms->mem_merge = (QEMU_MADV_MERGEABLE != QEMU_MADV_INVALID);
ms->enable_graphics = true;
ms->kernel_cmdline = g_strdup("");
ms->ram_size = mc->default_ram_size;
ms->maxram_size = mc->default_ram_size;
if (mc->nvdimm_supported) {
ms->nvdimms_state = g_new0(NVDIMMState, 1);
object_property_add_bool(obj, "nvdimm",
machine_get_nvdimm, machine_set_nvdimm);
object_property_set_description(obj, "nvdimm",
"Set on/off to enable/disable "
"NVDIMM instantiation");
object_property_add_str(obj, "nvdimm-persistence",
machine_get_nvdimm_persistence,
machine_set_nvdimm_persistence);
object_property_set_description(obj, "nvdimm-persistence",
"Set NVDIMM persistence"
"Valid values are cpu, mem-ctrl");
}
if (mc->cpu_index_to_instance_props && mc->get_default_cpu_node_id) {
ms->numa_state = g_new0(NumaState, 1);
object_property_add_bool(obj, "hmat",
machine_get_hmat, machine_set_hmat);
object_property_set_description(obj, "hmat",
"Set on/off to enable/disable "
"ACPI Heterogeneous Memory Attribute "
"Table (HMAT)");
}
/* default to mc->default_cpus */
ms->smp.cpus = mc->default_cpus;
ms->smp.max_cpus = mc->default_cpus;
ms->smp.drawers = 1;
ms->smp.books = 1;
ms->smp.sockets = 1;
ms->smp.dies = 1;
ms->smp.clusters = 1;
ms->smp.modules = 1;
ms->smp.cores = 1;
ms->smp.threads = 1;
machine_copy_boot_config(ms, &(BootConfiguration){ 0 });
}
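/* Instance finalizer: release the heap-allocated state owned by the machine. */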
static void machine_finalize(Object *obj)
{
MachineState *ms = MACHINE(obj);
machine_free_boot_config(ms);
g_free(ms->kernel_filename);
g_free(ms->initrd_filename);
g_free(ms->kernel_cmdline);
g_free(ms->dtb);
g_free(ms->dumpdtb);
g_free(ms->dt_compatible);
g_free(ms->firmware);
g_free(ms->device_memory);
g_free(ms->nvdimms_state);
g_free(ms->numa_state);
g_free(ms->audiodev);
}
bool machine_usb(MachineState *machine)
{
return machine->usb;
}
int machine_phandle_start(MachineState *machine)
{
return machine->phandle_start;
}
bool machine_dump_guest_core(MachineState *machine)
{
return machine->dump_guest_core;
}
bool machine_mem_merge(MachineState *machine)
{
return machine->mem_merge;
}
bool machine_require_guest_memfd(MachineState *machine)
{
return machine->cgs && machine->cgs->require_guest_memfd;
}
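/*
 * Render the topology IDs of a CPU slot ("socket-id: 0, core-id: 1, ...") for
 * use in error messages; the returned string is owned by the caller.
 */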
static char *cpu_slot_to_string(const CPUArchId *cpu)
{
GString *s = g_string_new(NULL);
if (cpu->props.has_socket_id) {
g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id);
}
if (cpu->props.has_die_id) {
if (s->len) {
g_string_append_printf(s, ", ");
}
g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
}
if (cpu->props.has_cluster_id) {
if (s->len) {
g_string_append_printf(s, ", ");
}
g_string_append_printf(s, "cluster-id: %"PRId64, cpu->props.cluster_id);
}
if (cpu->props.has_module_id) {
if (s->len) {
g_string_append_printf(s, ", ");
}
g_string_append_printf(s, "module-id: %"PRId64, cpu->props.module_id);
}
if (cpu->props.has_core_id) {
if (s->len) {
g_string_append_printf(s, ", ");
}
g_string_append_printf(s, "core-id: %"PRId64, cpu->props.core_id);
}
if (cpu->props.has_thread_id) {
if (s->len) {
g_string_append_printf(s, ", ");
}
g_string_append_printf(s, "thread-id: %"PRId64, cpu->props.thread_id);
}
return g_string_free(s, false);
}
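/*
 * Validate the HMAT initiator of every node: MAX_NODES means no initiator
 * was configured (which is allowed), otherwise the initiator must refer to a
 * node that was declared with '-numa node' and that contains at least one CPU.
 */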
static void numa_validate_initiator(NumaState *numa_state)
{
int i;
NodeInfo *numa_info = numa_state->nodes;
for (i = 0; i < numa_state->num_nodes; i++) {
if (numa_info[i].initiator == MAX_NODES) {
continue;
}
if (!numa_info[numa_info[i].initiator].present) {
error_report("NUMA node %" PRIu16 " is missing, use "
"'-numa node' option to declare it first",
numa_info[i].initiator);
exit(1);
}
if (!numa_info[numa_info[i].initiator].has_cpu) {
error_report("The initiator of NUMA node %d is invalid", i);
exit(1);
}
}
}
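
/*
 * Assign a NUMA node to every possible CPU the user left unmapped. The
 * board's default mapping is used when no CPU was mapped explicitly;
 * otherwise unmapped CPUs fall back to node 0 and a deprecation warning
 * is emitted.
 */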
static void machine_numa_finish_cpu_init(MachineState *machine)
{
int i;
bool default_mapping;
GString *s = g_string_new(NULL);
MachineClass *mc = MACHINE_GET_CLASS(machine);
const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine);
assert(machine->numa_state->num_nodes);
for (i = 0; i < possible_cpus->len; i++) {
if (possible_cpus->cpus[i].props.has_node_id) {
break;
}
}
default_mapping = (i == possible_cpus->len);
for (i = 0; i < possible_cpus->len; i++) {
const CPUArchId *cpu_slot = &possible_cpus->cpus[i];
if (!cpu_slot->props.has_node_id) {
/* fetch default mapping from board and enable it */
CpuInstanceProperties props = cpu_slot->props;
props.node_id = mc->get_default_cpu_node_id(machine, i);
if (!default_mapping) {
                /*
                 * Record slots whose mapping is not set.
                 * TODO: make this a hard error in the future.
                 */
char *cpu_str = cpu_slot_to_string(cpu_slot);
g_string_append_printf(s, "%sCPU %d [%s]",
s->len ? ", " : "", i, cpu_str);
g_free(cpu_str);
                /* unmapped CPUs keep the legacy fallback to node 0 */
props.node_id = 0;
}
props.has_node_id = true;
machine_set_cpu_numa_node(machine, &props, &error_fatal);
}
}
if (machine->numa_state->hmat_enabled) {
numa_validate_initiator(machine->numa_state);
}
if (s->len && !qtest_enabled()) {
warn_report("CPU(s) not present in any NUMA nodes: %s",
s->str);
warn_report("All CPU(s) up to maxcpus should be described "
"in NUMA config, ability to start up with partial NUMA "
"mappings is obsoleted and will be removed in future");
}
g_string_free(s, true);
}
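
/*
 * Warn when CPUs sharing a socket and cluster have been assigned to
 * different NUMA nodes, a layout the Linux scheduler cannot cope with.
 */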
static void validate_cpu_cluster_to_numa_boundary(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
NumaState *state = ms->numa_state;
const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
const CPUArchId *cpus = possible_cpus->cpus;
int i, j;
if (qtest_enabled() || state->num_nodes <= 1 || possible_cpus->len <= 1) {
return;
}
/*
     * The Linux scheduling domain can't be parsed when multiple CPUs
* in one cluster have been associated with different NUMA nodes. However,
* it's fine to associate one NUMA node with CPUs in different clusters.
*/
for (i = 0; i < possible_cpus->len; i++) {
for (j = i + 1; j < possible_cpus->len; j++) {
if (cpus[i].props.has_socket_id &&
cpus[i].props.has_cluster_id &&
cpus[i].props.has_node_id &&
cpus[j].props.has_socket_id &&
cpus[j].props.has_cluster_id &&
cpus[j].props.has_node_id &&
cpus[i].props.socket_id == cpus[j].props.socket_id &&
cpus[i].props.cluster_id == cpus[j].props.cluster_id &&
cpus[i].props.node_id != cpus[j].props.node_id) {
warn_report("CPU-%d and CPU-%d in socket-%" PRId64 "-cluster-%" PRId64
" have been associated with node-%" PRId64 " and node-%" PRId64
" respectively. It can cause OSes like Linux to"
" misbehave", i, j, cpus[i].props.socket_id,
cpus[i].props.cluster_id, cpus[i].props.node_id,
cpus[j].props.node_id);
}
}
}
}
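
/*
 * Claim a memory backend for use as machine RAM: a backend may only be
 * mapped once, and its memory region is registered for migration.
 */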
MemoryRegion *machine_consume_memdev(MachineState *machine,
HostMemoryBackend *backend)
{
MemoryRegion *ret = host_memory_backend_get_memory(backend);
if (host_memory_backend_is_mapped(backend)) {
error_report("memory backend %s can't be used multiple times.",
object_get_canonical_path_component(OBJECT(backend)));
exit(EXIT_FAILURE);
}
host_memory_backend_set_mapped(backend, true);
vmstate_register_ram_global(ret);
return ret;
}
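
/*
 * Create the implicit default RAM backend (memory-backend-ram, or
 * memory-backend-file when a backing path is given), size it to match the
 * machine RAM and link it to the machine's "memory-backend" property.
 */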
static bool create_default_memdev(MachineState *ms, const char *path, Error **errp)
{
Object *obj;
MachineClass *mc = MACHINE_GET_CLASS(ms);
bool r = false;
obj = object_new(path ? TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM);
if (path) {
if (!object_property_set_str(obj, "mem-path", path, errp)) {
goto out;
}
}
if (!object_property_set_int(obj, "size", ms->ram_size, errp)) {
goto out;
}
object_property_add_child(object_get_objects_root(), mc->default_ram_id,
obj);
/* Ensure backend's memory region name is equal to mc->default_ram_id */
if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id",
false, errp)) {
goto out;
}
if (!user_creatable_complete(USER_CREATABLE(obj), errp)) {
goto out;
}
r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp);
out:
object_unref(obj);
return r;
}
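
/*
 * Pick the default CPU type for a machine class: when exactly one CPU type
 * is valid there is nothing to choose, so return it directly.
 */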
const char *machine_class_default_cpu_type(MachineClass *mc)
{
if (mc->valid_cpu_types && !mc->valid_cpu_types[1]) {
/* Only a single CPU type allowed: use it as default. */
return mc->valid_cpu_types[0];
}
return mc->default_cpu_type;
}
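
/*
 * Validate the CPU type selected with '-cpu' against the machine's list of
 * valid types (when one exists) and warn if the model is deprecated.
 */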
static bool is_cpu_type_supported(const MachineState *machine, Error **errp)
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
ObjectClass *oc = object_class_by_name(machine->cpu_type);
CPUClass *cc;
int i;
/*
* Check if the user specified CPU type is supported when the valid
* CPU types have been determined. Note that the user specified CPU
* type is provided through '-cpu' option.
*/
if (mc->valid_cpu_types) {
assert(mc->valid_cpu_types[0] != NULL);
for (i = 0; mc->valid_cpu_types[i]; i++) {
if (object_class_dynamic_cast(oc, mc->valid_cpu_types[i])) {
break;
}
}
/* The user specified CPU type isn't valid */
if (!mc->valid_cpu_types[i]) {
g_autofree char *requested = cpu_model_from_type(machine->cpu_type);
error_setg(errp, "Invalid CPU model: %s", requested);
if (!mc->valid_cpu_types[1]) {
g_autofree char *model = cpu_model_from_type(
mc->valid_cpu_types[0]);
error_append_hint(errp, "The only valid type is: %s\n", model);
} else {
error_append_hint(errp, "The valid models are: ");
for (i = 0; mc->valid_cpu_types[i]; i++) {
g_autofree char *model = cpu_model_from_type(
mc->valid_cpu_types[i]);
error_append_hint(errp, "%s%s",
model,
mc->valid_cpu_types[i + 1] ? ", " : "");
}
error_append_hint(errp, "\n");
}
return false;
}
}
/* Check if CPU type is deprecated and warn if so */
cc = CPU_CLASS(oc);
assert(cc != NULL);
if (cc->deprecation_note) {
warn_report("CPU model %s is deprecated -- %s",
machine->cpu_type, cc->deprecation_note);
}
return true;
}
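
/*
 * Board initialization entry point: validate the RAM size and memory
 * backend, finish NUMA/CPU mapping, check the CPU type, apply
 * confidential-guest constraints and finally call the machine class
 * init() hook.
 */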
void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp)
{
ERRP_GUARD();
MachineClass *machine_class = MACHINE_GET_CLASS(machine);
    /*
     * This checkpoint is required by replay to separate prior clock
     * reading from the other reads, because timer polling functions query
     * clock values from the log.
     */
replay_checkpoint(CHECKPOINT_INIT);
if (!xen_enabled()) {
/* On 32-bit hosts, QEMU is limited by virtual address space */
if (machine->ram_size > (2047 << 20) && HOST_LONG_BITS == 32) {
error_setg(errp, "at most 2047 MB RAM can be simulated");
return;
}
}
if (machine->memdev) {
ram_addr_t backend_size = object_property_get_uint(OBJECT(machine->memdev),
"size", &error_abort);
if (backend_size != machine->ram_size) {
error_setg(errp, "Machine memory size does not match the size of the memory backend");
return;
}
} else if (machine_class->default_ram_id && machine->ram_size &&
numa_uses_legacy_mem()) {
if (object_property_find(object_get_objects_root(),
machine_class->default_ram_id)) {
error_setg(errp, "object's id '%s' is reserved for the default"
" RAM backend, it can't be used for any other purposes",
machine_class->default_ram_id);
error_append_hint(errp,
"Change the object's 'id' to something else or disable"
" automatic creation of the default RAM backend by setting"
" 'memory-backend=%s' with '-machine'.\n",
machine_class->default_ram_id);
return;
}
if (!create_default_memdev(current_machine, mem_path, errp)) {
return;
}
}
if (machine->numa_state) {
numa_complete_configuration(machine);
if (machine->numa_state->num_nodes) {
machine_numa_finish_cpu_init(machine);
if (machine_class->cpu_cluster_has_numa_boundary) {
validate_cpu_cluster_to_numa_boundary(machine);
}
}
}
if (!machine->ram && machine->memdev) {
machine->ram = machine_consume_memdev(machine, machine->memdev);
}
/* Check if the CPU type is supported */
if (machine->cpu_type && !is_cpu_type_supported(machine, errp)) {
return;
}
if (machine->cgs) {
/*
* With confidential guests, the host can't see the real
* contents of RAM, so there's no point in it trying to merge
* areas.
*/
machine_set_mem_merge(OBJECT(machine), false, &error_abort);
/*
* Virtio devices can't count on directly accessing guest
* memory, so they need iommu_platform=on to use normal DMA
* mechanisms. That requires also disabling legacy virtio
* support for those virtio pci devices which allow it.
*/
object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy",
"on", true);
object_register_sugar_prop(TYPE_VIRTIO_DEVICE, "iommu_platform",
"on", false);
}
accel_init_interfaces(ACCEL_GET_CLASS(machine->accelerator));
machine_class->init(machine);
phase_advance(PHASE_MACHINE_INITIALIZED);
}
static NotifierList machine_init_done_notifiers =
NOTIFIER_LIST_INITIALIZER(machine_init_done_notifiers);
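
/*
 * Register a machine-init-done notifier; if the machine has already
 * reached the READY phase the notifier is invoked immediately.
 */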
void qemu_add_machine_init_done_notifier(Notifier *notify)
{
notifier_list_add(&machine_init_done_notifiers, notify);
if (phase_check(PHASE_MACHINE_READY)) {
notify->notify(notify, NULL);
}
}
void qemu_remove_machine_init_done_notifier(Notifier *notify)
{
notifier_remove(notify);
}
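
/*
 * Called once initial machine construction is complete: synchronize CPU
 * state, mark the machine READY, wire the sysbus into system reset, run
 * the init-done notifiers and perform the initial machine reset.
 */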
void qdev_machine_creation_done(void)
{
cpu_synchronize_all_post_init();
if (current_machine->boot_config.once) {
qemu_boot_set(current_machine->boot_config.once, &error_fatal);
qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_config.order));
}
    /*
     * OK, the initial machine setup is done; from now on only
     * hotpluggable devices can be created.
     */
phase_advance(PHASE_MACHINE_READY);
qdev_assert_realized_properly();
    /*
     * TODO: once all bus devices are qdevified, this should be done
     * when the bus is created by qdev.c.
     */
/*
* This is where we arrange for the sysbus to be reset when the
* whole simulation is reset. In turn, resetting the sysbus will cause
* all devices hanging off it (and all their child buses, recursively)
* to be reset. Note that this will *not* reset any Device objects
* which are not attached to some part of the qbus tree!
*/
qemu_register_resettable(OBJECT(sysbus_get_default()));
notifier_list_notify(&machine_init_done_notifiers, NULL);
if (rom_check_and_register_reset() != 0) {
exit(1);
}
replay_start();
    /*
     * This checkpoint is required by replay to separate prior clock
     * reading from the other reads, because timer polling functions query
     * clock values from the log.
     */
replay_checkpoint(CHECKPOINT_RESET);
qemu_system_reset(SHUTDOWN_CAUSE_NONE);
register_global_state();
}
static const TypeInfo machine_info = {
.name = TYPE_MACHINE,
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(MachineClass),
.class_init = machine_class_init,
.class_base_init = machine_class_base_init,
.instance_size = sizeof(MachineState),
.instance_init = machine_initfn,
.instance_finalize = machine_finalize,
};
static void machine_register_types(void)
{
type_register_static(&machine_info);
}
type_init(machine_register_types)