Merge tag 'pull-vfio-20240522' of https://github.com/legoater/qemu into staging

vfio queue:

* Improvement of error reporting during migration
* Removed Vendor Specific Capability check on newer machines
* Addition of a VFIO migration QAPI event
* Changed prototype of routines using an error parameter to return bool
* Several cleanups regarding autofree variables

* tag 'pull-vfio-20240522' of https://github.com/legoater/qemu: (47 commits)
  vfio/igd: Use g_autofree in vfio_probe_igd_bar4_quirk()
  vfio: Use g_autofree in all call site of vfio_get_region_info()
  vfio/pci-quirks: Make vfio_add_*_cap() return bool
  vfio/pci-quirks: Make vfio_pci_igd_opregion_init() return bool
  vfio/pci: Use g_autofree for vfio_region_info pointer
  vfio/pci: Make capability related functions return bool
  vfio/pci: Make vfio_populate_vga() return bool
  vfio/pci: Make vfio_intx_enable() return bool
  vfio/pci: Make vfio_populate_device() return a bool
  vfio/pci: Make vfio_pci_relocate_msix() and vfio_msix_early_setup() return a bool
  vfio/pci: Make vfio_intx_enable_kvm() return a bool
  vfio/ccw: Make vfio_ccw_get_region() return a bool
  vfio/platform: Make vfio_populate_device() and vfio_base_device_init() return bool
  vfio/helpers: Make vfio_device_get_name() return bool
  vfio/helpers: Make vfio_set_irq_signaling() return bool
  vfio/helpers: Use g_autofree in vfio_set_irq_signaling()
  vfio/display: Make vfio_display_*() return bool
  vfio/display: Fix error path in call site of ramfb_setup()
  backends/iommufd: Make iommufd_backend_*() return bool
  vfio/cpr: Make vfio_cpr_register_container() return bool
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 6af8037c42
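The fourth bullet above describes the calling convention adopted across the series: routines that take an Error **errp now report success through a bool return value instead of 0 / negative errno, so call sites read as a simple if (!fn(..., errp)) check. The sketch below illustrates that pattern only; DummyDev, DUMMY_IOCTL, dummy_setup() and dummy_realize() are illustrative placeholders, not code from this pull request, while error_setg_errno() is the regular QEMU Error API.

    /* Minimal sketch of the int -> bool conversion, under the assumptions above. */
    #include <stdbool.h>
    #include <errno.h>
    #include <sys/ioctl.h>
    #include "qapi/error.h"

    typedef struct DummyDev { int fd; } DummyDev;
    #define DUMMY_IOCTL 0   /* placeholder request number */

    /* Old style: return 0 or a negative errno, fill *errp on failure. */
    static int dummy_setup_old(DummyDev *dev, Error **errp)
    {
        if (ioctl(dev->fd, DUMMY_IOCTL) < 0) {
            error_setg_errno(errp, errno, "setup failed");
            return -errno;
        }
        return 0;
    }

    /* New style: return true on success, false on failure; *errp is set iff false. */
    static bool dummy_setup(DummyDev *dev, Error **errp)
    {
        if (ioctl(dev->fd, DUMMY_IOCTL) < 0) {
            error_setg_errno(errp, errno, "setup failed");
            return false;
        }
        return true;
    }

    /* Callers then propagate failure without inventing a second error code. */
    static bool dummy_realize(DummyDev *dev, Error **errp)
    {
        if (!dummy_setup(dev, errp)) {
            return false;   /* errp already holds the details */
        }
        return true;
    }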
@@ -2164,6 +2164,7 @@ F: hw/vfio/*
 F: include/hw/vfio/
 F: docs/igd-assign.txt
 F: docs/devel/migration/vfio.rst
+F: qapi/vfio.json
 
 vfio-ccw
 M: Eric Farman <farman@linux.ibm.com>
@@ -72,24 +72,22 @@ static void iommufd_backend_class_init(ObjectClass *oc, void *data)
     object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
 }
 
-int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
+bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
 {
-    int fd, ret = 0;
+    int fd;
 
     if (be->owned && !be->users) {
         fd = qemu_open_old("/dev/iommu", O_RDWR);
         if (fd < 0) {
             error_setg_errno(errp, errno, "/dev/iommu opening failed");
-            ret = fd;
-            goto out;
+            return false;
         }
         be->fd = fd;
     }
     be->users++;
-out:
-    trace_iommufd_backend_connect(be->fd, be->owned,
-                                  be->users, ret);
-    return ret;
+
+    trace_iommufd_backend_connect(be->fd, be->owned, be->users);
+    return true;
 }
 
 void iommufd_backend_disconnect(IOMMUFDBackend *be)
@@ -106,25 +104,24 @@ out:
     trace_iommufd_backend_disconnect(be->fd, be->users);
 }
 
-int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
-                               Error **errp)
+bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+                                Error **errp)
 {
-    int ret, fd = be->fd;
+    int fd = be->fd;
     struct iommu_ioas_alloc alloc_data  = {
         .size = sizeof(alloc_data),
         .flags = 0,
     };
 
-    ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data);
-    if (ret) {
+    if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
         error_setg_errno(errp, errno, "Failed to allocate ioas");
-        return ret;
+        return false;
     }
 
     *ioas_id = alloc_data.out_ioas_id;
-    trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret);
+    trace_iommufd_backend_alloc_ioas(fd, *ioas_id);
 
-    return ret;
+    return true;
 }
 
 void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
@@ -7,11 +7,11 @@ dbus_vmstate_loading(const char *id) "id: %s"
 dbus_vmstate_saving(const char *id) "id: %s"
 
 # iommufd.c
-iommufd_backend_connect(int fd, bool owned, uint32_t users, int ret) "fd=%d owned=%d users=%d (%d)"
+iommufd_backend_connect(int fd, bool owned, uint32_t users) "fd=%d owned=%d users=%d"
 iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d"
 iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d"
 iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)"
 iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
 iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
-iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas, int ret) " iommufd=%d ioas=%d (%d)"
+iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas) " iommufd=%d ioas=%d"
 iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)"
@@ -35,6 +35,7 @@
 
 GlobalProperty hw_compat_9_0[] = {
     {"arm-cpu", "backcompat-cntfrq", "true" },
+    {"vfio-pci", "skip-vsc-check", "false" },
 };
 const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0);
 
35 hw/vfio/ap.c
@ -70,14 +70,14 @@ static void vfio_ap_req_notifier_handler(void *opaque)
|
||||
}
|
||||
}
|
||||
|
||||
static void vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
|
||||
static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
|
||||
unsigned int irq, Error **errp)
|
||||
{
|
||||
int fd;
|
||||
size_t argsz;
|
||||
IOHandler *fd_read;
|
||||
EventNotifier *notifier;
|
||||
struct vfio_irq_info *irq_info;
|
||||
g_autofree struct vfio_irq_info *irq_info = NULL;
|
||||
VFIODevice *vdev = &vapdev->vdev;
|
||||
|
||||
switch (irq) {
|
||||
@ -87,13 +87,13 @@ static void vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
|
||||
break;
|
||||
default:
|
||||
error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (vdev->num_irqs < irq + 1) {
|
||||
error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
|
||||
irq, vdev->num_irqs);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
argsz = sizeof(*irq_info);
|
||||
@ -104,28 +104,26 @@ static void vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
|
||||
if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
|
||||
irq_info) < 0 || irq_info->count < 1) {
|
||||
error_setg_errno(errp, errno, "vfio: Error getting irq info");
|
||||
goto out_free_info;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (event_notifier_init(notifier, 0)) {
|
||||
error_setg_errno(errp, errno,
|
||||
"vfio: Unable to init event notifier for irq (%d)",
|
||||
irq);
|
||||
goto out_free_info;
|
||||
return false;
|
||||
}
|
||||
|
||||
fd = event_notifier_get_fd(notifier);
|
||||
qemu_set_fd_handler(fd, fd_read, NULL, vapdev);
|
||||
|
||||
if (vfio_set_irq_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
|
||||
errp)) {
|
||||
if (!vfio_set_irq_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
|
||||
errp)) {
|
||||
qemu_set_fd_handler(fd, NULL, NULL, vapdev);
|
||||
event_notifier_cleanup(notifier);
|
||||
}
|
||||
|
||||
out_free_info:
|
||||
g_free(irq_info);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev,
|
||||
@ -143,8 +141,8 @@ static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev,
|
||||
return;
|
||||
}
|
||||
|
||||
if (vfio_set_irq_signaling(&vapdev->vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vapdev->vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
warn_reportf_err(err, VFIO_MSG_PREFIX, vapdev->vdev.name);
|
||||
}
|
||||
|
||||
@ -156,23 +154,20 @@ static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev,
|
||||
static void vfio_ap_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
int ret;
|
||||
Error *err = NULL;
|
||||
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev);
|
||||
VFIODevice *vbasedev = &vapdev->vdev;
|
||||
|
||||
if (vfio_device_get_name(vbasedev, errp) < 0) {
|
||||
if (!vfio_device_get_name(vbasedev, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vfio_attach_device(vbasedev->name, vbasedev,
|
||||
&address_space_memory, errp);
|
||||
if (ret) {
|
||||
if (!vfio_attach_device(vbasedev->name, vbasedev,
|
||||
&address_space_memory, errp)) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
vfio_ap_register_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX, &err);
|
||||
if (err) {
|
||||
if (!vfio_ap_register_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX, &err)) {
|
||||
/*
|
||||
* Report this error, but do not make it a failing condition.
|
||||
* Lack of this IRQ in the host does not prevent normal operation.
|
||||
|
@ -379,12 +379,12 @@ read_err:
|
||||
css_inject_io_interrupt(sch);
|
||||
}
|
||||
|
||||
static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
unsigned int irq,
|
||||
Error **errp)
|
||||
{
|
||||
VFIODevice *vdev = &vcdev->vdev;
|
||||
struct vfio_irq_info *irq_info;
|
||||
g_autofree struct vfio_irq_info *irq_info = NULL;
|
||||
size_t argsz;
|
||||
int fd;
|
||||
EventNotifier *notifier;
|
||||
@ -405,13 +405,13 @@ static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
break;
|
||||
default:
|
||||
error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (vdev->num_irqs < irq + 1) {
|
||||
error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
|
||||
irq, vdev->num_irqs);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
argsz = sizeof(*irq_info);
|
||||
@ -421,27 +421,26 @@ static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
|
||||
irq_info) < 0 || irq_info->count < 1) {
|
||||
error_setg_errno(errp, errno, "vfio: Error getting irq info");
|
||||
goto out_free_info;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (event_notifier_init(notifier, 0)) {
|
||||
error_setg_errno(errp, errno,
|
||||
"vfio: Unable to init event notifier for irq (%d)",
|
||||
irq);
|
||||
goto out_free_info;
|
||||
return false;
|
||||
}
|
||||
|
||||
fd = event_notifier_get_fd(notifier);
|
||||
qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
|
||||
|
||||
if (vfio_set_irq_signaling(vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
|
||||
if (!vfio_set_irq_signaling(vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
|
||||
qemu_set_fd_handler(fd, NULL, NULL, vcdev);
|
||||
event_notifier_cleanup(notifier);
|
||||
}
|
||||
|
||||
out_free_info:
|
||||
g_free(irq_info);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
@ -465,8 +464,8 @@ static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
return;
|
||||
}
|
||||
|
||||
if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
|
||||
}
|
||||
|
||||
@ -475,7 +474,7 @@ static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
|
||||
event_notifier_cleanup(notifier);
|
||||
}
|
||||
|
||||
static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
|
||||
static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
|
||||
{
|
||||
VFIODevice *vdev = &vcdev->vdev;
|
||||
struct vfio_region_info *info;
|
||||
@ -484,7 +483,7 @@ static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
|
||||
/* Sanity check device */
|
||||
if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
|
||||
error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -494,13 +493,13 @@ static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
|
||||
if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
|
||||
error_setg(errp, "vfio: too few regions (%u), expected at least %u",
|
||||
vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "vfio: Error getting config info");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
vcdev->io_region_size = info->size;
|
||||
@ -554,7 +553,7 @@ static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
|
||||
g_free(info);
|
||||
}
|
||||
|
||||
return;
|
||||
return true;
|
||||
|
||||
out_err:
|
||||
g_free(vcdev->crw_region);
|
||||
@ -562,7 +561,7 @@ out_err:
|
||||
g_free(vcdev->async_cmd_region);
|
||||
g_free(vcdev->io_region);
|
||||
g_free(info);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
|
||||
@ -580,7 +579,6 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
|
||||
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
|
||||
VFIODevice *vbasedev = &vcdev->vdev;
|
||||
Error *err = NULL;
|
||||
int ret;
|
||||
|
||||
/* Call the class init function for subchannel. */
|
||||
if (cdc->realize) {
|
||||
@ -590,35 +588,31 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
|
||||
}
|
||||
}
|
||||
|
||||
if (vfio_device_get_name(vbasedev, errp) < 0) {
|
||||
if (!vfio_device_get_name(vbasedev, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vfio_attach_device(cdev->mdevid, vbasedev,
|
||||
&address_space_memory, errp);
|
||||
if (ret) {
|
||||
if (!vfio_attach_device(cdev->mdevid, vbasedev,
|
||||
&address_space_memory, errp)) {
|
||||
goto out_attach_dev_err;
|
||||
}
|
||||
|
||||
vfio_ccw_get_region(vcdev, &err);
|
||||
if (err) {
|
||||
if (!vfio_ccw_get_region(vcdev, &err)) {
|
||||
goto out_region_err;
|
||||
}
|
||||
|
||||
vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
|
||||
if (err) {
|
||||
if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err)) {
|
||||
goto out_io_notifier_err;
|
||||
}
|
||||
|
||||
if (vcdev->crw_region) {
|
||||
vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
|
||||
if (err) {
|
||||
if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX,
|
||||
&err)) {
|
||||
goto out_irq_notifier_err;
|
||||
}
|
||||
}
|
||||
|
||||
vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err);
|
||||
if (err) {
|
||||
if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err)) {
|
||||
/*
|
||||
* Report this error, but do not make it a failing condition.
|
||||
* Lack of this IRQ in the host does not prevent normal operation.
|
||||
|
119 hw/vfio/common.c
@ -147,10 +147,10 @@ bool vfio_viommu_preset(VFIODevice *vbasedev)
|
||||
return vbasedev->bcontainer->space->as != &address_space_memory;
|
||||
}
|
||||
|
||||
static void vfio_set_migration_error(int err)
|
||||
static void vfio_set_migration_error(int ret)
|
||||
{
|
||||
if (migration_is_setup_or_active()) {
|
||||
migration_file_set_error(err);
|
||||
migration_file_set_error(ret, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@ -253,12 +253,13 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
|
||||
|
||||
/* Called with rcu_read_lock held. */
|
||||
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
|
||||
ram_addr_t *ram_addr, bool *read_only)
|
||||
ram_addr_t *ram_addr, bool *read_only,
|
||||
Error **errp)
|
||||
{
|
||||
bool ret, mr_has_discard_manager;
|
||||
|
||||
ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
|
||||
&mr_has_discard_manager);
|
||||
&mr_has_discard_manager, errp);
|
||||
if (ret && mr_has_discard_manager) {
|
||||
/*
|
||||
* Malicious VMs might trigger discarding of IOMMU-mapped memory. The
|
||||
@ -288,6 +289,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
||||
hwaddr iova = iotlb->iova + giommu->iommu_offset;
|
||||
void *vaddr;
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
|
||||
iova, iova + iotlb->addr_mask);
|
||||
@ -304,7 +306,8 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
||||
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
|
||||
bool read_only;
|
||||
|
||||
if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
|
||||
if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &local_err)) {
|
||||
error_report_err(local_err);
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
@ -585,7 +588,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
|
||||
return;
|
||||
}
|
||||
|
||||
if (vfio_container_add_section_window(bcontainer, section, &err)) {
|
||||
if (!vfio_container_add_section_window(bcontainer, section, &err)) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -1027,7 +1030,8 @@ static void vfio_device_feature_dma_logging_start_destroy(
|
||||
g_free(feature);
|
||||
}
|
||||
|
||||
static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer)
|
||||
static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer,
|
||||
Error **errp)
|
||||
{
|
||||
struct vfio_device_feature *feature;
|
||||
VFIODirtyRanges ranges;
|
||||
@ -1038,6 +1042,7 @@ static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer)
|
||||
feature = vfio_device_feature_dma_logging_start_create(bcontainer,
|
||||
&ranges);
|
||||
if (!feature) {
|
||||
error_setg_errno(errp, errno, "Failed to prepare DMA logging");
|
||||
return -errno;
|
||||
}
|
||||
|
||||
@ -1049,8 +1054,8 @@ static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer)
|
||||
ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("%s: Failed to start DMA logging, err %d (%s)",
|
||||
vbasedev->name, ret, strerror(errno));
|
||||
error_setg_errno(errp, errno, "%s: Failed to start DMA logging",
|
||||
vbasedev->name);
|
||||
goto out;
|
||||
}
|
||||
vbasedev->dirty_tracking = true;
|
||||
@ -1069,20 +1074,19 @@ out:
|
||||
static bool vfio_listener_log_global_start(MemoryListener *listener,
|
||||
Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
|
||||
listener);
|
||||
int ret;
|
||||
|
||||
if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
|
||||
ret = vfio_devices_dma_logging_start(bcontainer);
|
||||
ret = vfio_devices_dma_logging_start(bcontainer, errp);
|
||||
} else {
|
||||
ret = vfio_container_set_dirty_page_tracking(bcontainer, true);
|
||||
ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
error_report("vfio: Could not start dirty page tracking, err: %d (%s)",
|
||||
ret, strerror(-ret));
|
||||
vfio_set_migration_error(ret);
|
||||
error_prepend(errp, "vfio: Could not start dirty page tracking - ");
|
||||
}
|
||||
return !ret;
|
||||
}
|
||||
@ -1091,17 +1095,20 @@ static void vfio_listener_log_global_stop(MemoryListener *listener)
|
||||
{
|
||||
VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
|
||||
listener);
|
||||
Error *local_err = NULL;
|
||||
int ret = 0;
|
||||
|
||||
if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
|
||||
vfio_devices_dma_logging_stop(bcontainer);
|
||||
} else {
|
||||
ret = vfio_container_set_dirty_page_tracking(bcontainer, false);
|
||||
ret = vfio_container_set_dirty_page_tracking(bcontainer, false,
|
||||
&local_err);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
error_report("vfio: Could not stop dirty page tracking, err: %d (%s)",
|
||||
ret, strerror(-ret));
|
||||
error_prepend(&local_err,
|
||||
"vfio: Could not stop dirty page tracking - ");
|
||||
error_report_err(local_err);
|
||||
vfio_set_migration_error(ret);
|
||||
}
|
||||
}
|
||||
@ -1133,8 +1140,7 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
|
||||
}
|
||||
|
||||
int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
VFIOBitmap *vbmap, hwaddr iova,
|
||||
hwaddr size)
|
||||
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
|
||||
{
|
||||
VFIODevice *vbasedev;
|
||||
int ret;
|
||||
@ -1143,10 +1149,10 @@ int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
ret = vfio_device_dma_logging_report(vbasedev, iova, size,
|
||||
vbmap->bitmap);
|
||||
if (ret) {
|
||||
error_report("%s: Failed to get DMA logging report, iova: "
|
||||
"0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
|
||||
", err: %d (%s)",
|
||||
vbasedev->name, iova, size, ret, strerror(-ret));
|
||||
error_setg_errno(errp, -ret,
|
||||
"%s: Failed to get DMA logging report, iova: "
|
||||
"0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx,
|
||||
vbasedev->name, iova, size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1156,7 +1162,7 @@ int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
}
|
||||
|
||||
int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
|
||||
uint64_t size, ram_addr_t ram_addr)
|
||||
uint64_t size, ram_addr_t ram_addr, Error **errp)
|
||||
{
|
||||
bool all_device_dirty_tracking =
|
||||
vfio_devices_all_device_dirty_tracking(bcontainer);
|
||||
@ -1173,13 +1179,17 @@ int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
|
||||
|
||||
ret = vfio_bitmap_alloc(&vbmap, size);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret,
|
||||
"Failed to allocate dirty tracking bitmap");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (all_device_dirty_tracking) {
|
||||
ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
|
||||
ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
|
||||
errp);
|
||||
} else {
|
||||
ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
|
||||
ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
|
||||
errp);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
@ -1209,6 +1219,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
||||
VFIOContainerBase *bcontainer = giommu->bcontainer;
|
||||
hwaddr iova = iotlb->iova + giommu->iommu_offset;
|
||||
ram_addr_t translated_addr;
|
||||
Error *local_err = NULL;
|
||||
int ret = -EINVAL;
|
||||
|
||||
trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
|
||||
@ -1220,16 +1231,22 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
|
||||
ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
|
||||
translated_addr);
|
||||
if (ret) {
|
||||
error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
|
||||
"0x%"HWADDR_PRIx") = %d (%s)",
|
||||
bcontainer, iova, iotlb->addr_mask + 1, ret,
|
||||
strerror(-ret));
|
||||
}
|
||||
if (!vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, &local_err)) {
|
||||
error_report_err(local_err);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
|
||||
translated_addr, &local_err);
|
||||
if (ret) {
|
||||
error_prepend(&local_err,
|
||||
"vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
|
||||
"0x%"HWADDR_PRIx") failed - ", bcontainer, iova,
|
||||
iotlb->addr_mask + 1);
|
||||
error_report_err(local_err);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
|
||||
out:
|
||||
@ -1246,12 +1263,19 @@ static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
|
||||
const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
|
||||
section->offset_within_region;
|
||||
VFIORamDiscardListener *vrdl = opaque;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Sync the whole mapped region (spanning multiple individual mappings)
|
||||
* in one go.
|
||||
*/
|
||||
return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr);
|
||||
ret = vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
|
||||
&local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1283,7 +1307,7 @@ vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
|
||||
}
|
||||
|
||||
static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
|
||||
MemoryRegionSection *section)
|
||||
MemoryRegionSection *section, Error **errp)
|
||||
{
|
||||
ram_addr_t ram_addr;
|
||||
|
||||
@ -1314,7 +1338,14 @@ static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
|
||||
}
|
||||
return 0;
|
||||
} else if (memory_region_has_ram_discard_manager(section->mr)) {
|
||||
return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
|
||||
int ret;
|
||||
|
||||
ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
|
||||
if (ret) {
|
||||
error_setg(errp,
|
||||
"Failed to sync dirty bitmap with RAM discard listener");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ram_addr = memory_region_get_ram_addr(section->mr) +
|
||||
@ -1322,7 +1353,7 @@ static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
|
||||
|
||||
return vfio_get_dirty_bitmap(bcontainer,
|
||||
REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
|
||||
int128_get64(section->size), ram_addr);
|
||||
int128_get64(section->size), ram_addr, errp);
|
||||
}
|
||||
|
||||
static void vfio_listener_log_sync(MemoryListener *listener,
|
||||
@ -1331,16 +1362,16 @@ static void vfio_listener_log_sync(MemoryListener *listener,
|
||||
VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
|
||||
listener);
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (vfio_listener_skipped_section(section)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (vfio_devices_all_dirty_tracking(bcontainer)) {
|
||||
ret = vfio_sync_dirty_bitmap(bcontainer, section);
|
||||
ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err);
|
||||
if (ret) {
|
||||
error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
|
||||
strerror(-ret));
|
||||
error_report_err(local_err);
|
||||
vfio_set_migration_error(ret);
|
||||
}
|
||||
}
|
||||
@ -1492,8 +1523,8 @@ retry:
|
||||
return info;
|
||||
}
|
||||
|
||||
int vfio_attach_device(char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
bool vfio_attach_device(char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
{
|
||||
const VFIOIOMMUClass *ops =
|
||||
VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY));
|
||||
|
@ -31,12 +31,12 @@ int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
|
||||
return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
|
||||
}
|
||||
|
||||
int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
|
||||
MemoryRegionSection *section,
|
||||
Error **errp)
|
||||
bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
|
||||
MemoryRegionSection *section,
|
||||
Error **errp)
|
||||
{
|
||||
if (!bcontainer->ops->add_window) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
return bcontainer->ops->add_window(bcontainer, section, errp);
|
||||
@ -53,22 +53,22 @@ void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
|
||||
}
|
||||
|
||||
int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
|
||||
bool start)
|
||||
bool start, Error **errp)
|
||||
{
|
||||
if (!bcontainer->dirty_pages_supported) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
g_assert(bcontainer->ops->set_dirty_page_tracking);
|
||||
return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
|
||||
return bcontainer->ops->set_dirty_page_tracking(bcontainer, start, errp);
|
||||
}
|
||||
|
||||
int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
VFIOBitmap *vbmap,
|
||||
hwaddr iova, hwaddr size)
|
||||
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
|
||||
{
|
||||
g_assert(bcontainer->ops->query_dirty_bitmap);
|
||||
return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
|
||||
return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size,
|
||||
errp);
|
||||
}
|
||||
|
||||
void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
|
||||
|
@ -130,6 +130,7 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
|
||||
};
|
||||
bool need_dirty_sync = false;
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
|
||||
if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
|
||||
@ -165,8 +166,9 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
|
||||
|
||||
if (need_dirty_sync) {
|
||||
ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
|
||||
iotlb->translated_addr);
|
||||
iotlb->translated_addr, &local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -209,7 +211,7 @@ static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
|
||||
|
||||
static int
|
||||
vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
|
||||
bool start)
|
||||
bool start, Error **errp)
|
||||
{
|
||||
const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
|
||||
bcontainer);
|
||||
@ -227,16 +229,15 @@ vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
|
||||
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("Failed to set dirty tracking flag 0x%x errno: %d",
|
||||
dirty.flags, errno);
|
||||
error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x",
|
||||
dirty.flags);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
VFIOBitmap *vbmap,
|
||||
hwaddr iova, hwaddr size)
|
||||
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
|
||||
{
|
||||
const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
|
||||
bcontainer);
|
||||
@ -264,9 +265,10 @@ static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
||||
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
|
||||
" size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
|
||||
(uint64_t)range->size, errno);
|
||||
error_setg_errno(errp, errno,
|
||||
"Failed to get dirty bitmap for iova: 0x%"PRIx64
|
||||
" size: 0x%"PRIx64, (uint64_t)range->iova,
|
||||
(uint64_t)range->size);
|
||||
}
|
||||
|
||||
g_free(dbitmap);
|
||||
@ -391,21 +393,20 @@ static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp)
|
||||
return VFIO_IOMMU_CLASS(klass);
|
||||
}
|
||||
|
||||
static int vfio_set_iommu(VFIOContainer *container, int group_fd,
|
||||
VFIOAddressSpace *space, Error **errp)
|
||||
static bool vfio_set_iommu(VFIOContainer *container, int group_fd,
|
||||
VFIOAddressSpace *space, Error **errp)
|
||||
{
|
||||
int iommu_type, ret;
|
||||
int iommu_type;
|
||||
const VFIOIOMMUClass *vioc;
|
||||
|
||||
iommu_type = vfio_get_iommu_type(container, errp);
|
||||
if (iommu_type < 0) {
|
||||
return iommu_type;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
|
||||
if (ret) {
|
||||
if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
|
||||
error_setg_errno(errp, errno, "Failed to set group container");
|
||||
return -errno;
|
||||
return false;
|
||||
}
|
||||
|
||||
while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
|
||||
@ -420,7 +421,7 @@ static int vfio_set_iommu(VFIOContainer *container, int group_fd,
|
||||
continue;
|
||||
}
|
||||
error_setg_errno(errp, errno, "Failed to set iommu for container");
|
||||
return -errno;
|
||||
return false;
|
||||
}
|
||||
|
||||
container->iommu_type = iommu_type;
|
||||
@ -428,11 +429,11 @@ static int vfio_set_iommu(VFIOContainer *container, int group_fd,
|
||||
vioc = vfio_get_iommu_class(iommu_type, errp);
|
||||
if (!vioc) {
|
||||
error_setg(errp, "No available IOMMU models");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
vfio_container_init(&container->bcontainer, space, vioc);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int vfio_get_iommu_info(VFIOContainer *container,
|
||||
@ -505,7 +506,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
|
||||
static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
|
||||
{
|
||||
VFIOContainer *container = container_of(bcontainer, VFIOContainer,
|
||||
bcontainer);
|
||||
@ -515,7 +516,7 @@ static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
|
||||
ret = vfio_get_iommu_info(container, &info);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
|
||||
@ -531,11 +532,11 @@ static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
|
||||
vfio_get_info_iova_range(info, bcontainer);
|
||||
|
||||
vfio_get_iommu_info_migration(container, info);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
Error **errp)
|
||||
static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
Error **errp)
|
||||
{
|
||||
VFIOContainer *container;
|
||||
VFIOContainerBase *bcontainer;
|
||||
@ -587,19 +588,18 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
error_report("vfio: error disconnecting group %d from"
|
||||
" container", group->groupid);
|
||||
}
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
group->container = container;
|
||||
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
|
||||
vfio_kvm_device_add_group(group);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
|
||||
if (fd < 0) {
|
||||
error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
|
||||
ret = -errno;
|
||||
goto put_space_exit;
|
||||
}
|
||||
|
||||
@ -607,7 +607,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
if (ret != VFIO_API_VERSION) {
|
||||
error_setg(errp, "supported vfio version: %d, "
|
||||
"reported version: %d", VFIO_API_VERSION, ret);
|
||||
ret = -EINVAL;
|
||||
goto close_fd_exit;
|
||||
}
|
||||
|
||||
@ -615,13 +614,11 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
container->fd = fd;
|
||||
bcontainer = &container->bcontainer;
|
||||
|
||||
ret = vfio_set_iommu(container, group->fd, space, errp);
|
||||
if (ret) {
|
||||
if (!vfio_set_iommu(container, group->fd, space, errp)) {
|
||||
goto free_container_exit;
|
||||
}
|
||||
|
||||
ret = vfio_cpr_register_container(bcontainer, errp);
|
||||
if (ret) {
|
||||
if (!vfio_cpr_register_container(bcontainer, errp)) {
|
||||
goto free_container_exit;
|
||||
}
|
||||
|
||||
@ -633,8 +630,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
|
||||
assert(bcontainer->ops->setup);
|
||||
|
||||
ret = bcontainer->ops->setup(bcontainer, errp);
|
||||
if (ret) {
|
||||
if (!bcontainer->ops->setup(bcontainer, errp)) {
|
||||
goto enable_discards_exit;
|
||||
}
|
||||
|
||||
@ -650,7 +646,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
memory_listener_register(&bcontainer->listener, bcontainer->space->as);
|
||||
|
||||
if (bcontainer->error) {
|
||||
ret = -1;
|
||||
error_propagate_prepend(errp, bcontainer->error,
|
||||
"memory listener initialization failed: ");
|
||||
goto listener_release_exit;
|
||||
@ -658,7 +653,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
|
||||
|
||||
bcontainer->initialized = true;
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
listener_release_exit:
|
||||
QLIST_REMOVE(group, container_next);
|
||||
QLIST_REMOVE(bcontainer, next);
|
||||
@ -683,7 +678,7 @@ close_fd_exit:
|
||||
put_space_exit:
|
||||
vfio_put_address_space(space);
|
||||
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void vfio_disconnect_container(VFIOGroup *group)
|
||||
@ -770,7 +765,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
|
||||
group->groupid = groupid;
|
||||
QLIST_INIT(&group->device_list);
|
||||
|
||||
if (vfio_connect_container(group, as, errp)) {
|
||||
if (!vfio_connect_container(group, as, errp)) {
|
||||
error_prepend(errp, "failed to setup container for group %d: ",
|
||||
groupid);
|
||||
goto close_fd_exit;
|
||||
@ -806,8 +801,8 @@ static void vfio_put_group(VFIOGroup *group)
|
||||
g_free(group);
|
||||
}
|
||||
|
||||
static int vfio_get_device(VFIOGroup *group, const char *name,
|
||||
VFIODevice *vbasedev, Error **errp)
|
||||
static bool vfio_get_device(VFIOGroup *group, const char *name,
|
||||
VFIODevice *vbasedev, Error **errp)
|
||||
{
|
||||
g_autofree struct vfio_device_info *info = NULL;
|
||||
int fd;
|
||||
@ -819,14 +814,14 @@ static int vfio_get_device(VFIOGroup *group, const char *name,
|
||||
error_append_hint(errp,
|
||||
"Verify all devices in group %d are bound to vfio-<bus> "
|
||||
"or pci-stub and not already in use\n", group->groupid);
|
||||
return fd;
|
||||
return false;
|
||||
}
|
||||
|
||||
info = vfio_get_device_info(fd);
|
||||
if (!info) {
|
||||
error_setg_errno(errp, errno, "error getting device info");
|
||||
close(fd);
|
||||
return -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -841,7 +836,7 @@ static int vfio_get_device(VFIOGroup *group, const char *name,
|
||||
error_setg(errp, "Inconsistent setting of support for discarding "
|
||||
"RAM (e.g., balloon) within group");
|
||||
close(fd);
|
||||
return -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!group->ram_block_discard_allowed) {
|
||||
@ -862,7 +857,7 @@ static int vfio_get_device(VFIOGroup *group, const char *name,
|
||||
|
||||
vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_put_base_device(VFIODevice *vbasedev)
|
||||
@ -908,37 +903,35 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
|
||||
* @name and @vbasedev->name are likely to be different depending
|
||||
* on the type of the device, hence the need for passing @name
|
||||
*/
|
||||
static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
{
|
||||
int groupid = vfio_device_groupid(vbasedev, errp);
|
||||
VFIODevice *vbasedev_iter;
|
||||
VFIOGroup *group;
|
||||
VFIOContainerBase *bcontainer;
|
||||
int ret;
|
||||
|
||||
if (groupid < 0) {
|
||||
return groupid;
|
||||
return false;
|
||||
}
|
||||
|
||||
trace_vfio_attach_device(vbasedev->name, groupid);
|
||||
|
||||
group = vfio_get_group(groupid, as, errp);
|
||||
if (!group) {
|
||||
return -ENOENT;
|
||||
return false;
|
||||
}
|
||||
|
||||
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
|
||||
if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
|
||||
error_setg(errp, "device is already attached");
|
||||
vfio_put_group(group);
|
||||
return -EBUSY;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
ret = vfio_get_device(group, name, vbasedev, errp);
|
||||
if (ret) {
|
||||
if (!vfio_get_device(group, name, vbasedev, errp)) {
|
||||
vfio_put_group(group);
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
bcontainer = &group->container->bcontainer;
|
||||
@ -946,7 +939,7 @@ static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
|
||||
QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
|
||||
QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
|
||||
|
||||
return ret;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_legacy_detach_device(VFIODevice *vbasedev)
|
||||
|
@ -25,12 +25,12 @@ static int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp)
|
||||
bool vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp)
|
||||
{
|
||||
migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier,
|
||||
vfio_cpr_reboot_notifier,
|
||||
MIG_MODE_CPR_REBOOT);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer)
|
||||
|
@ -348,11 +348,11 @@ static const GraphicHwOps vfio_display_dmabuf_ops = {
|
||||
.ui_info = vfio_display_edid_ui_info,
|
||||
};
|
||||
|
||||
static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
if (!display_opengl) {
|
||||
error_setg(errp, "vfio-display-dmabuf: opengl not available");
|
||||
return -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
vdev->dpy = g_new0(VFIODisplay, 1);
|
||||
@ -361,9 +361,12 @@ static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
vdev);
|
||||
if (vdev->enable_ramfb) {
|
||||
vdev->dpy->ramfb = ramfb_setup(errp);
|
||||
if (!vdev->dpy->ramfb) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
vfio_display_edid_init(vdev);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_display_dmabuf_exit(VFIODisplay *dpy)
|
||||
@ -480,7 +483,7 @@ static const GraphicHwOps vfio_display_region_ops = {
|
||||
.gfx_update = vfio_display_region_update,
|
||||
};
|
||||
|
||||
static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
vdev->dpy = g_new0(VFIODisplay, 1);
|
||||
vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
|
||||
@ -488,8 +491,11 @@ static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
|
||||
vdev);
|
||||
if (vdev->enable_ramfb) {
|
||||
vdev->dpy->ramfb = ramfb_setup(errp);
|
||||
if (!vdev->dpy->ramfb) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_display_region_exit(VFIODisplay *dpy)
|
||||
@ -504,7 +510,7 @@ static void vfio_display_region_exit(VFIODisplay *dpy)
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp)
|
||||
bool vfio_display_probe(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
struct vfio_device_gfx_plane_info probe;
|
||||
int ret;
|
||||
@ -527,11 +533,11 @@ int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
if (vdev->display == ON_OFF_AUTO_AUTO) {
|
||||
/* not an error in automatic mode */
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
error_setg(errp, "vfio: device doesn't support any (known) display method");
|
||||
return -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
void vfio_display_finalize(VFIOPCIDevice *vdev)
|
||||
|
@ -107,12 +107,12 @@ static const char *index_to_str(VFIODevice *vbasedev, int index)
|
||||
}
|
||||
}
|
||||
|
||||
int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
|
||||
int action, int fd, Error **errp)
|
||||
bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
|
||||
int action, int fd, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
struct vfio_irq_set *irq_set;
|
||||
int argsz, ret = 0;
|
||||
g_autofree struct vfio_irq_set *irq_set = NULL;
|
||||
int argsz;
|
||||
const char *name;
|
||||
int32_t *pfd;
|
||||
|
||||
@ -127,16 +127,11 @@ int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
|
||||
pfd = (int32_t *)&irq_set->data;
|
||||
*pfd = fd;
|
||||
|
||||
if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
|
||||
ret = -errno;
|
||||
}
|
||||
g_free(irq_set);
|
||||
|
||||
if (!ret) {
|
||||
return 0;
|
||||
if (!ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");
|
||||
error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");
|
||||
|
||||
name = index_to_str(vbasedev, index);
|
||||
if (name) {
|
||||
@ -147,7 +142,7 @@ int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
|
||||
error_prepend(errp,
|
||||
"Failed to %s %s eventfd signaling for interrupt ",
|
||||
fd < 0 ? "tear down" : "set up", action_to_str(action));
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -348,7 +343,7 @@ static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
|
||||
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
|
||||
int index, const char *name)
|
||||
{
|
||||
struct vfio_region_info *info;
|
||||
g_autofree struct vfio_region_info *info = NULL;
|
||||
int ret;
|
||||
|
||||
ret = vfio_get_region_info(vbasedev, index, &info);
|
||||
@ -381,8 +376,6 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
|
||||
}
|
||||
}
|
||||
|
||||
g_free(info);
|
||||
|
||||
trace_vfio_region_setup(vbasedev->name, index, name,
|
||||
region->flags, region->fd_offset, region->size);
|
||||
return 0;
|
||||
@ -599,20 +592,19 @@ int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
|
||||
|
||||
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
|
||||
{
|
||||
struct vfio_region_info *info = NULL;
|
||||
g_autofree struct vfio_region_info *info = NULL;
|
||||
bool ret = false;
|
||||
|
||||
if (!vfio_get_region_info(vbasedev, region, &info)) {
|
||||
if (vfio_get_region_info_cap(info, cap_type)) {
|
||||
ret = true;
|
||||
}
|
||||
g_free(info);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
|
||||
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
struct stat st;
|
||||
@ -621,7 +613,7 @@ int vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
|
||||
if (stat(vbasedev->sysfsdev, &st) < 0) {
|
||||
error_setg_errno(errp, errno, "no such host device");
|
||||
error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
|
||||
return -errno;
|
||||
return false;
|
||||
}
|
||||
/* User may specify a name, e.g: VFIO platform device */
|
||||
if (!vbasedev->name) {
|
||||
@ -630,7 +622,7 @@ int vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
|
||||
} else {
|
||||
if (!vbasedev->iommufd) {
|
||||
error_setg(errp, "Use FD passing only with iommufd backend");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* Give a name with fd so any function printing out vbasedev->name
|
||||
@ -641,7 +633,7 @@ int vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
|
||||
|
@ -367,8 +367,10 @@ static const MemoryRegionOps vfio_igd_index_quirk = {
|
||||
|
||||
void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
{
|
||||
struct vfio_region_info *rom = NULL, *opregion = NULL,
|
||||
*host = NULL, *lpc = NULL;
|
||||
g_autofree struct vfio_region_info *rom = NULL;
|
||||
g_autofree struct vfio_region_info *opregion = NULL;
|
||||
g_autofree struct vfio_region_info *host = NULL;
|
||||
g_autofree struct vfio_region_info *lpc = NULL;
|
||||
VFIOQuirk *quirk;
|
||||
VFIOIGDQuirk *igd;
|
||||
PCIDevice *lpc_bridge;
|
||||
@ -426,7 +428,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if ((ret || !rom->size) && !vdev->pdev.romfile) {
|
||||
error_report("IGD device %s has no ROM, legacy mode disabled",
|
||||
vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -437,7 +439,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
error_report("IGD device %s hotplugged, ROM disabled, "
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
vdev->rom_read_failed = true;
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -450,7 +452,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if (ret) {
|
||||
error_report("IGD device %s does not support OpRegion access,"
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vfio_get_dev_region_info(&vdev->vbasedev,
|
||||
@ -459,7 +461,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if (ret) {
|
||||
error_report("IGD device %s does not support host bridge access,"
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vfio_get_dev_region_info(&vdev->vbasedev,
|
||||
@ -468,7 +470,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if (ret) {
|
||||
error_report("IGD device %s does not support LPC bridge access,"
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
|
||||
@ -478,11 +480,11 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
* try to enable it. Probably shouldn't be using legacy mode without VGA,
|
||||
* but also no point in us enabling VGA if disabled in hardware.
|
||||
*/
|
||||
if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) {
|
||||
if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
error_report("IGD device %s failed to enable VGA access, "
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Create our LPC/ISA bridge */
|
||||
@ -490,7 +492,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if (ret) {
|
||||
error_report("IGD device %s failed to create LPC bridge, "
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Stuff some host values into the VM PCI host bridge */
|
||||
@ -498,15 +500,14 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
if (ret) {
|
||||
error_report("IGD device %s failed to modify host bridge, "
|
||||
"legacy mode disabled", vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Setup OpRegion access */
|
||||
ret = vfio_pci_igd_opregion_init(vdev, opregion, &err);
|
||||
if (ret) {
|
||||
if (!vfio_pci_igd_opregion_init(vdev, opregion, &err)) {
|
||||
error_append_hint(&err, "IGD legacy mode disabled\n");
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Setup our quirk to munge GTT addresses to the VM allocated buffer */
|
||||
@ -608,10 +609,4 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
}
|
||||
|
||||
trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
|
||||
|
||||
out:
|
||||
g_free(rom);
|
||||
g_free(opregion);
|
||||
g_free(host);
|
||||
g_free(lpc);
|
||||
}
|
||||
|
@ -49,9 +49,9 @@ static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
|
||||
container->ioas_id, iova, size);
|
||||
}
|
||||
|
||||
static int iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
|
||||
static bool iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
|
||||
{
|
||||
return vfio_kvm_device_add_fd(vbasedev->fd, errp);
|
||||
return !vfio_kvm_device_add_fd(vbasedev->fd, errp);
|
||||
}
|
||||
|
||||
static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
|
||||
@ -63,18 +63,16 @@ static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
|
||||
}
|
||||
}
|
||||
|
||||
static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
|
||||
static bool iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
|
||||
{
|
||||
IOMMUFDBackend *iommufd = vbasedev->iommufd;
|
||||
struct vfio_device_bind_iommufd bind = {
|
||||
.argsz = sizeof(bind),
|
||||
.flags = 0,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = iommufd_backend_connect(iommufd, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!iommufd_backend_connect(iommufd, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -82,15 +80,13 @@ static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
|
||||
* in KVM. Especially for some emulated devices, it requires
|
||||
* to have kvm information in the device open.
|
||||
*/
|
||||
ret = iommufd_cdev_kvm_device_add(vbasedev, errp);
|
||||
if (ret) {
|
||||
if (!iommufd_cdev_kvm_device_add(vbasedev, errp)) {
|
||||
goto err_kvm_device_add;
|
||||
}
|
||||
|
||||
/* Bind device to iommufd */
|
||||
bind.iommufd = iommufd->fd;
|
||||
ret = ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind);
|
||||
if (ret) {
|
||||
if (ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
|
||||
error_setg_errno(errp, errno, "error bind device fd=%d to iommufd=%d",
|
||||
vbasedev->fd, bind.iommufd);
|
||||
goto err_bind;
|
||||
@ -99,12 +95,12 @@ static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
|
||||
vbasedev->devid = bind.out_devid;
|
||||
trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name,
|
||||
vbasedev->fd, vbasedev->devid);
|
||||
return ret;
|
||||
return true;
|
||||
err_bind:
|
||||
iommufd_cdev_kvm_device_del(vbasedev);
|
||||
err_kvm_device_add:
|
||||
iommufd_backend_disconnect(iommufd);
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev)
|
||||
@ -176,10 +172,10 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
|
||||
static bool iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
|
||||
Error **errp)
|
||||
{
|
||||
int ret, iommufd = vbasedev->iommufd->fd;
|
||||
int iommufd = vbasedev->iommufd->fd;
|
||||
struct vfio_device_attach_iommufd_pt attach_data = {
|
||||
.argsz = sizeof(attach_data),
|
||||
.flags = 0,
|
||||
@ -187,38 +183,38 @@ static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
|
||||
};
|
||||
|
||||
/* Attach device to an IOAS or hwpt within iommufd */
|
||||
ret = ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data);
|
||||
if (ret) {
|
||||
if (ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
|
||||
error_setg_errno(errp, errno,
|
||||
"[iommufd=%d] error attach %s (%d) to id=%d",
|
||||
iommufd, vbasedev->name, vbasedev->fd, id);
|
||||
} else {
|
||||
trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
|
||||
vbasedev->fd, id);
|
||||
return false;
|
||||
}
|
||||
return ret;
|
||||
|
||||
trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
|
||||
vbasedev->fd, id);
|
||||
return true;
|
||||
}
|
||||
|
||||
static int iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
|
||||
static bool iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
|
||||
{
|
||||
int ret, iommufd = vbasedev->iommufd->fd;
|
||||
int iommufd = vbasedev->iommufd->fd;
|
||||
struct vfio_device_detach_iommufd_pt detach_data = {
|
||||
.argsz = sizeof(detach_data),
|
||||
.flags = 0,
|
||||
};
|
||||
|
||||
ret = ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data);
|
||||
if (ret) {
|
||||
if (ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data)) {
|
||||
error_setg_errno(errp, errno, "detach %s failed", vbasedev->name);
|
||||
} else {
|
||||
trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name);
|
||||
return false;
|
||||
}
|
||||
return ret;
|
||||
|
||||
trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name);
|
||||
return true;
|
||||
}
|
||||
|
||||
static int iommufd_cdev_attach_container(VFIODevice *vbasedev,
|
||||
VFIOIOMMUFDContainer *container,
|
||||
Error **errp)
|
||||
static bool iommufd_cdev_attach_container(VFIODevice *vbasedev,
|
||||
VFIOIOMMUFDContainer *container,
|
||||
Error **errp)
|
||||
{
|
||||
return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
|
||||
}
|
||||
@ -228,7 +224,7 @@ static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
|
||||
{
|
||||
Error *err = NULL;
|
||||
|
||||
if (iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
|
||||
if (!iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
|
||||
error_report_err(err);
|
||||
}
|
||||
}
|
||||
@ -254,20 +250,19 @@ static int iommufd_cdev_ram_block_discard_disable(bool state)
|
||||
return ram_block_uncoordinated_discard_disable(state);
|
||||
}
|
||||
|
||||
static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
|
||||
uint32_t ioas_id, Error **errp)
|
||||
static bool iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
|
||||
uint32_t ioas_id, Error **errp)
|
||||
{
|
||||
VFIOContainerBase *bcontainer = &container->bcontainer;
|
||||
struct iommu_ioas_iova_ranges *info;
|
||||
g_autofree struct iommu_ioas_iova_ranges *info = NULL;
|
||||
struct iommu_iova_range *iova_ranges;
|
||||
int ret, sz, fd = container->be->fd;
|
||||
int sz, fd = container->be->fd;
|
||||
|
||||
info = g_malloc0(sizeof(*info));
|
||||
info->size = sizeof(*info);
|
||||
info->ioas_id = ioas_id;
|
||||
|
||||
ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
|
||||
if (ret && errno != EMSGSIZE) {
|
||||
if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info) && errno != EMSGSIZE) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -275,8 +270,7 @@ static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
|
||||
info = g_realloc(info, sizeof(*info) + sz);
|
||||
info->allowed_iovas = (uintptr_t)(info + 1);
|
||||
|
||||
ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
|
||||
if (ret) {
|
||||
if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info)) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -291,18 +285,15 @@ static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
|
||||
}
|
||||
bcontainer->pgsizes = info->out_iova_alignment;
|
||||
|
||||
g_free(info);
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
error:
|
||||
ret = -errno;
|
||||
g_free(info);
|
||||
error_setg_errno(errp, errno, "Cannot get IOVA ranges");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
AddressSpace *as, Error **errp)
|
||||
{
|
||||
VFIOContainerBase *bcontainer;
|
||||
VFIOIOMMUFDContainer *container;
|
||||
@ -317,15 +308,14 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
if (vbasedev->fd < 0) {
|
||||
devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp);
|
||||
if (devfd < 0) {
|
||||
return devfd;
|
||||
return false;
|
||||
}
|
||||
vbasedev->fd = devfd;
|
||||
} else {
|
||||
devfd = vbasedev->fd;
|
||||
}
|
||||
|
||||
ret = iommufd_cdev_connect_and_bind(vbasedev, errp);
|
||||
if (ret) {
|
||||
if (!iommufd_cdev_connect_and_bind(vbasedev, errp)) {
|
||||
goto err_connect_bind;
|
||||
}
|
||||
|
||||
@ -338,7 +328,7 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
vbasedev->iommufd != container->be) {
|
||||
continue;
|
||||
}
|
||||
if (iommufd_cdev_attach_container(vbasedev, container, &err)) {
|
||||
if (!iommufd_cdev_attach_container(vbasedev, container, &err)) {
|
||||
const char *msg = error_get_pretty(err);
|
||||
|
||||
trace_iommufd_cdev_fail_attach_existing_container(msg);
|
||||
@ -356,8 +346,7 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
}
|
||||
|
||||
/* Need to allocate a new dedicated container */
|
||||
ret = iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp);
|
||||
if (ret < 0) {
|
||||
if (!iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp)) {
|
||||
goto err_alloc_ioas;
|
||||
}
|
||||
|
||||
@ -371,8 +360,7 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
vfio_container_init(bcontainer, space, iommufd_vioc);
|
||||
QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
|
||||
|
||||
ret = iommufd_cdev_attach_container(vbasedev, container, errp);
|
||||
if (ret) {
|
||||
if (!iommufd_cdev_attach_container(vbasedev, container, errp)) {
|
||||
goto err_attach_container;
|
||||
}
|
||||
|
||||
@ -381,8 +369,7 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
goto err_discard_disable;
|
||||
}
|
||||
|
||||
ret = iommufd_cdev_get_info_iova_range(container, ioas_id, &err);
|
||||
if (ret) {
|
||||
if (!iommufd_cdev_get_info_iova_range(container, ioas_id, &err)) {
|
||||
error_append_hint(&err,
|
||||
"Fallback to default 64bit IOVA range and 4K page size\n");
|
||||
warn_report_err(err);
|
||||
@ -394,7 +381,6 @@ static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
|
||||
memory_listener_register(&bcontainer->listener, bcontainer->space->as);
|
||||
|
||||
if (bcontainer->error) {
|
||||
ret = -1;
|
||||
error_propagate_prepend(errp, bcontainer->error,
|
||||
"memory listener initialization failed: ");
|
||||
goto err_listener_register;
|
||||
@ -409,8 +395,7 @@ found_container:
|
||||
goto err_listener_register;
|
||||
}
|
||||
|
||||
ret = vfio_cpr_register_container(bcontainer, errp);
|
||||
if (ret) {
|
||||
if (!vfio_cpr_register_container(bcontainer, errp)) {
|
||||
goto err_listener_register;
|
||||
}
|
||||
|
||||
@ -433,7 +418,7 @@ found_container:
|
||||
|
||||
trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs,
|
||||
vbasedev->num_regions, vbasedev->flags);
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
err_listener_register:
|
||||
iommufd_cdev_ram_block_discard_disable(false);
|
||||
@ -446,7 +431,7 @@ err_alloc_ioas:
|
||||
iommufd_cdev_unbind_and_disconnect(vbasedev);
|
||||
err_connect_bind:
|
||||
close(vbasedev->fd);
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void iommufd_cdev_detach(VFIODevice *vbasedev)
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include "migration/register.h"
|
||||
#include "migration/blocker.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qapi-events-vfio.h"
|
||||
#include "exec/ramlist.h"
|
||||
#include "exec/ram_addr.h"
|
||||
#include "pci.h"
|
||||
@ -80,9 +81,65 @@ static const char *mig_state_to_str(enum vfio_device_mig_state state)
|
||||
}
|
||||
}
|
||||
|
||||
static VfioMigrationState
|
||||
mig_state_to_qapi_state(enum vfio_device_mig_state state)
|
||||
{
|
||||
switch (state) {
|
||||
case VFIO_DEVICE_STATE_STOP:
|
||||
return QAPI_VFIO_MIGRATION_STATE_STOP;
|
||||
case VFIO_DEVICE_STATE_RUNNING:
|
||||
return QAPI_VFIO_MIGRATION_STATE_RUNNING;
|
||||
case VFIO_DEVICE_STATE_STOP_COPY:
|
||||
return QAPI_VFIO_MIGRATION_STATE_STOP_COPY;
|
||||
case VFIO_DEVICE_STATE_RESUMING:
|
||||
return QAPI_VFIO_MIGRATION_STATE_RESUMING;
|
||||
case VFIO_DEVICE_STATE_RUNNING_P2P:
|
||||
return QAPI_VFIO_MIGRATION_STATE_RUNNING_P2P;
|
||||
case VFIO_DEVICE_STATE_PRE_COPY:
|
||||
return QAPI_VFIO_MIGRATION_STATE_PRE_COPY;
|
||||
case VFIO_DEVICE_STATE_PRE_COPY_P2P:
|
||||
return QAPI_VFIO_MIGRATION_STATE_PRE_COPY_P2P;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
}
|
||||
|
||||
static void vfio_migration_send_event(VFIODevice *vbasedev)
|
||||
{
|
||||
VFIOMigration *migration = vbasedev->migration;
|
||||
DeviceState *dev = vbasedev->dev;
|
||||
g_autofree char *qom_path = NULL;
|
||||
Object *obj;
|
||||
|
||||
if (!vbasedev->migration_events) {
|
||||
return;
|
||||
}
|
||||
|
||||
g_assert(vbasedev->ops->vfio_get_object);
|
||||
obj = vbasedev->ops->vfio_get_object(vbasedev);
|
||||
g_assert(obj);
|
||||
qom_path = object_get_canonical_path(obj);
|
||||
|
||||
qapi_event_send_vfio_migration(
|
||||
dev->id, qom_path, mig_state_to_qapi_state(migration->device_state));
|
||||
}
|
||||
|
||||
static void vfio_migration_set_device_state(VFIODevice *vbasedev,
|
||||
enum vfio_device_mig_state state)
|
||||
{
|
||||
VFIOMigration *migration = vbasedev->migration;
|
||||
|
||||
trace_vfio_migration_set_device_state(vbasedev->name,
|
||||
mig_state_to_str(state));
|
||||
|
||||
migration->device_state = state;
|
||||
vfio_migration_send_event(vbasedev);
|
||||
}
|
||||
|
||||
static int vfio_migration_set_state(VFIODevice *vbasedev,
|
||||
enum vfio_device_mig_state new_state,
|
||||
enum vfio_device_mig_state recover_state)
|
||||
enum vfio_device_mig_state recover_state,
|
||||
Error **errp)
|
||||
{
|
||||
VFIOMigration *migration = vbasedev->migration;
|
||||
uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
|
||||
@ -92,6 +149,16 @@ static int vfio_migration_set_state(VFIODevice *vbasedev,
|
||||
struct vfio_device_feature_mig_state *mig_state =
|
||||
(struct vfio_device_feature_mig_state *)feature->data;
|
||||
int ret;
|
||||
g_autofree char *error_prefix =
|
||||
g_strdup_printf("%s: Failed setting device state to %s.",
|
||||
vbasedev->name, mig_state_to_str(new_state));
|
||||
|
||||
trace_vfio_migration_set_state(vbasedev->name, mig_state_to_str(new_state),
|
||||
mig_state_to_str(recover_state));
|
||||
|
||||
if (new_state == migration->device_state) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
feature->argsz = sizeof(buf);
|
||||
feature->flags =
|
||||
@ -102,22 +169,24 @@ static int vfio_migration_set_state(VFIODevice *vbasedev,
|
||||
ret = -errno;
|
||||
|
||||
if (recover_state == VFIO_DEVICE_STATE_ERROR) {
|
||||
error_report("%s: Failed setting device state to %s, err: %s. "
|
||||
"Recover state is ERROR. Resetting device",
|
||||
vbasedev->name, mig_state_to_str(new_state),
|
||||
strerror(errno));
|
||||
error_setg_errno(errp, errno,
|
||||
"%s Recover state is ERROR. Resetting device",
|
||||
error_prefix);
|
||||
|
||||
goto reset_device;
|
||||
}
|
||||
|
||||
error_report(
|
||||
"%s: Failed setting device state to %s, err: %s. Setting device in recover state %s",
|
||||
vbasedev->name, mig_state_to_str(new_state),
|
||||
strerror(errno), mig_state_to_str(recover_state));
|
||||
error_setg_errno(errp, errno,
|
||||
"%s Setting device in recover state %s",
|
||||
error_prefix, mig_state_to_str(recover_state));
|
||||
|
||||
mig_state->device_state = recover_state;
|
||||
if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
|
||||
ret = -errno;
|
||||
/*
|
||||
* If setting the device in recover state fails, report
|
||||
* the error here and propagate the first error.
|
||||
*/
|
||||
error_report(
|
||||
"%s: Failed setting device in recover state, err: %s. Resetting device",
|
||||
vbasedev->name, strerror(errno));
|
||||
@ -125,19 +194,19 @@ static int vfio_migration_set_state(VFIODevice *vbasedev,
|
||||
goto reset_device;
|
||||
}
|
||||
|
||||
migration->device_state = recover_state;
|
||||
vfio_migration_set_device_state(vbasedev, recover_state);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
migration->device_state = new_state;
|
||||
vfio_migration_set_device_state(vbasedev, new_state);
|
||||
if (mig_state->data_fd != -1) {
|
||||
if (migration->data_fd != -1) {
|
||||
/*
|
||||
* This can happen if the device is asynchronously reset and
|
||||
* terminates a data transfer.
|
||||
*/
|
||||
error_report("%s: data_fd out of sync", vbasedev->name);
|
||||
error_setg(errp, "%s: data_fd out of sync", vbasedev->name);
|
||||
close(mig_state->data_fd);
|
||||
|
||||
return -EBADF;
|
||||
@ -146,8 +215,6 @@ static int vfio_migration_set_state(VFIODevice *vbasedev,
|
||||
migration->data_fd = mig_state->data_fd;
|
||||
}
|
||||
|
||||
trace_vfio_migration_set_state(vbasedev->name, mig_state_to_str(new_state));
|
||||
|
||||
return 0;
|
||||
|
||||
reset_device:
|
||||
@ -156,7 +223,7 @@ reset_device:
|
||||
strerror(errno));
|
||||
}
|
||||
|
||||
migration->device_state = VFIO_DEVICE_STATE_RUNNING;
|
||||
vfio_migration_set_device_state(vbasedev, VFIO_DEVICE_STATE_RUNNING);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -168,10 +235,11 @@ reset_device:
|
||||
*/
|
||||
static int
|
||||
vfio_migration_set_state_or_reset(VFIODevice *vbasedev,
|
||||
enum vfio_device_mig_state new_state)
|
||||
enum vfio_device_mig_state new_state,
|
||||
Error **errp)
|
||||
{
|
||||
return vfio_migration_set_state(vbasedev, new_state,
|
||||
VFIO_DEVICE_STATE_ERROR);
|
||||
VFIO_DEVICE_STATE_ERROR, errp);
|
||||
}
|
||||
|
||||
static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
|
||||
@ -186,21 +254,30 @@ static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
|
||||
static int vfio_save_device_config_state(QEMUFile *f, void *opaque,
|
||||
Error **errp)
|
||||
{
|
||||
VFIODevice *vbasedev = opaque;
|
||||
int ret;
|
||||
|
||||
qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);
|
||||
|
||||
if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
|
||||
vbasedev->ops->vfio_save_config(vbasedev, f);
|
||||
ret = vbasedev->ops->vfio_save_config(vbasedev, f, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
|
||||
|
||||
trace_vfio_save_device_config_state(vbasedev->name);
|
||||
|
||||
return qemu_file_get_error(f);
|
||||
ret = qemu_file_get_error(f);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Failed to save state");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
|
||||
@ -399,10 +476,8 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp)
|
||||
switch (migration->device_state) {
|
||||
case VFIO_DEVICE_STATE_RUNNING:
|
||||
ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_PRE_COPY,
|
||||
VFIO_DEVICE_STATE_RUNNING);
|
||||
VFIO_DEVICE_STATE_RUNNING, errp);
|
||||
if (ret) {
|
||||
error_setg(errp, "%s: Failed to set new PRE_COPY state",
|
||||
vbasedev->name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -435,13 +510,20 @@ static void vfio_save_cleanup(void *opaque)
|
||||
{
|
||||
VFIODevice *vbasedev = opaque;
|
||||
VFIOMigration *migration = vbasedev->migration;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Changing device state from STOP_COPY to STOP can take time. Do it here,
|
||||
* after migration has completed, so it won't increase downtime.
|
||||
*/
|
||||
if (migration->device_state == VFIO_DEVICE_STATE_STOP_COPY) {
|
||||
vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_STOP);
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev,
|
||||
VFIO_DEVICE_STATE_STOP,
|
||||
&local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
|
||||
g_free(migration->data_buffer);
|
||||
@ -549,11 +631,13 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
|
||||
VFIODevice *vbasedev = opaque;
|
||||
ssize_t data_size;
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
/* We reach here with device state STOP or STOP_COPY only */
|
||||
ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY,
|
||||
VFIO_DEVICE_STATE_STOP);
|
||||
VFIO_DEVICE_STATE_STOP, &local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -566,9 +650,6 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
|
||||
|
||||
qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
|
||||
ret = qemu_file_get_error(f);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
trace_vfio_save_complete_precopy(vbasedev->name, ret);
|
||||
|
||||
@ -578,27 +659,24 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
|
||||
static void vfio_save_state(QEMUFile *f, void *opaque)
|
||||
{
|
||||
VFIODevice *vbasedev = opaque;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
ret = vfio_save_device_config_state(f, opaque);
|
||||
ret = vfio_save_device_config_state(f, opaque, &local_err);
|
||||
if (ret) {
|
||||
error_report("%s: Failed to save device config space",
|
||||
vbasedev->name);
|
||||
qemu_file_set_error(f, ret);
|
||||
error_prepend(&local_err,
|
||||
"vfio: Failed to save device config space of %s - ",
|
||||
vbasedev->name);
|
||||
qemu_file_set_error_obj(f, ret, local_err);
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_load_setup(QEMUFile *f, void *opaque, Error **errp)
|
||||
{
|
||||
VFIODevice *vbasedev = opaque;
|
||||
int ret;
|
||||
|
||||
ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
|
||||
vbasedev->migration->device_state);
|
||||
if (ret) {
|
||||
error_setg(errp, "%s: Failed to set RESUMING state", vbasedev->name);
|
||||
}
|
||||
return ret;
|
||||
return vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
|
||||
vbasedev->migration->device_state, errp);
|
||||
}
|
||||
|
||||
static int vfio_load_cleanup(void *opaque)
|
||||
@ -714,19 +792,20 @@ static void vfio_vmstate_change_prepare(void *opaque, bool running,
|
||||
VFIODevice *vbasedev = opaque;
|
||||
VFIOMigration *migration = vbasedev->migration;
|
||||
enum vfio_device_mig_state new_state;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
new_state = migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ?
|
||||
VFIO_DEVICE_STATE_PRE_COPY_P2P :
|
||||
VFIO_DEVICE_STATE_RUNNING_P2P;
|
||||
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev, new_state);
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
|
||||
if (ret) {
|
||||
/*
|
||||
* Migration should be aborted in this case, but vm_state_notify()
|
||||
* currently does not support reporting failures.
|
||||
*/
|
||||
migration_file_set_error(ret);
|
||||
migration_file_set_error(ret, local_err);
|
||||
}
|
||||
|
||||
trace_vfio_vmstate_change_prepare(vbasedev->name, running,
|
||||
@ -738,6 +817,7 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state)
|
||||
{
|
||||
VFIODevice *vbasedev = opaque;
|
||||
enum vfio_device_mig_state new_state;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
if (running) {
|
||||
@ -750,13 +830,13 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state)
|
||||
VFIO_DEVICE_STATE_STOP;
|
||||
}
|
||||
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev, new_state);
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev, new_state, &local_err);
|
||||
if (ret) {
|
||||
/*
|
||||
* Migration should be aborted in this case, but vm_state_notify()
|
||||
* currently does not support reporting failures.
|
||||
*/
|
||||
migration_file_set_error(ret);
|
||||
migration_file_set_error(ret, local_err);
|
||||
}
|
||||
|
||||
trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
|
||||
@ -769,11 +849,23 @@ static int vfio_migration_state_notifier(NotifierWithReturn *notifier,
|
||||
VFIOMigration *migration = container_of(notifier, VFIOMigration,
|
||||
migration_state);
|
||||
VFIODevice *vbasedev = migration->vbasedev;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
trace_vfio_migration_state_notifier(vbasedev->name, e->type);
|
||||
|
||||
if (e->type == MIG_EVENT_PRECOPY_FAILED) {
|
||||
vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_RUNNING);
|
||||
/*
|
||||
* MigrationNotifyFunc may not return an error code and an Error
|
||||
* object for MIG_EVENT_PRECOPY_FAILED. Hence, report the error
|
||||
* locally and ignore the errp argument.
|
||||
*/
|
||||
ret = vfio_migration_set_state_or_reset(vbasedev,
|
||||
VFIO_DEVICE_STATE_RUNNING,
|
||||
&local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1169,8 +1169,8 @@ static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
|
||||
* the table and to write the base address of that memory to the ASLS register
|
||||
* of the IGD device.
|
||||
*/
|
||||
int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
|
||||
struct vfio_region_info *info, Error **errp)
|
||||
bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
|
||||
struct vfio_region_info *info, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -1181,7 +1181,7 @@ int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
|
||||
error_setg(errp, "failed to read IGD OpRegion");
|
||||
g_free(vdev->igd_opregion);
|
||||
vdev->igd_opregion = NULL;
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1206,7 +1206,7 @@ int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
|
||||
pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
|
||||
pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1536,7 +1536,7 @@ static bool is_valid_std_cap_offset(uint8_t pos)
|
||||
pos <= (PCI_CFG_SPACE_SIZE - PCI_CAP_SIZEOF));
|
||||
}
|
||||
|
||||
static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
PCIDevice *pdev = &vdev->pdev;
|
||||
@ -1545,18 +1545,18 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
uint8_t tmp;
|
||||
|
||||
if (vdev->nv_gpudirect_clique == 0xFF) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
|
||||
error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid device vendor");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pci_get_byte(pdev->config + PCI_CLASS_DEVICE + 1) !=
|
||||
PCI_BASE_CLASS_DISPLAY) {
|
||||
error_setg(errp, "NVIDIA GPUDirect Clique ID: unsupported PCI class");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1572,7 +1572,7 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
vdev->config_offset + PCI_CAPABILITY_LIST);
|
||||
if (ret != 1 || !is_valid_std_cap_offset(tmp)) {
|
||||
error_setg(errp, "NVIDIA GPUDirect Clique ID: error getting cap list");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
do {
|
||||
@ -1590,13 +1590,13 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
pos = 0xD4;
|
||||
} else {
|
||||
error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid config space");
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp);
|
||||
if (ret < 0) {
|
||||
error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: ");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
memset(vdev->emulated_config_bits + pos, 0xFF, 8);
|
||||
@ -1608,7 +1608,7 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
pci_set_byte(pdev->config + pos++, vdev->nv_gpudirect_clique << 3);
|
||||
pci_set_byte(pdev->config + pos, 0);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1629,7 +1629,7 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
*/
|
||||
#define VMD_SHADOW_CAP_VER 1
|
||||
#define VMD_SHADOW_CAP_LEN 24
|
||||
static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
uint8_t membar_phys[16];
|
||||
@ -1639,7 +1639,7 @@ static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x467F) ||
|
||||
vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x4C3D) ||
|
||||
vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x9A0B))) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
ret = pread(vdev->vbasedev.fd, membar_phys, 16,
|
||||
@ -1647,14 +1647,14 @@ static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
if (ret != 16) {
|
||||
error_report("VMD %s cannot read MEMBARs (%d)",
|
||||
vdev->vbasedev.name, ret);
|
||||
return -EFAULT;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos,
|
||||
VMD_SHADOW_CAP_LEN, errp);
|
||||
if (ret < 0) {
|
||||
error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: ");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN);
|
||||
@ -1664,22 +1664,18 @@ static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
|
||||
pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */
|
||||
memcpy(vdev->pdev.config + pos + 4, membar_phys, 16);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
|
||||
bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vfio_add_nv_gpudirect_cap(vdev, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!vfio_add_nv_gpudirect_cap(vdev, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = vfio_add_vmd_shadow_cap(vdev, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!vfio_add_vmd_shadow_cap(vdev, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
280
hw/vfio/pci.c
280
hw/vfio/pci.c
@ -116,7 +116,7 @@ static void vfio_intx_eoi(VFIODevice *vbasedev)
|
||||
vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
|
||||
}
|
||||
|
||||
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
#ifdef CONFIG_KVM
|
||||
int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
|
||||
@ -124,7 +124,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
|
||||
if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
|
||||
vdev->intx.route.mode != PCI_INTX_ENABLED ||
|
||||
!kvm_resamplefds_enabled()) {
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Get to a known interrupt state */
|
||||
@ -147,10 +147,10 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
|
||||
goto fail_irqfd;
|
||||
}
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_UNMASK,
|
||||
event_notifier_get_fd(&vdev->intx.unmask),
|
||||
errp)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_UNMASK,
|
||||
event_notifier_get_fd(&vdev->intx.unmask),
|
||||
errp)) {
|
||||
goto fail_vfio;
|
||||
}
|
||||
|
||||
@ -161,7 +161,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
|
||||
|
||||
return;
|
||||
return true;
|
||||
|
||||
fail_vfio:
|
||||
kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
|
||||
@ -171,6 +171,9 @@ fail_irqfd:
|
||||
fail:
|
||||
qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
|
||||
vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
|
||||
return false;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -226,8 +229,7 @@ static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
|
||||
return;
|
||||
}
|
||||
|
||||
vfio_intx_enable_kvm(vdev, &err);
|
||||
if (err) {
|
||||
if (!vfio_intx_enable_kvm(vdev, &err)) {
|
||||
warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
|
||||
@ -259,7 +261,7 @@ static void vfio_irqchip_change(Notifier *notify, void *data)
|
||||
vfio_intx_update(vdev, &vdev->intx.route);
|
||||
}
|
||||
|
||||
static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
|
||||
Error *err = NULL;
|
||||
@ -268,7 +270,7 @@ static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
|
||||
if (!pin) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
vfio_disable_interrupts(vdev);
|
||||
@ -290,27 +292,26 @@ static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
|
||||
ret = event_notifier_init(&vdev->intx.interrupt, 0);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "event_notifier_init failed");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
fd = event_notifier_get_fd(&vdev->intx.interrupt);
|
||||
qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
|
||||
qemu_set_fd_handler(fd, NULL, NULL, vdev);
|
||||
event_notifier_cleanup(&vdev->intx.interrupt);
|
||||
return -errno;
|
||||
return false;
|
||||
}
|
||||
|
||||
vfio_intx_enable_kvm(vdev, &err);
|
||||
if (err) {
|
||||
if (!vfio_intx_enable_kvm(vdev, &err)) {
|
||||
warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
|
||||
vdev->interrupt = VFIO_INT_INTx;
|
||||
|
||||
trace_vfio_intx_enable(vdev->vbasedev.name);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_intx_disable(VFIOPCIDevice *vdev)
|
||||
@ -590,9 +591,10 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
|
||||
fd = event_notifier_get_fd(&vector->interrupt);
|
||||
}
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev,
|
||||
VFIO_PCI_MSIX_IRQ_INDEX, nr,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev,
|
||||
VFIO_PCI_MSIX_IRQ_INDEX, nr,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd,
|
||||
&err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
}
|
||||
@ -634,8 +636,9 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
|
||||
int32_t fd = event_notifier_get_fd(&vector->interrupt);
|
||||
Error *err = NULL;
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
|
||||
nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
|
||||
&err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
}
|
||||
@ -833,8 +836,7 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev)
|
||||
vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
|
||||
|
||||
vfio_msi_disable_common(vdev);
|
||||
vfio_intx_enable(vdev, &err);
|
||||
if (err) {
|
||||
if (!vfio_intx_enable(vdev, &err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
|
||||
@ -877,7 +879,7 @@ static void vfio_update_msi(VFIOPCIDevice *vdev)
|
||||
|
||||
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
|
||||
{
|
||||
struct vfio_region_info *reg_info;
|
||||
g_autofree struct vfio_region_info *reg_info = NULL;
|
||||
uint64_t size;
|
||||
off_t off = 0;
|
||||
ssize_t bytes;
|
||||
@ -895,8 +897,6 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
|
||||
vdev->rom_size = size = reg_info->size;
|
||||
vdev->rom_offset = reg_info->offset;
|
||||
|
||||
g_free(reg_info);
|
||||
|
||||
if (!vdev->rom_size) {
|
||||
vdev->rom_read_failed = true;
|
||||
error_report("vfio-pci: Cannot read device rom at "
|
||||
@ -1337,7 +1337,7 @@ static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
{
|
||||
uint16_t ctrl;
|
||||
bool msi_64bit, msi_maskbit;
|
||||
@ -1347,7 +1347,7 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
|
||||
vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
|
||||
error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
|
||||
return -errno;
|
||||
return false;
|
||||
}
|
||||
ctrl = le16_to_cpu(ctrl);
|
||||
|
||||
@ -1360,14 +1360,14 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOTSUP) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
error_propagate_prepend(errp, err, "msi_init failed: ");
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
|
||||
@ -1447,13 +1447,13 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
|
||||
}
|
||||
}
|
||||
|
||||
static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
int target_bar = -1;
|
||||
size_t msix_sz;
|
||||
|
||||
if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* The actual minimum size of MSI-X structures */
|
||||
@ -1476,7 +1476,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
if (target_bar < 0) {
|
||||
error_setg(errp, "No automatic MSI-X relocation available for "
|
||||
"device %04x:%04x", vdev->vendor_id, vdev->device_id);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
|
||||
@ -1486,7 +1486,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
if (vdev->bars[target_bar].ioport) {
|
||||
error_setg(errp, "Invalid MSI-X relocation BAR %d, "
|
||||
"I/O port BAR", target_bar);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Cannot use a BAR in the "shadow" of a 64-bit BAR */
|
||||
@ -1494,7 +1494,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
|
||||
error_setg(errp, "Invalid MSI-X relocation BAR %d, "
|
||||
"consumed by 64-bit BAR %d", target_bar, target_bar - 1);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* 2GB max size for 32-bit BARs, cannot double if already > 1G */
|
||||
@ -1502,7 +1502,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
!vdev->bars[target_bar].mem64) {
|
||||
error_setg(errp, "Invalid MSI-X relocation BAR %d, "
|
||||
"no space to extend 32-bit BAR", target_bar);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1537,6 +1537,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
trace_vfio_msix_relo(vdev->vbasedev.name,
|
||||
vdev->msix->table_bar, vdev->msix->table_offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1547,7 +1548,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
|
||||
* need to first look for where the MSI-X table lives. So we
|
||||
* unfortunately split MSI-X setup across two functions.
|
||||
*/
|
||||
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
uint8_t pos;
|
||||
uint16_t ctrl;
|
||||
@ -1559,25 +1560,25 @@ static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
|
||||
if (!pos) {
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pread(fd, &ctrl, sizeof(ctrl),
|
||||
vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
|
||||
error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pread(fd, &table, sizeof(table),
|
||||
vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
|
||||
error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pread(fd, &pba, sizeof(pba),
|
||||
vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
|
||||
error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
ctrl = le16_to_cpu(ctrl);
|
||||
@ -1595,7 +1596,7 @@ static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "failed to get MSI-X irq info");
|
||||
g_free(msix);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
msix->noresize = !!(irq_info.flags & VFIO_IRQ_INFO_NORESIZE);
|
||||
@ -1627,7 +1628,7 @@ static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
error_setg(errp, "hardware reports invalid configuration, "
|
||||
"MSIX PBA outside of specified BAR");
|
||||
g_free(msix);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1638,10 +1639,10 @@ static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
vfio_pci_fixup_msix_region(vdev);
|
||||
|
||||
vfio_pci_relocate_msix(vdev, errp);
|
||||
return vfio_pci_relocate_msix(vdev, errp);
|
||||
}
|
||||
|
||||
static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
Error *err = NULL;
|
||||
@ -1657,11 +1658,11 @@ static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOTSUP) {
|
||||
warn_report_err(err);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
error_propagate(errp, err);
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1695,7 +1696,7 @@ static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
|
||||
memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_teardown_msi(VFIOPCIDevice *vdev)
|
||||
@ -1974,8 +1975,8 @@ static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev)
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
Error **errp)
|
||||
static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
Error **errp)
|
||||
{
|
||||
uint16_t flags;
|
||||
uint8_t type;
|
||||
@ -1989,7 +1990,7 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
|
||||
error_setg(errp, "assignment of PCIe type 0x%x "
|
||||
"devices is not currently supported", type);
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
|
||||
@ -2022,7 +2023,7 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
}
|
||||
|
||||
if (pci_bus_is_express(bus)) {
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
} else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
|
||||
@ -2060,7 +2061,7 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
* Legacy endpoints don't belong on the root complex. Windows
|
||||
* seems to be happier with devices if we skip the capability.
|
||||
*/
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
} else {
|
||||
@ -2096,12 +2097,12 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
|
||||
pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
|
||||
errp);
|
||||
if (pos < 0) {
|
||||
return pos;
|
||||
return false;
|
||||
}
|
||||
|
||||
vdev->pdev.exp.exp_cap = pos;
|
||||
|
||||
return pos;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
|
||||
@ -2134,12 +2135,34 @@ static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
|
||||
static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos,
|
||||
uint8_t size, Error **errp)
|
||||
{
|
||||
PCIDevice *pdev = &vdev->pdev;
|
||||
|
||||
pos = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, size, errp);
|
||||
if (pos < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Exempt config space check for Vendor Specific Information during
|
||||
* restore/load.
|
||||
* Config space check is still enforced for 3 byte VSC header.
|
||||
*/
|
||||
if (vdev->skip_vsc_check && size > 3) {
|
||||
memset(pdev->cmask + pos + 3, 0, size - 3);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
PCIDevice *pdev = &vdev->pdev;
|
||||
uint8_t cap_id, next, size;
|
||||
int ret;
|
||||
bool ret;
|
||||
|
||||
cap_id = pdev->config[pos];
|
||||
next = pdev->config[pos + PCI_CAP_LIST_NEXT];
|
||||
@ -2160,9 +2183,8 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
|
||||
* will be changed as we unwind the stack.
|
||||
*/
|
||||
if (next) {
|
||||
ret = vfio_add_std_cap(vdev, next, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!vfio_add_std_cap(vdev, next, errp)) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
/* Begin the rebuild, use QEMU emulated list bits */
|
||||
@ -2170,9 +2192,8 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
|
||||
vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
|
||||
vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
|
||||
|
||||
ret = vfio_add_virt_caps(vdev, errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!vfio_add_virt_caps(vdev, errp)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2196,25 +2217,27 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
|
||||
case PCI_CAP_ID_PM:
|
||||
vfio_check_pm_reset(vdev, pos);
|
||||
vdev->pm_cap = pos;
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp);
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
|
||||
break;
|
||||
case PCI_CAP_ID_AF:
|
||||
vfio_check_af_flr(vdev, pos);
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp);
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
|
||||
break;
|
||||
case PCI_CAP_ID_VNDR:
|
||||
ret = vfio_add_vendor_specific_cap(vdev, pos, size, errp);
|
||||
break;
|
||||
default:
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp);
|
||||
ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
if (!ret) {
|
||||
error_prepend(errp,
|
||||
"failed to add PCI capability 0x%x[0x%x]@0x%x: ",
|
||||
cap_id, size, pos);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos)
|
||||
@ -2360,23 +2383,21 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
|
||||
return;
|
||||
}
|
||||
|
||||
static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
PCIDevice *pdev = &vdev->pdev;
|
||||
int ret;
|
||||
|
||||
if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
|
||||
!pdev->config[PCI_CAPABILITY_LIST]) {
|
||||
return 0; /* Nothing to add */
|
||||
return true; /* Nothing to add */
|
||||
}
|
||||
|
||||
ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
|
||||
if (ret) {
|
||||
return ret;
|
||||
if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
vfio_add_ext_cap(vdev);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
|
||||
@ -2421,8 +2442,7 @@ void vfio_pci_post_reset(VFIOPCIDevice *vdev)
|
||||
Error *err = NULL;
|
||||
int nr;
|
||||
|
||||
vfio_intx_enable(vdev, &err);
|
||||
if (err) {
|
||||
if (!vfio_intx_enable(vdev, &err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
|
||||
@ -2586,11 +2606,12 @@ static const VMStateDescription vmstate_vfio_pci_config = {
|
||||
}
|
||||
};
|
||||
|
||||
static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
|
||||
static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp)
|
||||
{
|
||||
VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
|
||||
|
||||
vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL);
|
||||
return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL,
|
||||
errp);
|
||||
}
|
||||
|
||||
static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
|
||||
@ -2642,10 +2663,10 @@ static VFIODeviceOps vfio_pci_ops = {
|
||||
.vfio_load_config = vfio_pci_load_config,
|
||||
};
|
||||
|
||||
int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
VFIODevice *vbasedev = &vdev->vbasedev;
|
||||
struct vfio_region_info *reg_info;
|
||||
g_autofree struct vfio_region_info *reg_info = NULL;
|
||||
int ret;
|
||||
|
||||
ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, ®_info);
|
||||
@ -2653,7 +2674,7 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
error_setg_errno(errp, -ret,
|
||||
"failed getting region info for VGA region index %d",
|
||||
VFIO_PCI_VGA_REGION_INDEX);
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
|
||||
@ -2662,8 +2683,7 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
|
||||
(unsigned long)reg_info->flags,
|
||||
(unsigned long)reg_info->size);
|
||||
g_free(reg_info);
|
||||
return -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
vdev->vga = g_new0(VFIOVGA, 1);
|
||||
@ -2671,8 +2691,6 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
vdev->vga->fd_offset = reg_info->offset;
|
||||
vdev->vga->fd = vdev->vbasedev.fd;
|
||||
|
||||
g_free(reg_info);
|
||||
|
||||
vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
|
||||
vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
|
||||
QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
|
||||
@ -2707,31 +2725,31 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
|
||||
&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
|
||||
&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
{
|
||||
VFIODevice *vbasedev = &vdev->vbasedev;
|
||||
struct vfio_region_info *reg_info;
|
||||
g_autofree struct vfio_region_info *reg_info = NULL;
|
||||
struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
|
||||
int i, ret = -1;
|
||||
|
||||
/* Sanity check device */
|
||||
if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
|
||||
error_setg(errp, "this isn't a PCI device");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
|
||||
error_setg(errp, "unexpected number of io regions %u",
|
||||
vbasedev->num_regions);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
|
||||
error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
|
||||
@ -2743,7 +2761,7 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "failed to get region %d info", i);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
QLIST_INIT(&vdev->bars[i].quirks);
|
||||
@ -2753,7 +2771,7 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
VFIO_PCI_CONFIG_REGION_INDEX, ®_info);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "failed to get config info");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
trace_vfio_populate_device_config(vdev->vbasedev.name,
|
||||
@ -2767,14 +2785,11 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
}
|
||||
vdev->config_offset = reg_info->offset;
|
||||
|
||||
g_free(reg_info);
|
||||
|
||||
if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
|
||||
ret = vfio_populate_vga(vdev, errp);
|
||||
if (ret) {
|
||||
if (!vfio_populate_vga(vdev, errp)) {
|
||||
error_append_hint(errp, "device does not support "
|
||||
"requested feature x-vga\n");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2791,6 +2806,8 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
|
||||
"Could not enable error recovery for the device",
|
||||
vbasedev->name);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void vfio_pci_put_device(VFIOPCIDevice *vdev)
|
||||
@ -2847,8 +2864,8 @@ static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
|
||||
fd = event_notifier_get_fd(&vdev->err_notifier);
|
||||
qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
qemu_set_fd_handler(fd, NULL, NULL, vdev);
|
||||
event_notifier_cleanup(&vdev->err_notifier);
|
||||
@ -2864,8 +2881,8 @@ static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
|
||||
return;
|
||||
}
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
|
||||
VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
|
||||
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
|
||||
}
|
||||
qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
|
||||
@ -2912,8 +2929,8 @@ static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
|
||||
fd = event_notifier_get_fd(&vdev->req_notifier);
|
||||
qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
|
||||
|
||||
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
|
||||
-                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
         qemu_set_fd_handler(fd, NULL, NULL, vdev);
         event_notifier_cleanup(&vdev->req_notifier);
@@ -2930,8 +2947,8 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
         return;
     }
 
-    if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
-                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
     }
     qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
@@ -2946,12 +2963,12 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
     ERRP_GUARD();
     VFIOPCIDevice *vdev = VFIO_PCI(pdev);
     VFIODevice *vbasedev = &vdev->vbasedev;
-    char *tmp, *subsys;
-    Error *err = NULL;
+    char *subsys;
     int i, ret;
     bool is_mdev;
     char uuid[UUID_STR_LEN];
-    char *name;
+    g_autofree char *name = NULL;
+    g_autofree char *tmp = NULL;
 
     if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
         if (!(~vdev->host.domain || ~vdev->host.bus ||
@@ -2970,7 +2987,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
                            vdev->host.slot, vdev->host.function);
     }
 
-    if (vfio_device_get_name(vbasedev, errp) < 0) {
+    if (!vfio_device_get_name(vbasedev, errp)) {
         return;
     }
 
@@ -2982,7 +2999,6 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
      */
     tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
     subsys = realpath(tmp, NULL);
-    g_free(tmp);
     is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
     free(subsys);
 
@@ -3001,16 +3017,12 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
         name = g_strdup(vbasedev->name);
     }
 
-    ret = vfio_attach_device(name, vbasedev,
-                             pci_device_iommu_address_space(pdev), errp);
-    g_free(name);
-    if (ret) {
+    if (!vfio_attach_device(name, vbasedev,
+                            pci_device_iommu_address_space(pdev), errp)) {
         goto error;
     }
 
-    vfio_populate_device(vdev, &err);
-    if (err) {
-        error_propagate(errp, err);
+    if (!vfio_populate_device(vdev, errp)) {
         goto error;
     }
 
@@ -3103,16 +3115,13 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
 
     vfio_bars_prepare(vdev);
 
-    vfio_msix_early_setup(vdev, &err);
-    if (err) {
-        error_propagate(errp, err);
+    if (!vfio_msix_early_setup(vdev, errp)) {
         goto error;
     }
 
     vfio_bars_register(vdev);
 
-    ret = vfio_add_capabilities(vdev, errp);
-    if (ret) {
+    if (!vfio_add_capabilities(vdev, errp)) {
         goto out_teardown;
     }
 
@@ -3126,7 +3135,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
 
     if (!vdev->igd_opregion &&
         vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
-        struct vfio_region_info *opregion;
+        g_autofree struct vfio_region_info *opregion = NULL;
 
         if (vdev->pdev.qdev.hotplugged) {
             error_setg(errp,
@@ -3144,9 +3153,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
             goto out_teardown;
         }
 
-        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
-        g_free(opregion);
-        if (ret) {
+        if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
             goto out_teardown;
         }
     }
@@ -3169,15 +3176,13 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
                                               vfio_intx_routing_notifier);
         vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
         kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
-        ret = vfio_intx_enable(vdev, errp);
-        if (ret) {
+        if (!vfio_intx_enable(vdev, errp)) {
             goto out_deregister;
         }
     }
 
     if (vdev->display != ON_OFF_AUTO_OFF) {
-        ret = vfio_display_probe(vdev, errp);
-        if (ret) {
+        if (!vfio_display_probe(vdev, errp)) {
             goto out_deregister;
         }
     }
@@ -3362,6 +3367,8 @@ static Property vfio_pci_dev_properties[] = {
                     VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
     DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
                             vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
+    DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
+                     vbasedev.migration_events, false),
     DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
     DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                      vbasedev.ram_block_discard_allowed, false),
@@ -3390,6 +3397,7 @@ static Property vfio_pci_dev_properties[] = {
     DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
                      TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
 #endif
+    DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
     DEFINE_PROP_END_OF_LIST(),
 };
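
The hunks above all apply one calling convention: helpers that used to return an int error code, or fill a local Error * that vfio_realize() then had to error_propagate(), now return bool and write straight into the caller's errp. A minimal sketch of the before/after pattern, using invented helper names rather than any function from this series:

    #include "qemu/osdep.h"
    #include "qapi/error.h"     /* Error, error_setg(), error_propagate() */

    /* Old style: errno-like return plus a local Error the caller must propagate. */
    static int old_style_setup(int fd, Error **errp)
    {
        if (fd < 0) {
            error_setg(errp, "invalid fd %d", fd);
            return -EINVAL;
        }
        return 0;
    }

    /* New style: bool return, errp filled directly on failure. */
    static bool new_style_setup(int fd, Error **errp)
    {
        if (fd < 0) {
            error_setg(errp, "invalid fd %d", fd);
            return false;
        }
        return true;
    }

    static void example_realize(int fd, Error **errp)
    {
        Error *err = NULL;

        /* Before: inspect the code, then hand the local error upwards. */
        if (old_style_setup(fd, &err) < 0) {
            error_propagate(errp, err);
            return;
        }

        /* After: a single test, no intermediate Error object. */
        if (!new_style_setup(fd, errp)) {
            return;
        }
    }
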
@@ -177,6 +177,7 @@ struct VFIOPCIDevice {
     OnOffAuto ramfb_migrate;
     bool defer_kvm_irq_routing;
     bool clear_parent_atomics_on_exit;
+    bool skip_vsc_check;
     VFIODisplay *dpy;
     Notifier irqchip_change_notifier;
 };
@@ -211,7 +212,7 @@ void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr);
 void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr);
 void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr);
 void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev);
-int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
+bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
 void vfio_quirk_reset(VFIOPCIDevice *vdev);
 VFIOQuirk *vfio_quirk_alloc(int nr_mem);
 void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
@@ -224,14 +225,14 @@ bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name);
 int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
                                     struct vfio_pci_hot_reset_info **info_p);
 
-int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
+bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
 
-int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
-                               struct vfio_region_info *info,
-                               Error **errp);
+bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+                                struct vfio_region_info *info,
+                                Error **errp);
 
 void vfio_display_reset(VFIOPCIDevice *vdev);
-int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
+bool vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
 void vfio_display_finalize(VFIOPCIDevice *vdev);
 
 extern const VMStateDescription vfio_display_vmstate;
@@ -115,18 +115,17 @@ static int vfio_set_trigger_eventfd(VFIOINTp *intp,
     VFIODevice *vbasedev = &intp->vdev->vbasedev;
     int32_t fd = event_notifier_get_fd(intp->interrupt);
     Error *err = NULL;
-    int ret;
 
     qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);
 
-    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
-                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
-    if (ret) {
+    if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0,
+                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
         error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
         qemu_set_fd_handler(fd, NULL, NULL, NULL);
+        return -EINVAL;
     }
 
-    return ret;
+    return 0;
 }
 
 /*
@@ -355,15 +354,14 @@ static int vfio_set_resample_eventfd(VFIOINTp *intp)
     int32_t fd = event_notifier_get_fd(intp->unmask);
     VFIODevice *vbasedev = &intp->vdev->vbasedev;
     Error *err = NULL;
-    int ret;
 
     qemu_set_fd_handler(fd, NULL, NULL, NULL);
-    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
-                                 VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
-    if (ret) {
+    if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0,
+                                VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) {
         error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
+        return -EINVAL;
     }
-    return ret;
+    return 0;
 }
 
 /**
@@ -443,7 +441,7 @@ static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
  * @errp: error object
  *
  */
-static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
+static bool vfio_populate_device(VFIODevice *vbasedev, Error **errp)
 {
     VFIOINTp *intp, *tmp;
     int i, ret = -1;
@@ -452,7 +450,7 @@ static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
 
     if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
         error_setg(errp, "this isn't a platform device");
-        return ret;
+        return false;
     }
 
     vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);
@@ -489,12 +487,11 @@ static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
                             irq.flags);
             intp = vfio_init_intp(vbasedev, irq, errp);
             if (!intp) {
-                ret = -1;
                 goto irq_err;
             }
         }
     }
-    return 0;
+    return true;
 irq_err:
     timer_del(vdev->mmap_timer);
     QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
@@ -509,7 +506,7 @@ reg_error:
         g_free(vdev->regions[i]);
     }
     g_free(vdev->regions);
-    return ret;
+    return false;
 }
 
 /* specialized functions for VFIO Platform devices */
@@ -529,10 +526,8 @@ static VFIODeviceOps vfio_platform_ops = {
  * fd retrieval, resource query.
  * Precondition: the device name must be initialized
  */
-static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
+static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
 {
-    int ret;
-
     /* @fd takes precedence over @sysfsdev which takes precedence over @host */
     if (vbasedev->fd < 0 && vbasedev->sysfsdev) {
         g_free(vbasedev->name);
@@ -540,30 +535,28 @@ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
     } else if (vbasedev->fd < 0) {
         if (!vbasedev->name || strchr(vbasedev->name, '/')) {
             error_setg(errp, "wrong host device name");
-            return -EINVAL;
+            return false;
         }
 
         vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                              vbasedev->name);
     }
 
-    ret = vfio_device_get_name(vbasedev, errp);
-    if (ret) {
-        return ret;
+    if (!vfio_device_get_name(vbasedev, errp)) {
+        return false;
    }
 
-    ret = vfio_attach_device(vbasedev->name, vbasedev,
-                             &address_space_memory, errp);
-    if (ret) {
-        return ret;
+    if (!vfio_attach_device(vbasedev->name, vbasedev,
+                            &address_space_memory, errp)) {
+        return false;
     }
 
-    ret = vfio_populate_device(vbasedev, errp);
-    if (ret) {
-        vfio_detach_device(vbasedev);
+    if (vfio_populate_device(vbasedev, errp)) {
+        return true;
     }
 
-    return ret;
+    vfio_detach_device(vbasedev);
+    return false;
 }
 
 /**
@@ -580,7 +573,7 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
     VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
     SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
     VFIODevice *vbasedev = &vdev->vbasedev;
-    int i, ret;
+    int i;
 
     qemu_mutex_init(&vdev->intp_mutex);
 
@@ -588,9 +581,8 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);
 
-    ret = vfio_base_device_init(vbasedev, errp);
-    if (ret) {
-        goto out;
+    if (!vfio_base_device_init(vbasedev, errp)) {
+        goto init_err;
     }
 
     if (!vdev->compat) {
@@ -622,11 +614,9 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
         }
         sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
     }
-out:
-    if (!ret) {
-        return;
-    }
+    return;
 
+init_err:
     if (vdev->vbasedev.name) {
         error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
     } else {
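
Not every converted call site gains an errp of its own: the platform eventfd helpers above keep a local Error and hand it to error_reportf_err(), because their callers only understand an errno-style return. A small sketch of that reporting pattern with an invented helper and prefix (the real code uses VFIO_MSG_PREFIX and vfio_set_irq_signaling()):

    #include "qemu/osdep.h"
    #include "qapi/error.h"     /* Error, error_setg(), error_reportf_err() */

    #define EXAMPLE_MSG_PREFIX "example %s: "    /* stand-in for VFIO_MSG_PREFIX */

    /* Invented bool/errp helper, shaped like the ones converted in this pull. */
    static bool example_wire_eventfd(int fd, Error **errp)
    {
        if (fd < 0) {
            error_setg(errp, "cannot wire eventfd %d", fd);
            return false;
        }
        return true;
    }

    /* Caller without an errp: report the error (prefixed with the device name)
     * and fall back to a negative errno for its own caller. */
    static int example_set_trigger(const char *name, int fd)
    {
        Error *err = NULL;

        if (!example_wire_eventfd(fd, &err)) {
            error_reportf_err(err, EXAMPLE_MSG_PREFIX, name);
            return -EINVAL;
        }
        return 0;
    }
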
@@ -323,7 +323,7 @@ static int vfio_spapr_create_window(VFIOContainer *container,
     return 0;
 }
 
-static int
+static bool
 vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
                                         MemoryRegionSection *section,
                                         Error **errp)
@@ -351,13 +351,13 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
             error_setg(errp, "Container %p can't map guest IOVA region"
                        " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container,
                        iova, end);
-            return -EINVAL;
+            return false;
         }
-        return 0;
+        return true;
     }
 
     if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) {
-        return 0;
+        return true;
     }
 
     /* For now intersections are not allowed, we may relax this later */
@@ -373,14 +373,14 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
                        section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                        hostwin->min_iova, hostwin->max_iova);
-            return -EINVAL;
+            return false;
         }
     }
 
     ret = vfio_spapr_create_window(container, section, &pgsize);
     if (ret) {
         error_setg_errno(errp, -ret, "Failed to create SPAPR window");
-        return ret;
+        return false;
     }
 
     vfio_host_win_add(scontainer, section->offset_within_address_space,
@@ -406,14 +406,14 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
                              "vfio: failed GROUP_SET_SPAPR_TCE for "
                              "KVM VFIO device %d and group fd %d",
                              param.tablefd, param.groupfd);
-            return -errno;
+            return false;
         }
         trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
     }
 }
 #endif
-    return 0;
+    return true;
 }
 
 static void
@@ -458,8 +458,8 @@ static void vfio_spapr_container_release(VFIOContainerBase *bcontainer)
     }
 }
 
-static int vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
-                                      Error **errp)
+static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
+                                       Error **errp)
 {
     VFIOContainer *container = container_of(bcontainer, VFIOContainer,
                                             bcontainer);
@@ -480,7 +480,7 @@ static int vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
         ret = ioctl(fd, VFIO_IOMMU_ENABLE);
         if (ret) {
             error_setg_errno(errp, errno, "failed to enable container");
-            return -errno;
+            return false;
         }
     } else {
         scontainer->prereg_listener = vfio_prereg_listener;
@@ -488,7 +488,6 @@ static int vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
         memory_listener_register(&scontainer->prereg_listener,
                                  &address_space_memory);
         if (bcontainer->error) {
-            ret = -1;
             error_propagate_prepend(errp, bcontainer->error,
                 "RAM memory listener initialization failed: ");
             goto listener_unregister_exit;
@@ -500,7 +499,6 @@ static int vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
     if (ret) {
         error_setg_errno(errp, errno,
                          "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
-        ret = -errno;
         goto listener_unregister_exit;
     }
 
@@ -527,13 +525,13 @@ static int vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
                                      0x1000);
     }
 
-    return 0;
+    return true;
 
 listener_unregister_exit:
     if (v2) {
         memory_listener_unregister(&scontainer->prereg_listener);
     }
-    return ret;
+    return false;
 }
 
 static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data)
@@ -152,7 +152,8 @@ vfio_load_device_config_state(const char *name) " (%s)"
 vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
 vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
 vfio_migration_realize(const char *name) " (%s)"
-vfio_migration_set_state(const char *name, const char *state) " (%s) state %s"
+vfio_migration_set_device_state(const char *name, const char *state) " (%s) state %s"
+vfio_migration_set_state(const char *name, const char *new_state, const char *recover_state) " (%s) new state %s, recover state %s"
 vfio_migration_state_notifier(const char *name, int state) " (%s) state %d"
 vfio_save_block(const char *name, int data_size) " (%s) data_size %d"
 vfio_save_cleanup(const char *name) " (%s)"
@@ -208,6 +208,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     void *vaddr;
     int ret;
     Int128 llend;
+    Error *local_err = NULL;
 
     if (iotlb->target_as != &address_space_memory) {
         error_report("Wrong target AS \"%s\", only system memory is allowed",
@@ -227,7 +228,9 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
         bool read_only;
 
-        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
+        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
+                                  &local_err)) {
+            error_report_err(local_err);
             return;
         }
         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
@@ -774,9 +774,22 @@ void ram_discard_manager_register_listener(RamDiscardManager *rdm,
 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                              RamDiscardListener *rdl);
 
+/**
+ * memory_get_xlat_addr: Extract addresses from a TLB entry
+ *
+ * @iotlb: pointer to an #IOMMUTLBEntry
+ * @vaddr: virtual address
+ * @ram_addr: RAM address
+ * @read_only: indicates if writes are allowed
+ * @mr_has_discard_manager: indicates memory is controlled by a
+ *                          RamDiscardManager
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                           ram_addr_t *ram_addr, bool *read_only,
-                          bool *mr_has_discard_manager);
+                          bool *mr_has_discard_manager, Error **errp);
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
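
The new errp parameter lets each caller decide whether a failed translation is fatal or merely worth logging, as the vhost-vdpa hunk above does. A small hypothetical caller; memory_get_xlat_addr() and error_report_err() are the real APIs, the notifier itself is made up:

    #include "qemu/osdep.h"
    #include "exec/memory.h"    /* IOMMUTLBEntry, memory_get_xlat_addr() */
    #include "qapi/error.h"     /* Error, error_report_err() */

    /* Hypothetical map notifier: translate the IOTLB entry, log on failure. */
    static void example_iommu_map_notify(IOMMUTLBEntry *iotlb)
    {
        void *vaddr;
        bool read_only;
        Error *local_err = NULL;

        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
                                  &local_err)) {
            /* error_report_err() prints the message and frees local_err. */
            error_report_err(local_err);
            return;
        }

        /* ... map vaddr into the device here ... */
        (void)read_only;
    }
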
@@ -115,6 +115,7 @@ typedef struct VFIODevice {
     bool no_mmap;
     bool ram_block_discard_allowed;
     OnOffAuto enable_migration;
+    bool migration_events;
     VFIODeviceOps *ops;
     unsigned int num_irqs;
     unsigned int num_regions;
@@ -133,7 +134,30 @@ struct VFIODeviceOps {
     int (*vfio_hot_reset_multi)(VFIODevice *vdev);
     void (*vfio_eoi)(VFIODevice *vdev);
     Object *(*vfio_get_object)(VFIODevice *vdev);
-    void (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f);
+
+    /**
+     * @vfio_save_config
+     *
+     * Save device config state
+     *
+     * @vdev: #VFIODevice for which to save the config
+     * @f: #QEMUFile where to send the data
+     * @errp: pointer to Error*, to store an error if it happens.
+     *
+     * Returns zero to indicate success and negative for error
+     */
+    int (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f, Error **errp);
+
+    /**
+     * @vfio_load_config
+     *
+     * Load device config state
+     *
+     * @vdev: #VFIODevice for which to load the config
+     * @f: #QEMUFile where to get the data
+     *
+     * Returns zero to indicate success and negative for error
+     */
     int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
 };
 
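
The newly documented vfio_save_config hook is expected to push the device's config into the QEMUFile and report failures through both the negative return value and errp. A rough sketch of a conforming implementation; the device struct and field are invented, while qemu_put_be32(), qemu_file_get_error() and error_setg_errno() are existing QEMU APIs:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/qemu-file-types.h"  /* qemu_put_be32() */
    #include "migration/qemu-file.h"        /* QEMUFile, qemu_file_get_error() */

    typedef struct ExampleDevice {
        uint32_t config_word;               /* invented device state */
    } ExampleDevice;

    /* Hypothetical vfio_save_config-style callback (the real hook takes a
     * VFIODevice *). */
    static int example_save_config(ExampleDevice *dev, QEMUFile *f, Error **errp)
    {
        int ret;

        qemu_put_be32(f, dev->config_word);

        ret = qemu_file_get_error(f);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to save device config");
        }
        return ret;
    }
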
@@ -183,8 +207,8 @@ void vfio_spapr_container_deinit(VFIOContainer *container);
 void vfio_disable_irqindex(VFIODevice *vbasedev, int index);
 void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index);
 void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index);
-int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
-                           int action, int fd, Error **errp);
+bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
+                            int action, int fd, Error **errp);
 void vfio_region_write(void *opaque, hwaddr addr,
                        uint64_t data, unsigned size);
 uint64_t vfio_region_read(void *opaque,
@@ -198,14 +222,14 @@ void vfio_region_exit(VFIORegion *region);
 void vfio_region_finalize(VFIORegion *region);
 void vfio_reset_handler(void *opaque);
 struct vfio_device_info *vfio_get_device_info(int fd);
-int vfio_attach_device(char *name, VFIODevice *vbasedev,
-                       AddressSpace *as, Error **errp);
+bool vfio_attach_device(char *name, VFIODevice *vbasedev,
+                        AddressSpace *as, Error **errp);
 void vfio_detach_device(VFIODevice *vbasedev);
 
 int vfio_kvm_device_add_fd(int fd, Error **errp);
 int vfio_kvm_device_del_fd(int fd, Error **errp);
 
-int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp);
+bool vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp);
 void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer);
 
 extern const MemoryRegionOps vfio_region_ops;
@@ -250,13 +274,12 @@ vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
 bool
 vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
 int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
-                                    VFIOBitmap *vbmap, hwaddr iova,
-                                    hwaddr size);
+                                    VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
 int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
-                          uint64_t size, ram_addr_t ram_addr);
+                          uint64_t size, ram_addr_t ram_addr, Error **errp);
 
 /* Returns 0 on success, or a negative errno. */
-int vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
+bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
 void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
 void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                       DeviceState *dev, bool ram_discard);
@@ -76,16 +76,15 @@ int vfio_container_dma_map(VFIOContainerBase *bcontainer,
 int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                              hwaddr iova, ram_addr_t size,
                              IOMMUTLBEntry *iotlb);
-int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
-                                      MemoryRegionSection *section,
-                                      Error **errp);
+bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
+                                       MemoryRegionSection *section,
+                                       Error **errp);
 void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
                                        MemoryRegionSection *section);
 int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
-                                           bool start);
+                                           bool start, Error **errp);
 int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
-                                      VFIOBitmap *vbmap,
-                                      hwaddr iova, hwaddr size);
+                                      VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
 
 void vfio_container_init(VFIOContainerBase *bcontainer,
                          VFIOAddressSpace *space,
@@ -111,29 +110,55 @@ struct VFIOIOMMUClass {
     InterfaceClass parent_class;
 
     /* basic feature */
-    int (*setup)(VFIOContainerBase *bcontainer, Error **errp);
+    bool (*setup)(VFIOContainerBase *bcontainer, Error **errp);
     int (*dma_map)(const VFIOContainerBase *bcontainer,
                    hwaddr iova, ram_addr_t size,
                    void *vaddr, bool readonly);
     int (*dma_unmap)(const VFIOContainerBase *bcontainer,
                      hwaddr iova, ram_addr_t size,
                      IOMMUTLBEntry *iotlb);
-    int (*attach_device)(const char *name, VFIODevice *vbasedev,
-                         AddressSpace *as, Error **errp);
+    bool (*attach_device)(const char *name, VFIODevice *vbasedev,
+                          AddressSpace *as, Error **errp);
     void (*detach_device)(VFIODevice *vbasedev);
+
     /* migration feature */
+
+    /**
+     * @set_dirty_page_tracking
+     *
+     * Start or stop dirty pages tracking on VFIO container
+     *
+     * @bcontainer: #VFIOContainerBase on which to de/activate dirty
+     *              page tracking
+     * @start: indicates whether to start or stop dirty pages tracking
+     * @errp: pointer to Error*, to store an error if it happens.
+     *
+     * Returns zero to indicate success and negative for error
+     */
     int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer,
-                                   bool start);
+                                   bool start, Error **errp);
+    /**
+     * @query_dirty_bitmap
+     *
+     * Get bitmap of dirty pages from container
+     *
+     * @bcontainer: #VFIOContainerBase from which to get dirty pages
+     * @vbmap: #VFIOBitmap internal bitmap structure
+     * @iova: iova base address
+     * @size: size of iova range
+     * @errp: pointer to Error*, to store an error if it happens.
+     *
+     * Returns zero to indicate success and negative for error
+     */
     int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer,
-                              VFIOBitmap *vbmap,
-                              hwaddr iova, hwaddr size);
+                              VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
     /* PCI specific */
     int (*pci_hot_reset)(VFIODevice *vbasedev, bool single);
 
     /* SPAPR specific */
-    int (*add_window)(VFIOContainerBase *bcontainer,
-                      MemoryRegionSection *section,
-                      Error **errp);
+    bool (*add_window)(VFIOContainerBase *bcontainer,
+                       MemoryRegionSection *section,
+                       Error **errp);
     void (*del_window)(VFIOContainerBase *bcontainer,
                        MemoryRegionSection *section);
     void (*release)(VFIOContainerBase *bcontainer);
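
Both documented callbacks pair an errno-style return with an errp, so the generic container code can propagate a human-readable message while still handing a numeric code to the migration layer. A self-contained sketch of that contract with invented stand-in types (the real ones are VFIOContainerBase, VFIOBitmap and VFIOIOMMUClass):

    #include "qemu/osdep.h"
    #include "qapi/error.h"

    typedef struct ExampleBitmap {
        unsigned long *bits;            /* invented placeholder */
    } ExampleBitmap;

    typedef struct ExampleContainer ExampleContainer;
    struct ExampleContainer {
        /* Same contract as @query_dirty_bitmap above: negative errno on
         * failure, with *errp describing the problem. */
        int (*query_dirty_bitmap)(ExampleContainer *c, ExampleBitmap *map,
                                  uint64_t iova, uint64_t size, Error **errp);
    };

    static int example_query(ExampleContainer *c, ExampleBitmap *map,
                             uint64_t iova, uint64_t size, Error **errp)
    {
        int ret = c->query_dirty_bitmap(c, map, iova, size, errp);

        if (ret < 0) {
            /* The callback set *errp already; just add calling context. */
            error_prepend(errp, "dirty bitmap query failed: ");
        }
        return ret;
    }
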
@@ -97,7 +97,7 @@ void migration_add_notifier_mode(NotifierWithReturn *notify,
 
 void migration_remove_notifier(NotifierWithReturn *notify);
 bool migration_is_running(void);
-void migration_file_set_error(int err);
+void migration_file_set_error(int ret, Error *err);
 
 /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
 bool migration_in_incoming_postcopy(void);
@@ -23,11 +23,11 @@ struct IOMMUFDBackend {
     /*< public >*/
 };
 
-int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
+bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
 void iommufd_backend_disconnect(IOMMUFDBackend *be);
 
-int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
-                               Error **errp);
+bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+                                Error **errp);
 void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
 int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
                             ram_addr_t size, void *vaddr, bool readonly);
@@ -2994,13 +2994,15 @@ static MigThrError postcopy_pause(MigrationState *s)
     }
 }
 
-void migration_file_set_error(int err)
+void migration_file_set_error(int ret, Error *err)
 {
     MigrationState *s = current_migration;
 
     WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
         if (s->to_dst_file) {
-            qemu_file_set_error(s->to_dst_file, err);
+            qemu_file_set_error_obj(s->to_dst_file, ret, err);
+        } else if (err) {
+            error_report_err(err);
         }
     }
 }
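
With the extra Error argument, a caller can attach a description to the numeric code it files against the migration stream, and the message is still reported if no stream exists yet. A hypothetical caller, assuming nothing beyond the prototype shown above (the real users live in the VFIO migration code):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/misc.h"     /* migration_file_set_error() */

    /* Invented helper: fail the migration stream with context attached. */
    static void example_fail_migration(int ret)
    {
        Error *err = NULL;

        /* Assumes ret is a negative errno value. */
        error_setg_errno(&err, -ret, "device state transfer failed");
        /* The migration core consumes (or reports and frees) err. */
        migration_file_set_error(ret, err);
    }
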
@@ -52,6 +52,7 @@ qapi_all_modules = [
   'stats',
   'trace',
   'transaction',
+  'vfio',
   'virtio',
   'yank',
 ]
@@ -78,5 +78,6 @@
 { 'include': 'pci.json' }
 { 'include': 'stats.json' }
 { 'include': 'virtio.json' }
+{ 'include': 'vfio.json' }
 { 'include': 'cryptodev.json' }
 { 'include': 'cxl.json' }
qapi/vfio.json (new file, 67 lines)
@@ -0,0 +1,67 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+
+##
+# = VFIO devices
+##
+
+##
+# @VfioMigrationState:
+#
+# An enumeration of the VFIO device migration states.
+#
+# @stop: The device is stopped.
+#
+# @running: The device is running.
+#
+# @stop-copy: The device is stopped and its internal state is available
+#     for reading.
+#
+# @resuming: The device is stopped and its internal state is available
+#     for writing.
+#
+# @running-p2p: The device is running in the P2P quiescent state.
+#
+# @pre-copy: The device is running, tracking its internal state and its
+#     internal state is available for reading.
+#
+# @pre-copy-p2p: The device is running in the P2P quiescent state,
+#     tracking its internal state and its internal state is available
+#     for reading.
+#
+# Since: 9.1
+##
+{ 'enum': 'VfioMigrationState',
+  'data': [ 'stop', 'running', 'stop-copy', 'resuming', 'running-p2p',
+            'pre-copy', 'pre-copy-p2p' ],
+  'prefix': 'QAPI_VFIO_MIGRATION_STATE' }
+
+##
+# @VFIO_MIGRATION:
+#
+# This event is emitted when a VFIO device migration state is changed.
+#
+# @device-id: The device's id, if it has one.
+#
+# @qom-path: The device's QOM path.
+#
+# @device-state: The new changed device migration state.
+#
+# Since: 9.1
+#
+# Example:
+#
+#     <- { "timestamp": { "seconds": 1713771323, "microseconds": 212268 },
+#          "event": "VFIO_MIGRATION",
+#          "data": {
+#              "device-id": "vfio_dev1",
+#              "qom-path": "/machine/peripheral/vfio_dev1",
+#              "device-state": "stop" } }
+##
+{ 'event': 'VFIO_MIGRATION',
+  'data': {
+      'device-id': 'str',
+      'qom-path': 'str',
+      'device-state': 'VfioMigrationState'
+  } }
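
The schema above only defines the event; emission is presumably gated by the per-device "migration-events" property added earlier in this diff. On the QEMU side it would go through the function the QAPI generator derives from this schema; a hedged sketch, assuming the usual qapi_event_send_* naming and the generated qapi/qapi-events-vfio.h header:

    #include "qemu/osdep.h"
    #include "qapi/qapi-events-vfio.h"      /* assumed: generated from qapi/vfio.json */

    /* Hypothetical emission site: tell management the device reached stop-copy. */
    static void example_notify_stop_copy(const char *id, const char *qom_path)
    {
        qapi_event_send_vfio_migration(id, qom_path,
                                       QAPI_VFIO_MIGRATION_STATE_STOP_COPY);
    }
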
@@ -2179,7 +2179,7 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
 /* Called with rcu_read_lock held. */
 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                           ram_addr_t *ram_addr, bool *read_only,
-                          bool *mr_has_discard_manager)
+                          bool *mr_has_discard_manager, Error **errp)
 {
     MemoryRegion *mr;
     hwaddr xlat;
@@ -2197,7 +2197,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
     mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
                                  &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
     if (!memory_region_is_ram(mr)) {
-        error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
+        error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
         return false;
     } else if (memory_region_has_ram_discard_manager(mr)) {
         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
@@ -2216,8 +2216,8 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
          * were already restored before IOMMUs are restored.
          */
         if (!ram_discard_manager_is_populated(rdm, &tmp)) {
-            error_report("iommu map to discarded memory (e.g., unplugged via"
-                         " virtio-mem): %" HWADDR_PRIx "",
+            error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
+                       " via virtio-mem): %" HWADDR_PRIx "",
                          iotlb->translated_addr);
             return false;
         }
@@ -2228,7 +2228,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
      * check that it did not truncate too much.
      */
     if (len & iotlb->addr_mask) {
-        error_report("iommu has granularity incompatible with target AS");
+        error_setg(errp, "iommu has granularity incompatible with target AS");
         return false;
     }
 