2017-05-17 03:48:07 +03:00
|
|
|
/*
|
|
|
|
* vfio based subchannel assignment support
|
|
|
|
*
|
|
|
|
* Copyright 2017 IBM Corp.
|
2019-05-07 18:47:33 +03:00
|
|
|
* Copyright 2019 Red Hat, Inc.
|
|
|
|
*
|
2017-05-17 03:48:07 +03:00
|
|
|
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
|
|
|
|
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
|
|
|
|
* Pierre Morel <pmorel@linux.vnet.ibm.com>
|
2019-05-07 18:47:33 +03:00
|
|
|
* Cornelia Huck <cohuck@redhat.com>
|
2017-05-17 03:48:07 +03:00
|
|
|
*
|
2018-02-27 20:25:41 +03:00
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or (at
|
|
|
|
* your option) any later version. See the COPYING file in the top-level
|
2017-05-17 03:48:07 +03:00
|
|
|
* directory.
|
|
|
|
*/
|
|
|
|
|
2017-10-17 19:43:53 +03:00
|
|
|
#include "qemu/osdep.h"
|
2023-11-21 11:44:15 +03:00
|
|
|
#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
|
2017-05-17 03:48:07 +03:00
|
|
|
#include <linux/vfio.h>
|
2017-05-17 03:48:08 +03:00
|
|
|
#include <linux/vfio_ccw.h>
|
2017-05-17 03:48:07 +03:00
|
|
|
#include <sys/ioctl.h>
|
|
|
|
|
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "hw/vfio/vfio-common.h"
|
2023-11-21 11:44:15 +03:00
|
|
|
#include "sysemu/iommufd.h"
|
2017-05-17 03:48:07 +03:00
|
|
|
#include "hw/s390x/s390-ccw.h"
|
2019-04-04 17:34:20 +03:00
|
|
|
#include "hw/s390x/vfio-ccw.h"
|
2019-08-12 08:23:51 +03:00
|
|
|
#include "hw/qdev-properties.h"
|
2017-05-17 03:48:07 +03:00
|
|
|
#include "hw/s390x/ccw-device.h"
|
2018-05-29 02:26:59 +03:00
|
|
|
#include "exec/address-spaces.h"
|
2017-05-17 03:48:09 +03:00
|
|
|
#include "qemu/error-report.h"
|
Include qemu/main-loop.h less
In my "build everything" tree, changing qemu/main-loop.h triggers a
recompile of some 5600 out of 6600 objects (not counting tests and
objects that don't depend on qemu/osdep.h). It includes block/aio.h,
which in turn includes qemu/event_notifier.h, qemu/notify.h,
qemu/processor.h, qemu/qsp.h, qemu/queue.h, qemu/thread-posix.h,
qemu/thread.h, qemu/timer.h, and a few more.
Include qemu/main-loop.h only where it's needed. Touching it now
recompiles only some 1700 objects. For block/aio.h and
qemu/event_notifier.h, these numbers drop from 5600 to 2800. For the
others, they shrink only slightly.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20190812052359.30071-21-armbru@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
2019-08-12 08:23:50 +03:00
|
|
|
#include "qemu/main-loop.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2017-05-17 03:48:07 +03:00
|
|
|
|
2019-04-04 17:34:20 +03:00
|
|
|
/*
 * State of one vfio-ccw device: the emulated subchannel device plus the
 * vfio device handle and the mmap'd/pread-pwrite regions exposed by the
 * vfio-ccw kernel driver.
 */
struct VFIOCCWDevice {
    S390CCWDevice cdev;     /* base s390 subchannel device (must be first) */
    VFIODevice vdev;        /* vfio device handle (fd, name, num_irqs, ...) */
    /* Mandatory I/O region: carries ORB/SCSW to the host, IRB back. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region: CSCH/HSCH requests. NULL if absent. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional SCHIB region: host path state for store subchannel. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional CRW region: channel report words queued by the host. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;   /* signals I/O completion (IRB ready) */
    EventNotifier crw_notifier;  /* signals pending CRWs */
    EventNotifier req_notifier;  /* host requests device release (unplug) */
    bool force_orb_pfch;         /* property: force ORB prefetch flag on */
    bool warned_orb_pfch;        /* warn_report_once_cond() latch */
};
|
2017-05-17 03:48:07 +03:00
|
|
|
|
2018-05-24 20:58:27 +03:00
|
|
|
/*
 * Warn (once per device, via the warned_orb_pfch latch) that the ORB
 * prefetch flag was modified, identifying the subchannel by devno.
 */
static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *msg)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, msg);
}
|
|
|
|
|
2017-05-17 03:48:07 +03:00
|
|
|
/* vfio-ccw devices never require a hot reset from the vfio core. */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
|
|
|
|
|
|
|
|
/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};
|
|
|
|
|
2017-10-17 17:04:49 +03:00
|
|
|
/*
 * Forward a start-subchannel request to the host: serialize the guest's
 * ORB and SCSW into the vfio-ccw I/O region and pwrite() it to the
 * kernel driver, which issues the real SSCH.  The mapping from the
 * write's error code to a condition code is returned to the caller.
 */
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    /*
     * If requested by the device property, force the ORB prefetch bit on
     * (the host driver may require prefetch); warn the user once.
     */
    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_once_pfch(vcdev, sch, "PFCH flag forced");
    }

    /* The region's byte areas must match the emulated structs exactly. */
    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    /* Retry the region write as long as the driver reports EAGAIN. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    /* Translate the driver's result into an I/O instruction ending. */
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        /* Unexpected failure: surface it as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}
|
|
|
|
|
2020-05-05 15:57:54 +03:00
|
|
|
/*
 * Refresh path state for a store-subchannel: pread() the host SCHIB via
 * the optional schib region and fold the path-related PMCW/SCSW bits
 * into the emulated SCHIB.  Always reports cc 0 (IOINST_CC_EXPECTED);
 * STSCH defines no nonzero cc for a backend failure here.
 */
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    /* Path-not-operational is only ever set, never cleared, from the host. */
    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}
|
|
|
|
|
2019-05-07 18:47:33 +03:00
|
|
|
/*
 * Forward a clear-subchannel (CSCH) to the host via the optional async
 * command region.  Returns 0 or a negative errno understood by the css
 * emulation; -ENOSYS requests fallback to pure emulation.
 */
static int vfio_ccw_handle_clear(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    /* Retry the region write as long as the driver reports EAGAIN. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        /* Codes the instruction emulation knows how to map to a cc. */
        return ret;
    case -EFAULT:
    default:
        /* Unexpected failure: surface it as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
|
|
|
|
|
|
|
|
/*
 * Forward a halt-subchannel (HSCH) to the host via the optional async
 * command region.  Same contract as vfio_ccw_handle_clear(), with the
 * addition of -EBUSY as a caller-visible result (halt can find the
 * subchannel busy).  -ENOSYS requests fallback to pure emulation.
 */
static int vfio_ccw_handle_halt(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

again:
    /* Retry the region write as long as the driver reports EAGAIN. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        /* Codes the instruction emulation knows how to map to a cc. */
        return ret;
    case -EFAULT:
    default:
        /* Unexpected failure: surface it as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
|
|
|
|
|
2017-05-17 03:48:07 +03:00
|
|
|
/*
 * Device reset callback: ask the host driver to reset the real
 * subchannel.  The ioctl's result is deliberately ignored — there is
 * nothing useful to do on failure during a machine reset.
 */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
|
|
|
|
|
2020-05-05 15:57:57 +03:00
|
|
|
/*
 * Drain the CRW region: repeatedly pread() channel report words from the
 * host driver and queue each one for the guest, until the driver returns
 * an all-zero CRW (queue empty) or the read fails.
 */
static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int nread;

    /* Keep reading CRWs as long as data is returned */
    for (;;) {
        memset(region, 0, sizeof(*region));
        nread = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                      vcdev->crw_region_offset);

        if (nread == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        memcpy(&crw, &region->crw, sizeof(CRW));
        css_crw_add_to_queue(crw);
    }
}
|
|
|
|
|
2021-01-04 23:20:57 +03:00
|
|
|
/*
 * REQ IRQ handler: the host driver is asking us to release the device,
 * so trigger a hot-unplug of ourselves.  Unplug failure is only worth a
 * warning — the request may simply be retried by the host.
 */
static void vfio_ccw_req_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vcdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }
}
|
|
|
|
|
2020-05-05 15:57:57 +03:00
|
|
|
static void vfio_ccw_crw_notifier_handler(void *opaque)
|
|
|
|
{
|
|
|
|
VFIOCCWDevice *vcdev = opaque;
|
|
|
|
|
|
|
|
while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
|
|
|
|
vfio_ccw_crw_read(vcdev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-17 03:48:09 +03:00
|
|
|
/*
 * IO IRQ handler: an I/O submitted via the io region has completed.
 * pread() the IRB back from the host, fold it into the emulated
 * subchannel status (SCSW, ESW, sense data), and inject an I/O
 * interrupt for the guest.  Read failures are converted into the
 * architecturally closest status (deferred cc 3, channel-data check,
 * channel-control check or channel-program check) before the interrupt
 * is injected.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
|
|
|
|
|
2024-04-25 12:02:14 +03:00
|
|
|
static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
|
2020-05-05 15:57:55 +03:00
|
|
|
unsigned int irq,
|
|
|
|
Error **errp)
|
2017-05-17 03:48:09 +03:00
|
|
|
{
|
|
|
|
VFIODevice *vdev = &vcdev->vdev;
|
2024-04-25 12:02:13 +03:00
|
|
|
g_autofree struct vfio_irq_info *irq_info = NULL;
|
2017-05-17 03:48:09 +03:00
|
|
|
size_t argsz;
|
2019-04-12 14:42:31 +03:00
|
|
|
int fd;
|
2020-05-05 15:57:55 +03:00
|
|
|
EventNotifier *notifier;
|
|
|
|
IOHandler *fd_read;
|
|
|
|
|
|
|
|
switch (irq) {
|
|
|
|
case VFIO_CCW_IO_IRQ_INDEX:
|
|
|
|
notifier = &vcdev->io_notifier;
|
|
|
|
fd_read = vfio_ccw_io_notifier_handler;
|
|
|
|
break;
|
2020-05-05 15:57:57 +03:00
|
|
|
case VFIO_CCW_CRW_IRQ_INDEX:
|
|
|
|
notifier = &vcdev->crw_notifier;
|
|
|
|
fd_read = vfio_ccw_crw_notifier_handler;
|
|
|
|
break;
|
2021-01-04 23:20:57 +03:00
|
|
|
case VFIO_CCW_REQ_IRQ_INDEX:
|
|
|
|
notifier = &vcdev->req_notifier;
|
|
|
|
fd_read = vfio_ccw_req_notifier_handler;
|
|
|
|
break;
|
2020-05-05 15:57:55 +03:00
|
|
|
default:
|
|
|
|
error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
|
2024-04-25 12:02:14 +03:00
|
|
|
return false;
|
2020-05-05 15:57:55 +03:00
|
|
|
}
|
2017-05-17 03:48:09 +03:00
|
|
|
|
2020-05-05 15:57:55 +03:00
|
|
|
if (vdev->num_irqs < irq + 1) {
|
2021-04-21 18:20:53 +03:00
|
|
|
error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
|
|
|
|
irq, vdev->num_irqs);
|
2024-04-25 12:02:14 +03:00
|
|
|
return false;
|
2017-05-17 03:48:09 +03:00
|
|
|
}
|
|
|
|
|
2017-07-18 04:49:25 +03:00
|
|
|
argsz = sizeof(*irq_info);
|
2017-05-17 03:48:09 +03:00
|
|
|
irq_info = g_malloc0(argsz);
|
2020-05-05 15:57:55 +03:00
|
|
|
irq_info->index = irq;
|
2017-05-17 03:48:09 +03:00
|
|
|
irq_info->argsz = argsz;
|
|
|
|
if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
|
|
|
|
irq_info) < 0 || irq_info->count < 1) {
|
|
|
|
error_setg_errno(errp, errno, "vfio: Error getting irq info");
|
2024-04-25 12:02:14 +03:00
|
|
|
return false;
|
2017-05-17 03:48:09 +03:00
|
|
|
}
|
|
|
|
|
2020-05-05 15:57:55 +03:00
|
|
|
if (event_notifier_init(notifier, 0)) {
|
2017-05-17 03:48:09 +03:00
|
|
|
error_setg_errno(errp, errno,
|
2020-05-05 15:57:55 +03:00
|
|
|
"vfio: Unable to init event notifier for irq (%d)",
|
|
|
|
irq);
|
2024-04-25 12:02:14 +03:00
|
|
|
return false;
|
2017-05-17 03:48:09 +03:00
|
|
|
}
|
|
|
|
|
2020-05-05 15:57:55 +03:00
|
|
|
fd = event_notifier_get_fd(notifier);
|
|
|
|
qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
|
2019-04-12 14:42:31 +03:00
|
|
|
|
2024-05-22 07:39:59 +03:00
|
|
|
if (!vfio_set_irq_signaling(vdev, irq, 0,
|
|
|
|
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
|
2019-04-12 14:42:31 +03:00
|
|
|
qemu_set_fd_handler(fd, NULL, NULL, vcdev);
|
2020-05-05 15:57:55 +03:00
|
|
|
event_notifier_cleanup(notifier);
|
2017-05-17 03:48:09 +03:00
|
|
|
}
|
2024-04-25 12:02:14 +03:00
|
|
|
|
|
|
|
return true;
|
2017-05-17 03:48:09 +03:00
|
|
|
}
|
|
|
|
|
2020-05-05 15:57:55 +03:00
|
|
|
/*
 * Tear down one of the vfio-ccw IRQs: detach the eventfd from the
 * kernel (trigger fd -1), remove the main-loop handler and destroy the
 * EventNotifier.  Failure to detach is only warned about, since we are
 * on a teardown path.
 */
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    Error *err = NULL;
    EventNotifier *notifier;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}
|
|
|
|
|
2024-05-22 07:40:02 +03:00
|
|
|
/*
 * Discover and allocate shadow buffers for the vfio-ccw regions.  The
 * I/O region is mandatory; the async-cmd, schib and crw regions are
 * optional capabilities (a lookup failure there is simply skipped and
 * the corresponding pointer stays NULL).
 *
 * Returns true on success.  On failure, sets @errp, frees everything
 * allocated so far and returns false.
 *
 * NOTE(review): the out_err path frees the region buffers but does not
 * NULL the vcdev pointers — safe today because a failing realize never
 * reaches vfio_ccw_put_region(), but worth confirming if call sites
 * change.
 */
static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_region_info *info;
    int ret;

    /* Sanity check device */
    if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
        error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
        return false;
    }

    /*
     * We always expect at least the I/O region to be present. We also
     * may have a variable number of regions governed by capabilities.
     */
    if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "vfio: too few regions (%u), expected at least %u",
                   vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
        return false;
    }

    ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "vfio: Error getting config info");
        return false;
    }

    /* The host's region must be exactly the size of our shadow struct. */
    vcdev->io_region_size = info->size;
    if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
        error_setg(errp, "vfio: Unexpected size of the I/O region");
        goto out_err;
    }

    vcdev->io_region_offset = info->offset;
    vcdev->io_region = g_malloc0(info->size);
    g_free(info);

    /* check for the optional async command region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
    if (!ret) {
        vcdev->async_cmd_region_size = info->size;
        if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
            error_setg(errp, "vfio: Unexpected size of the async cmd region");
            goto out_err;
        }
        vcdev->async_cmd_region_offset = info->offset;
        vcdev->async_cmd_region = g_malloc0(info->size);
        g_free(info);
    }

    /* optional schib region (store subchannel support) */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
    if (!ret) {
        vcdev->schib_region_size = info->size;
        if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
            error_setg(errp, "vfio: Unexpected size of the schib region");
            goto out_err;
        }
        vcdev->schib_region_offset = info->offset;
        vcdev->schib_region = g_malloc(info->size);
        g_free(info);
    }

    /* optional CRW region (channel report word forwarding) */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_CRW, &info);

    if (!ret) {
        vcdev->crw_region_size = info->size;
        if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
            error_setg(errp, "vfio: Unexpected size of the CRW region");
            goto out_err;
        }
        vcdev->crw_region_offset = info->offset;
        vcdev->crw_region = g_malloc(info->size);
        g_free(info);
    }

    return true;

out_err:
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    g_free(info);
    return false;
}
|
|
|
|
|
|
|
|
/*
 * Free the shadow buffers for all vfio-ccw regions.
 *
 * Improvement: also reset the pointers to NULL.  Code in this file uses
 * the pointers as presence flags (vfio_ccw_handle_store() tests
 * vcdev->schib_region, vfio_ccw_handle_clear()/halt() test
 * vcdev->async_cmd_region, vfio_ccw_realize() tests vcdev->crw_region),
 * so leaving them dangling after free risks stale-pointer use and makes
 * a second call a double free.  g_free(NULL) is a no-op, so the
 * function stays safe to call with regions that were never allocated.
 */
static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->crw_region);
    vcdev->crw_region = NULL;
    g_free(vcdev->schib_region);
    vcdev->schib_region = NULL;
    g_free(vcdev->async_cmd_region);
    vcdev->async_cmd_region = NULL;
    g_free(vcdev->io_region);
    vcdev->io_region = NULL;
}
|
|
|
|
|
2017-05-17 03:48:07 +03:00
|
|
|
/*
 * Realize callback: bring up the passthrough subchannel in stages —
 * subchannel class realize, vfio device name/attach, region discovery,
 * then IRQ notifiers (IO mandatory; CRW only when the region exists;
 * REQ best-effort).  Each failure unwinds exactly the stages already
 * completed via the chain of out_* labels, in reverse order.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        return;
    }

    if (!vfio_attach_device(cdev->mdevid, vbasedev,
                            &address_space_memory, errp)) {
        goto out_attach_dev_err;
    }

    if (!vfio_ccw_get_region(vcdev, &err)) {
        goto out_region_err;
    }

    /* The IO IRQ is mandatory: without it I/O can never complete. */
    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err)) {
        goto out_io_notifier_err;
    }

    /* Only hook up CRW delivery if the host exposes a CRW region. */
    if (vcdev->crw_region) {
        if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX,
                                            &err)) {
            goto out_irq_notifier_err;
        }
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err)) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        error_report_err(err);
    }

    return;

    /* Unwind in reverse order of setup; labels fall through downward. */
out_irq_notifier_err:
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
out_err_propagate:
    /* err may be NULL here (failure already reported through errp). */
    error_propagate(errp, err);
}
|
|
|
|
|
qdev: Unrealize must not fail
Devices may have component devices and buses.
Device realization may fail. Realization is recursive: a device's
realize() method realizes its components, and device_set_realized()
realizes its buses (which should in turn realize the devices on that
bus, except bus_set_realized() doesn't implement that, yet).
When realization of a component or bus fails, we need to roll back:
unrealize everything we realized so far. If any of these unrealizes
failed, the device would be left in an inconsistent state. Must not
happen.
device_set_realized() lets it happen: it ignores errors in the roll
back code starting at label child_realize_fail.
Since realization is recursive, unrealization must be recursive, too.
But how could a partly failed unrealize be rolled back? We'd have to
re-realize, which can fail. This design is fundamentally broken.
device_set_realized() does not roll back at all. Instead, it keeps
unrealizing, ignoring further errors.
It can screw up even for a device with no buses: if the lone
dc->unrealize() fails, it still unregisters vmstate, and calls
listeners' unrealize() callback.
bus_set_realized() does not roll back either. Instead, it stops
unrealizing.
Fortunately, no unrealize method can fail, as we'll see below.
To fix the design error, drop parameter @errp from all the unrealize
methods.
Any unrealize method that uses @errp now needs an update. This leads
us to unrealize() methods that can fail. Merely passing it to another
unrealize method cannot cause failure, though. Here are the ones that
do other things with @errp:
* virtio_serial_device_unrealize()
Fails when qbus_set_hotplug_handler() fails, but still does all the
other work. On failure, the device would stay realized with its
resources completely gone. Oops. Can't happen, because
qbus_set_hotplug_handler() can't actually fail here. Pass
&error_abort to qbus_set_hotplug_handler() instead.
* hw/ppc/spapr_drc.c's unrealize()
Fails when object_property_del() fails, but all the other work is
already done. On failure, the device would stay realized with its
vmstate registration gone. Oops. Can't happen, because
object_property_del() can't actually fail here. Pass &error_abort
to object_property_del() instead.
* spapr_phb_unrealize()
Fails and bails out when remove_drcs() fails, but other work is
already done. On failure, the device would stay realized with some
of its resources gone. Oops. remove_drcs() fails only when
chassis_from_bus()'s object_property_get_uint() fails, and it can't
here. Pass &error_abort to remove_drcs() instead.
Therefore, no unrealize method can fail before this patch.
device_set_realized()'s recursive unrealization via bus uses
object_property_set_bool(). Can't drop @errp there, so pass
&error_abort.
We similarly unrealize with object_property_set_bool() elsewhere,
always ignoring errors. Pass &error_abort instead.
Several unrealize methods no longer handle errors from other unrealize
methods: virtio_9p_device_unrealize(),
virtio_input_device_unrealize(), scsi_qdev_unrealize(), ...
Much of the deleted error handling looks wrong anyway.
One unrealize methods no longer ignore such errors:
usb_ehci_pci_exit().
Several realize methods no longer ignore errors when rolling back:
v9fs_device_realize_common(), pci_qdev_unrealize(),
spapr_phb_realize(), usb_qdev_realize(), vfio_ccw_realize(),
virtio_device_realize().
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200505152926.18877-17-armbru@redhat.com>
2020-05-05 18:29:24 +03:00
|
|
|
/*
 * Tear down a vfio-ccw device: the inverse of realize.
 *
 * Teardown order matters: all IRQ notifiers are unregistered first (so no
 * eventfd callback can fire into a half-torn-down device), then the I/O
 * region mappings are released, and only then is the underlying VFIO device
 * detached from the container. Finally the device name is freed and the
 * parent class's unrealize hook (if any) is chained.
 *
 * Note: takes no Error ** — unrealize methods must not fail.
 */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    /* Stop all interrupt delivery before releasing device resources. */
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);

    g_free(vcdev->vdev.name);

    /* Chain to the parent s390-ccw class's unrealize, if it defines one. */
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
|
|
|
|
|
|
|
|
static Property vfio_ccw_properties[] = {
    /* Host sysfs path of the mediated subchannel device to assign. */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* When set, force the ORB prefetch bit (see force_orb_pfch usage). */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    /* Optional link to an iommufd backend object for IOMMUFD-based VFIO. */
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
|
|
|
|
|
|
|
|
/* vfio-ccw devices carry host state and therefore cannot be migrated. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
|
|
|
|
|
2023-11-21 11:44:16 +03:00
|
|
|
/* QOM instance_init: set up the embedded VFIODevice before realize runs. */
static void vfio_ccw_instance_init(Object *obj)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(obj);
    VFIODevice *vbasedev = &vcdev->vdev;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
                     DEVICE(vcdev), true);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_IOMMUFD
|
|
|
|
static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
|
|
|
|
{
|
|
|
|
vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-05-17 03:48:07 +03:00
|
|
|
/*
 * QOM class_init: wire up device-class properties, lifecycle hooks, and
 * the s390-ccw subchannel request handlers for the vfio-ccw type.
 */
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_ccw_properties);
#ifdef CONFIG_IOMMUFD
    /* "fd" is write-only (no getter): it only injects a descriptor. */
    object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
#endif
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    dc->reset = vfio_ccw_reset;

    /* Channel-subsystem instruction handlers forwarded to the host device. */
    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;
}
|
|
|
|
|
|
|
|
/* QOM type registration record: vfio-ccw derives from the s390-ccw type. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .instance_init = vfio_ccw_instance_init,
    .class_init = vfio_ccw_class_init,
};
|
|
|
|
|
|
|
|
/* Module init hook: register the vfio-ccw QOM type with the type system. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)
|