/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
|
2022-03-28 14:41:29 +03:00
|
|
|
#include "exec/address-spaces.h"
|
2015-12-04 14:06:26 +03:00
|
|
|
#include "sysemu/kvm.h"
|
2013-01-24 10:08:55 +04:00
|
|
|
#include "net/net.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/virtio/virtio.h"
|
2019-08-12 08:23:39 +03:00
|
|
|
#include "migration/qemu-file-types.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/virtio/virtio-net.h"
|
2013-01-24 10:08:55 +04:00
|
|
|
#include "qemu/bitops.h"
|
2015-03-17 20:29:20 +03:00
|
|
|
#include "qemu/error-report.h"
|
2022-03-28 14:41:29 +03:00
|
|
|
#include "qemu/log.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2014-12-11 16:25:12 +03:00
|
|
|
#include "hw/virtio/virtio-access.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/virtio/virtio-bus.h"
|
2013-07-15 19:45:03 +04:00
|
|
|
#include "hw/s390x/adapter.h"
|
|
|
|
#include "hw/s390x/s390_flic.h"
|
2013-01-24 10:08:55 +04:00
|
|
|
|
2015-12-04 14:06:26 +03:00
|
|
|
#include "hw/s390x/ioinst.h"
|
|
|
|
#include "hw/s390x/css.h"
|
2013-01-24 10:08:55 +04:00
|
|
|
#include "virtio-ccw.h"
|
|
|
|
#include "trace.h"
|
2015-11-06 14:32:40 +03:00
|
|
|
#include "hw/s390x/css-bridge.h"
|
2017-07-04 00:34:14 +03:00
|
|
|
#include "hw/s390x/s390-virtio-ccw.h"
|
2021-05-17 16:06:28 +03:00
|
|
|
#include "sysemu/replay.h"
|
2013-01-24 10:08:55 +04:00
|
|
|
|
2015-12-10 14:55:06 +03:00
|
|
|
#define NR_CLASSIC_INDICATOR_BITS 64
|
|
|
|
|
2021-03-17 12:56:21 +03:00
|
|
|
bool have_virtio_ccw = true;
|
|
|
|
|
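/*
 * Post-load fixup: re-attach the subchannel to this device, recompute the
 * adapter id when adapter interrupts are in use, and refill the subchannel
 * ids from the loaded subchannel state.
 */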
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}

typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}

static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}

const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

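/* Return the VirtIODevice behind a subchannel, or NULL if none is attached. */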
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

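/* (Un)assign the ioeventfd backing notifications for virtqueue n. */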
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
            virtio_init_region_cache(vdev, index);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

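/* Reset the virtio core state and drop all indicators set up by the guest. */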
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_bus_reset(&dev->bus);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

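/* Process a SET_VQ channel command in either legacy or virtio-1 format. */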
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ret = ccw_dstream_read(&sch->cds, linfo);
        if (ret) {
            return ret;
        }
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ret = ccw_dstream_read(&sch->cds, info);
        if (ret) {
            return ret;
        }
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}

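/* Interpret a single channel command word issued to this virtio subchannel. */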
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        } else {
            /*
             * If the driver issues any command that is not SET_VIRTIO_REV,
             * we'll have to operate the device in legacy mode.
             */
            dev->revision = 0;
        }
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ret = ccw_dstream_read(&sch->cds, features.index);
            if (ret) {
                break;
            }
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ret = ccw_dstream_write(&sch->cds, features.features);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(features);
            }
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, features);
            if (ret) {
                break;
            }
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            if (ret) {
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, status);
            if (ret) {
                break;
            }
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, vq_config.index);
            if (ret) {
                break;
            }
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            }
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        if (ret < 0) {
            break;
        }
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

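/*
 * Create the virtual subchannel backing this device, wire up its callbacks
 * and realize the device-specific proxy and the generic ccw device on top.
 */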
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    sch->irb_cb = build_irb_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

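/*
 * Atomically OR to_be_set into the guest indicator byte at ind_loc and
 * return the previous value (or -1 if the indicator cannot be mapped).
 */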
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

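/*
 * Notify the guest about virtqueue activity (vector < VIRTIO_QUEUE_MAX) or a
 * configuration change (vector == VIRTIO_QUEUE_MAX), using either classic or
 * adapter (thin) interrupts depending on what the guest has set up.
 */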
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

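/* Map the indicators and register one adapter route per active virtqueue. */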
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

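/* Set up or tear down the guest notifier for virtqueue n, with or without irqfd. */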
2013-02-19 16:48:17 +04:00
|
|
|
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

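/*
 * Bus callback for (de)assigning the guest notifiers of all virtqueues.
 * irqfds are only used with thin interrupts; adapter routes must be set up
 * before irqfds are assigned and may only be released after the irqfds are
 * gone again.
 */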
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

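/* The only per-queue state to migrate is the queue's vector. */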
static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}

static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

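/*
 * This is called by virtio-bus just before the device is plugged: offer
 * VIRTIO_F_VERSION_1 unless the transport is limited to revision 0.
 */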
static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
        /*
         * To avoid migration issues, we allow legacy mode when the legacy
         * check is disabled in the old machine types (< 5.1).
         */
        if (virtio_legacy_check_disabled(vdev)) {
            warn_report("device requires revision >= 1, but for backward "
                        "compatibility max_revision=0 is allowed");
        } else {
            error_setg(errp, "Invalid value of property max_rev "
                       "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
            return;
        }
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

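/* Called by virtio-bus when the virtio device goes away; stop ioeventfd. */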
static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/
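
/*
 * Realize the proxy: create the virtio bus it provides, then run the
 * device-specific ccw realize code.
 */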
static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev);
}

static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
}

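/* Hook the virtio-bus callbacks up to their ccw transport implementations. */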
static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};

static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)