2017-03-02 21:25:52 +03:00
|
|
|
/*
|
|
|
|
* vhost-user-scsi host device
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016 Nutanix Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Author:
|
|
|
|
* Felipe Franciosi <felipe@nutanix.com>
|
|
|
|
*
|
|
|
|
* This work is largely based on the "vhost-scsi" implementation by:
|
|
|
|
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
|
|
|
|
* Nicholas Bellinger <nab@risingtidesystems.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
|
|
|
* See the COPYING.LIB file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "qemu/error-report.h"
|
|
|
|
#include "hw/fw-path-provider.h"
|
|
|
|
#include "hw/qdev-core.h"
|
2019-08-12 08:23:51 +03:00
|
|
|
#include "hw/qdev-properties.h"
|
2020-12-12 01:05:12 +03:00
|
|
|
#include "hw/qdev-properties-system.h"
|
2017-03-02 21:25:52 +03:00
|
|
|
#include "hw/virtio/vhost.h"
|
|
|
|
#include "hw/virtio/vhost-backend.h"
|
|
|
|
#include "hw/virtio/vhost-user-scsi.h"
|
|
|
|
#include "hw/virtio/virtio.h"
|
|
|
|
#include "chardev/char-fe.h"
|
2019-08-12 08:23:58 +03:00
|
|
|
#include "sysemu/sysemu.h"
|
2017-03-02 21:25:52 +03:00
|
|
|
|
|
|
|
/*
 * Feature bits that may be negotiated with the vhost-user backend
 * application.  Consumed via vsc->feature_bits (set in
 * vhost_user_scsi_instance_init) by the common vhost feature-negotiation
 * code.  The array is terminated by VHOST_INVALID_FEATURE_BIT.
 */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_SCSI_F_HOTPLUG,
    VIRTIO_F_RING_RESET,
    VHOST_INVALID_FEATURE_BIT   /* sentinel: marks end of list */
};
|
|
|
|
|
2023-10-09 07:46:59 +03:00
|
|
|
/*
 * Start the backend through the common vhost-scsi start path and record
 * the outcome in s->started_vu so vhost_user_scsi_stop() knows whether
 * there is anything to undo.
 *
 * Returns the result of vhost_scsi_common_start(): 0 on success, a
 * negative value on failure (errp is filled in by the callee).
 */
static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp)
{
    int rc = vhost_scsi_common_start(VHOST_SCSI_COMMON(s), errp);

    /* Track success so stop() can be made idempotent */
    s->started_vu = (rc >= 0);

    return rc;
}
|
|
|
|
|
|
|
|
/*
 * Stop the backend if (and only if) a previous vhost_user_scsi_start()
 * succeeded.  Safe to call repeatedly: the started_vu flag is cleared
 * before stopping, so a second call is a no-op.
 */
static void vhost_user_scsi_stop(VHostUserSCSI *s)
{
    if (s->started_vu) {
        s->started_vu = false;
        vhost_scsi_common_stop(VHOST_SCSI_COMMON(s));
    }
}
|
|
|
|
|
2017-03-02 21:25:52 +03:00
|
|
|
/*
 * VirtioDeviceClass.set_status callback: bring the vhost backend in or
 * out of the running state to match the guest's virtio status byte.
 * On a failed start the chardev is disconnected so the reconnect
 * machinery can retry.
 */
static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserSCSI *s = (VHostUserSCSI *)vdev;
    DeviceState *dev = DEVICE(vdev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    bool should_start = virtio_device_should_start(vdev, status);
    Error *local_err = NULL;
    int ret;

    /* No backend connection: nothing to start or stop */
    if (!s->connected) {
        return;
    }

    /* Backend already matches the requested state */
    if (vhost_dev_is_started(&vsc->dev) == should_start) {
        return;
    }

    if (!should_start) {
        vhost_user_scsi_stop(s);
        return;
    }

    ret = vhost_user_scsi_start(s, &local_err);
    if (ret < 0) {
        error_reportf_err(local_err, "unable to start vhost-user-scsi: %s",
                          strerror(-ret));
        /* Drop the connection; the event handler will handle reconnect */
        qemu_chr_fe_disconnect(&vs->conf.chardev);
    }
}
|
|
|
|
|
2023-10-09 07:47:00 +03:00
|
|
|
/*
 * Virtqueue kick handler.  Once the vhost backend is running it processes
 * the rings directly, so this is only relevant when a guest kicks before
 * setting VIRTIO_CONFIG_S_DRIVER_OK (tracked by vdev->start_on_kick):
 * in that case start the backend here and forward the pending kicks.
 */
static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VHostUserSCSI *s = (VHostUserSCSI *)vdev;
    DeviceState *dev = DEVICE(vdev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    Error *local_err = NULL;
    int i, ret;

    /* Device does not need the early-kick workaround */
    if (!vdev->start_on_kick) {
        return;
    }

    /* Backend not connected yet; the kick will be replayed after connect */
    if (!s->connected) {
        return;
    }

    /* Backend already running: it handles kicks via host notifiers */
    if (vhost_dev_is_started(&vsc->dev)) {
        return;
    }

    /*
     * Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * vhost here instead of waiting for .set_status().
     */
    ret = vhost_user_scsi_start(s, &local_err);
    if (ret < 0) {
        error_reportf_err(local_err, "vhost-user-scsi: vhost start failed: ");
        /* Disconnect so the reconnect logic can attempt recovery */
        qemu_chr_fe_disconnect(&vs->conf.chardev);
        return;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < vsc->dev.nvqs; i++) {
        VirtQueue *kick_vq = virtio_get_queue(vdev, i);

        /* Skip queues the guest has not set up (no descriptor table) */
        if (!virtio_queue_get_desc_addr(vdev, i)) {
            continue;
        }
        event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
    }
}
|
|
|
|
|
2023-10-09 07:46:59 +03:00
|
|
|
/*
 * Set up the vhost device for a newly established chardev connection.
 * Idempotent: returns 0 immediately when already connected.  If the
 * virtio device is already started (e.g. on reconnect while the guest
 * is running), the backend is started right away to restore state.
 *
 * Returns 0 on success, negative value on failure (errp set).
 */
static int vhost_user_scsi_connect(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int ret = 0;

    if (s->connected) {
        return 0;
    }
    s->connected = true;

    /* Fixed queues (control, event) come before the request queues */
    vsc->dev.num_queues = vs->conf.num_queues;
    vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
    vsc->dev.vqs = s->vhost_vqs;
    vsc->dev.vq_index = 0;
    vsc->dev.backend_features = 0;

    ret = vhost_dev_init(&vsc->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
                         errp);
    if (ret < 0) {
        return ret;
    }

    /* restore vhost state */
    if (virtio_device_started(vdev, vdev->status)) {
        ret = vhost_user_scsi_start(s, errp);
    }

    return ret;
}
|
|
|
|
|
|
|
|
static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event);
|
|
|
|
|
|
|
|
/*
 * Tear down the vhost device after the chardev connection is lost.
 * Idempotent via the s->connected flag.  Unconditionally re-installs the
 * chardev event handler afterwards so a later backend reconnect is
 * noticed (qemu_chr_fe_set_handlers cleared it during async close).
 */
static void vhost_user_scsi_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    if (!s->connected) {
        return;
    }
    s->connected = false;

    vhost_user_scsi_stop(s);

    vhost_dev_cleanup(&vsc->dev);

    /* Re-instate the event handler for new connections */
    qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
                             vhost_user_scsi_event, NULL, dev, NULL, true);
}
|
|
|
|
|
|
|
|
/*
 * Chardev event callback: drive connect/disconnect of the vhost-user
 * backend.  CHR_EVENT_CLOSED is deferred through vhost_user_async_close()
 * because closing synchronously from inside the chardev callback would
 * recurse into the chardev layer.
 */
static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    Error *local_err = NULL;

    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_scsi_connect(dev, &local_err) < 0) {
            error_report_err(local_err);
            /* Failed setup: drop the link so it can be retried */
            qemu_chr_fe_disconnect(&vs->conf.chardev);
            return;
        }
        break;
    case CHR_EVENT_CLOSED:
        /* defer close until later to avoid circular close */
        vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
                               vhost_user_scsi_disconnect,
                               vhost_user_scsi_event);
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }
}
|
|
|
|
|
|
|
|
/*
 * One realize-time connection attempt: wait for the chardev to come up,
 * then initialise the vhost device over it.  On a failed connect the
 * chardev is dropped so the caller's retry loop starts from a clean slate.
 *
 * Returns 0 on success, negative value on failure (errp set).
 */
static int vhost_user_scsi_realize_connect(VHostUserSCSI *s, Error **errp)
{
    DeviceState *dev = DEVICE(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int rc;

    s->connected = false;

    rc = qemu_chr_fe_wait_connected(&vs->conf.chardev, errp);
    if (rc < 0) {
        return rc;
    }

    rc = vhost_user_scsi_connect(dev, errp);
    if (rc < 0) {
        /* Drop the half-open link before the caller retries */
        qemu_chr_fe_disconnect(&vs->conf.chardev);
        return rc;
    }

    /* vhost_user_scsi_connect() must have flipped the flag on success */
    assert(s->connected);
    return 0;
}
|
|
|
|
|
2017-03-02 21:25:52 +03:00
|
|
|
/*
 * Device realize: validate configuration, set up the common virtio-scsi
 * state, then connect to the vhost-user backend with a bounded number of
 * retries (VU_REALIZE_CONN_RETRIES).  Only once fully connected is the
 * chardev event handler installed, so reconnect events cannot race with
 * initialization.  Cleanup on failure unwinds via the goto labels in
 * reverse order of acquisition.
 */
static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(dev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    Error *err = NULL;
    int ret;
    int retries = VU_REALIZE_CONN_RETRIES;

    if (!vs->conf.chardev.chr) {
        error_setg(errp, "vhost-user-scsi: missing chardev");
        return;
    }

    /* Same kick handler for control, event and request queues */
    virtio_scsi_common_realize(dev, vhost_user_scsi_handle_output,
                               vhost_user_scsi_handle_output,
                               vhost_user_scsi_handle_output, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) {
        goto free_virtio;
    }

    vsc->inflight = g_new0(struct vhost_inflight, 1);
    s->vhost_vqs = g_new0(struct vhost_virtqueue,
                          VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues);

    /* Retry loop: report and clear the error from the previous attempt */
    assert(!*errp);
    do {
        if (*errp) {
            error_prepend(errp, "Reconnecting after error: ");
            error_report_err(*errp);
            *errp = NULL;
        }
        ret = vhost_user_scsi_realize_connect(s, errp);
    } while (ret < 0 && retries--);

    if (ret < 0) {
        goto free_vhost;
    }

    /* we're fully initialized, now we can operate, so add the handler */
    qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
                             vhost_user_scsi_event, NULL, (void *)dev,
                             NULL, true);
    /* Channel and lun both are 0 for bootable vhost-user-scsi disk */
    vsc->channel = 0;
    vsc->lun = 0;
    vsc->target = vs->conf.boot_tpgt;

    return;

free_vhost:
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(vsc->inflight);
    vsc->inflight = NULL;
    vhost_user_cleanup(&s->vhost_user);

free_virtio:
    virtio_scsi_common_unrealize(dev);
}
|
|
|
|
|
qdev: Unrealize must not fail
Devices may have component devices and buses.
Device realization may fail. Realization is recursive: a device's
realize() method realizes its components, and device_set_realized()
realizes its buses (which should in turn realize the devices on that
bus, except bus_set_realized() doesn't implement that, yet).
When realization of a component or bus fails, we need to roll back:
unrealize everything we realized so far. If any of these unrealizes
failed, the device would be left in an inconsistent state. Must not
happen.
device_set_realized() lets it happen: it ignores errors in the roll
back code starting at label child_realize_fail.
Since realization is recursive, unrealization must be recursive, too.
But how could a partly failed unrealize be rolled back? We'd have to
re-realize, which can fail. This design is fundamentally broken.
device_set_realized() does not roll back at all. Instead, it keeps
unrealizing, ignoring further errors.
It can screw up even for a device with no buses: if the lone
dc->unrealize() fails, it still unregisters vmstate, and calls
listeners' unrealize() callback.
bus_set_realized() does not roll back either. Instead, it stops
unrealizing.
Fortunately, no unrealize method can fail, as we'll see below.
To fix the design error, drop parameter @errp from all the unrealize
methods.
Any unrealize method that uses @errp now needs an update. This leads
us to unrealize() methods that can fail. Merely passing it to another
unrealize method cannot cause failure, though. Here are the ones that
do other things with @errp:
* virtio_serial_device_unrealize()
Fails when qbus_set_hotplug_handler() fails, but still does all the
other work. On failure, the device would stay realized with its
resources completely gone. Oops. Can't happen, because
qbus_set_hotplug_handler() can't actually fail here. Pass
&error_abort to qbus_set_hotplug_handler() instead.
* hw/ppc/spapr_drc.c's unrealize()
Fails when object_property_del() fails, but all the other work is
already done. On failure, the device would stay realized with its
vmstate registration gone. Oops. Can't happen, because
object_property_del() can't actually fail here. Pass &error_abort
to object_property_del() instead.
* spapr_phb_unrealize()
Fails and bails out when remove_drcs() fails, but other work is
already done. On failure, the device would stay realized with some
of its resources gone. Oops. remove_drcs() fails only when
chassis_from_bus()'s object_property_get_uint() fails, and it can't
here. Pass &error_abort to remove_drcs() instead.
Therefore, no unrealize method can fail before this patch.
device_set_realized()'s recursive unrealization via bus uses
object_property_set_bool(). Can't drop @errp there, so pass
&error_abort.
We similarly unrealize with object_property_set_bool() elsewhere,
always ignoring errors. Pass &error_abort instead.
Several unrealize methods no longer handle errors from other unrealize
methods: virtio_9p_device_unrealize(),
virtio_input_device_unrealize(), scsi_qdev_unrealize(), ...
Much of the deleted error handling looks wrong anyway.
One unrealize methods no longer ignore such errors:
usb_ehci_pci_exit().
Several realize methods no longer ignore errors when rolling back:
v9fs_device_realize_common(), pci_qdev_unrealize(),
spapr_phb_realize(), usb_qdev_realize(), vfio_ccw_realize(),
virtio_device_realize().
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200505152926.18877-17-armbru@redhat.com>
2020-05-05 18:29:24 +03:00
|
|
|
/*
 * Device unrealize: stop the backend, detach the chardev event handler,
 * then release vhost resources in reverse order of realize.  Unrealize
 * must not fail, hence no errp parameter.
 */
static void vhost_user_scsi_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(dev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    /* This will stop the vhost backend. */
    vhost_user_scsi_set_status(vdev, 0);
    /* Detach the event handler before tearing down so no events fire */
    qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL, NULL, NULL, NULL,
                             NULL, false);

    vhost_dev_cleanup(&vsc->dev);
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;

    vhost_dev_free_inflight(vsc->inflight);
    g_free(vsc->inflight);
    vsc->inflight = NULL;

    vhost_user_cleanup(&s->vhost_user);
    virtio_scsi_common_unrealize(dev);
}
|
|
|
|
|
|
|
|
/* qdev properties exposed on the -device vhost-user-scsi command line */
static Property vhost_user_scsi_properties[] = {
    /* Socket chardev carrying the vhost-user protocol (required) */
    DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev),
    DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
    /* AUTO lets the machine pick a queue count at realize time */
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size,
                       128),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128),
    /* Optional host feature bits, individually switchable by the user */
    DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features,
                                                  VIRTIO_SCSI_F_HOTPLUG,
                                                  true),
    DEFINE_PROP_BIT64("param_change", VHostSCSICommon, host_features,
                                                       VIRTIO_SCSI_F_CHANGE,
                                                       true),
    DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
                                                 VIRTIO_SCSI_F_T10_PI,
                                                 false),
    DEFINE_PROP_END_OF_LIST(),
};
|
|
|
|
|
|
|
|
/*
 * Migration state: only the generic virtio device state is serialized;
 * the name "virtio-scsi" keeps the wire format compatible with that
 * device class.
 */
static const VMStateDescription vmstate_vhost_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
/*
 * Class init: wire up device properties, migration state, the virtio
 * lifecycle callbacks (realize/unrealize/set_status), and the firmware
 * boot-path provider.  Feature/config handling is delegated to the
 * vhost-scsi-common implementations.
 */
static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(klass);

    device_class_set_props(dc, vhost_user_scsi_properties);
    dc->vmsd = &vmstate_vhost_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vhost_user_scsi_realize;
    vdc->unrealize = vhost_user_scsi_unrealize;
    vdc->get_features = vhost_scsi_common_get_features;
    vdc->set_config = vhost_scsi_common_set_config;
    vdc->set_status = vhost_user_scsi_set_status;
    fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path;
}
|
|
|
|
|
|
|
|
/*
 * Per-instance init: point the common vhost-scsi state at this device's
 * negotiable feature list and expose the "bootindex" property.
 */
static void vhost_user_scsi_instance_init(Object *obj)
{
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(obj);

    vsc->feature_bits = user_feature_bits;

    /* Add the bootindex property for this object */
    device_add_bootindex_property(obj, &vsc->bootindex, "bootindex", NULL,
                                  DEVICE(vsc));
}
|
|
|
|
|
|
|
|
/*
 * QOM type registration: vhost-user-scsi derives from vhost-scsi-common
 * and implements the firmware path provider interface for boot ordering.
 */
static const TypeInfo vhost_user_scsi_info = {
    .name = TYPE_VHOST_USER_SCSI,
    .parent = TYPE_VHOST_SCSI_COMMON,
    .instance_size = sizeof(VHostUserSCSI),
    .class_init = vhost_user_scsi_class_init,
    .instance_init = vhost_user_scsi_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { }
    },
};
|
|
|
|
|
|
|
|
/* Register the vhost-user-scsi QOM type at module init time. */
static void virtio_register_types(void)
{
    type_register_static(&vhost_user_scsi_info);
}

type_init(virtio_register_types)
|