#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/hw-version.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);
static void scsi_clear_reported_luns_changed(SCSIRequest *req);

static int next_scsi_bus;
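
/*
 * Find the SCSI device on @bus at (channel, id, lun).  If there is no exact
 * LUN match, fall back to the first device matching channel/id.  Callers
 * must hold the RCU read lock.
 */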
static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
{
    BusChild *kid;
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                retval = dev;
                break;
            }

            /*
             * If we don't find an exact match (channel/id/lun),
             * we will return the first device which matches channel/id.
             */
            if (!retval) {
                retval = dev;
            }
        }
    }

    /*
     * This function might run on the I/O thread and we might race against
     * the main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */
    if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
        retval = NULL;
    }

    return retval;
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);
}
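
/*
 * Like scsi_device_find(), but also take a reference on the returned device.
 * The caller must drop it with object_unref() when done.
 */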
SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, id, lun, false);
    if (d) {
        object_ref(d);
    }
    return d;
}

/*
 * Invoke @fn() for each enqueued request in device @s.  Must be called from
 * the main loop thread while the guest is stopped.  This is only suitable
 * for vmstate ->put(); use scsi_device_for_each_req_async() for other cases.
 */
static void scsi_device_for_each_req_sync(SCSIDevice *s,
                                          void (*fn)(SCSIRequest *, void *),
                                          void *opaque)
{
    SCSIRequest *req;
    SCSIRequest *next_req;

    assert(!runstate_is_running());
    assert(qemu_in_main_thread());

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
        fn(req, opaque);
    }
}

typedef struct {
    SCSIDevice *s;
    void (*fn)(SCSIRequest *, void *);
    void *fn_opaque;
} SCSIDeviceForEachReqAsyncData;
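
/*
 * Bottom half scheduled by scsi_device_for_each_req_async().  Runs in the
 * BlockBackend's AioContext and releases the device reference and in-flight
 * count that were taken when it was scheduled.
 */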
static void scsi_device_for_each_req_async_bh(void *opaque)
{
    g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
    SCSIDevice *s = data->s;
    AioContext *ctx;
    SCSIRequest *req;
    SCSIRequest *next;

    /*
     * The BB cannot have changed contexts between this BH being scheduled and
     * now: BBs' AioContexts, when they have a node attached, can only be
     * changed via bdrv_try_change_aio_context(), in a drained section.  While
     * we have the in-flight counter incremented, that drain must block.
     */
    ctx = blk_get_aio_context(s->conf.blk);
    assert(ctx == qemu_get_current_aio_context());

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        data->fn(req, data->fn_opaque);
    }

    /* Drop the reference taken by scsi_device_for_each_req_async() */
    object_unref(OBJECT(s));

    /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
    blk_dec_in_flight(s->conf.blk);
}

/*
 * Schedule @fn() to be invoked for each enqueued request in device @s.  @fn()
 * runs in the AioContext that is executing the request.
 * Keeps the BlockBackend's in-flight counter incremented until everything is
 * done, so draining it will settle all scheduled @fn() calls.
 */
static void scsi_device_for_each_req_async(SCSIDevice *s,
                                           void (*fn)(SCSIRequest *, void *),
                                           void *opaque)
{
    assert(qemu_in_main_thread());

    SCSIDeviceForEachReqAsyncData *data =
        g_new(SCSIDeviceForEachReqAsyncData, 1);

    data->s = s;
    data->fn = fn;
    data->fn_opaque = opaque;

    /*
     * Hold a reference to the SCSIDevice until
     * scsi_device_for_each_req_async_bh() finishes.
     */
    object_ref(OBJECT(s));

    /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
    blk_inc_in_flight(s->conf.blk);
    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
                            scsi_device_for_each_req_async_bh,
                            data);
}

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unrealize) {
        sc->unrealize(s);
    }
}
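
/*
 * Parse the CDB in @buf into @cmd with the common parser and, if the HBA
 * provides a parse_cdb() callback, let it override the result.
 */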
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
                         const SCSIBusInfo *info, const char *bus_name)
{
    qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}
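
/*
 * Mark @req so that it is retried when the VM resumes; see
 * scsi_dma_restart_req().
 */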
void scsi_req_retry(SCSIRequest *req)
{
    req->retry = true;
}

/* Called in the AioContext that is executing the request */
static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
{
    scsi_req_ref(req);
    if (req->retry) {
        req->retry = false;
        switch (req->cmd.mode) {
        case SCSI_XFER_FROM_DEV:
        case SCSI_XFER_TO_DEV:
            scsi_req_continue(req);
            break;
        case SCSI_XFER_NONE:
            scsi_req_dequeue(req);
            scsi_req_enqueue(req);
            break;
        }
    }
    scsi_req_unref(req);
}
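
/*
 * VM run state change handler: when the guest resumes, retry any request
 * that was marked with scsi_req_retry() while it was stopped.
 */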
static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
{
    SCSIDevice *s = opaque;

    assert(qemu_in_main_thread());

    if (!running) {
        return;
    }

    scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
}
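
/*
 * Return true if no device occupies (channel, target, lun) on @bus.  If the
 * address is taken and @p_dev is non-NULL, store the occupying device in
 * *p_dev.
 */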
static bool scsi_bus_is_address_free(SCSIBus *bus,
                                     int channel, int target, int lun,
                                     SCSIDevice **p_dev)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, target, lun, true);
    if (d && d->lun == lun) {
        if (p_dev) {
            *p_dev = d;
        }
        return false;
    }
    if (p_dev) {
        *p_dev = NULL;
    }
    return true;
}

static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return false;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return false;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return false;
    }

    if (dev->id != -1 && dev->lun != -1) {
        SCSIDevice *d;
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return false;
        }
    }

    return true;
}
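
/*
 * Realize a SCSI device.  A scsi-id or lun of -1 means "pick the first free
 * address on the bus"; if none is available, realization fails.
 */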
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    bool is_free;
    Error *local_err = NULL;

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
        if (!is_free) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
        } while (!is_free && lun < bus->info->max_lun);
        if (!is_free) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, BlockConf *conf,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    SCSIDevice *s;
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
    } else {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {
            driver = "scsi-cd";
        } else {
            driver = "scsi-hd";
        }
    }
    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));
    g_free(name);

    s = SCSI_DEVICE(dev);
    s->conf = *conf;

    check_boot_index(conf->bootindex, &local_err);
    if (local_err) {
        object_unparent(OBJECT(dev));
        error_propagate(errp, local_err);
        return NULL;
    }
    add_boot_device_path(conf->bootindex, dev, NULL);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (object_property_find(OBJECT(dev), "removable")) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial")) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return s;
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;
    BlockConf conf = {
        .bootindex = -1,
        .share_rw = false,
        .rerror = BLOCKDEV_ON_ERROR_AUTO,
        .werror = BLOCKDEV_ON_ERROR_AUTO,
    };

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, &conf, NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
{
    SCSISense *ua = NULL;

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;
    }

    /*
     * Fetch the unit attention sense immediately so that another
     * scsi_req_new() does not use reqops_unit_attention.
     */
    if (ua) {
        scsi_req_build_sense(req, *ua);
        *ua = SENSE_CODE(NO_SENSE);
    }
}

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .init_req = scsi_fetch_unit_attention_sense,
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};
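
/*
 * Encode @lun into the first two bytes of an 8-byte REPORT LUNS entry,
 * using simple addressing for LUNs below 256 and flat space addressing
 * otherwise.
 */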
static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        /* Simple logical unit addressing method */
        outbuf[0] = 0;
        outbuf[1] = lun;
    } else {
        /* Flat space addressing method */
        outbuf[0] = 0x40 | (lun >> 8);
        outbuf[1] = (lun & 255);
    }
}
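
/*
 * Build the REPORT LUNS payload for the target: LUN 0 plus every realized
 * device on the same channel/id.  Returns false if the CDB is invalid.
 */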
static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int channel, id;
    uint8_t tmp[8] = {0};
    int len = 0;
    GByteArray *buf;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }

    /* reserve space for 63 LUNs */
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;
    id = r->req.dev->id;

    /* add size (will be updated later to the correct value) */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    /* add LUN0 */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
                qdev_is_realized(&dev->qdev)) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);
                len += 8;
            }
        }
    }

    r->buf_len = len;
    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    scsi_clear_reported_luns_changed(&r->req);

    return true;
}
|
|
|
|
|
|
|
|
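/*
 * Minimal INQUIRY emulation for a LUN that has no device attached: only the
 * mandatory "supported VPD pages" page and standard INQUIRY data reporting
 * the logical unit as not present are provided.
 */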
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
|
|
|
|
{
|
|
|
|
assert(r->req.dev->lun != r->req.lun);
|
2013-10-09 11:41:03 +04:00
|
|
|
|
|
|
|
scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
|
|
|
|
|
2011-08-03 12:49:14 +04:00
|
|
|
if (r->req.cmd.buf[1] & 0x2) {
|
|
|
|
/* Command support data - optional, not implemented */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (r->req.cmd.buf[1] & 0x1) {
|
|
|
|
/* Vital product data */
|
|
|
|
uint8_t page_code = r->req.cmd.buf[2];
|
|
|
|
r->buf[r->len++] = page_code; /* this page */
|
|
|
|
r->buf[r->len++] = 0x00;
|
|
|
|
|
|
|
|
switch (page_code) {
|
|
|
|
case 0x00: /* Supported page codes, mandatory */
|
|
|
|
{
|
|
|
|
int pages;
|
|
|
|
pages = r->len++;
|
|
|
|
r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
|
|
|
|
r->buf[pages] = r->len - pages - 1; /* number of pages */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* done with EVPD */
|
2013-10-09 11:41:03 +04:00
|
|
|
assert(r->len < r->buf_len);
|
2011-08-03 12:49:14 +04:00
|
|
|
r->len = MIN(r->req.cmd.xfer, r->len);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Standard INQUIRY data */
|
|
|
|
if (r->req.cmd.buf[2] != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PAGE CODE == 0 */
|
2013-10-09 11:41:03 +04:00
|
|
|
r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
|
2011-08-03 12:49:14 +04:00
|
|
|
memset(r->buf, 0, r->len);
|
|
|
|
if (r->req.lun != 0) {
|
|
|
|
r->buf[0] = TYPE_NO_LUN;
|
|
|
|
} else {
|
|
|
|
r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
|
|
|
|
r->buf[2] = 5; /* Version */
|
|
|
|
r->buf[3] = 2 | 0x10; /* HiSup, response data format */
|
|
|
|
r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
|
2011-08-13 17:44:45 +04:00
|
|
|
r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
|
2011-08-03 12:49:14 +04:00
|
|
|
memcpy(&r->buf[8], "QEMU ", 8);
|
|
|
|
memcpy(&r->buf[16], "QEMU TARGET ", 16);
|
2015-10-30 22:36:08 +03:00
|
|
|
pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
|
2011-08-03 12:49:14 +04:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-06-29 03:11:46 +03:00
|
|
|
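/* Scanners use a larger REQUEST SENSE allocation than other device types. */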
static size_t scsi_sense_len(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
if (req->dev->type == TYPE_SCANNER)
|
|
|
|
return SCSI_SENSE_LEN_SCANNER;
|
|
|
|
else
|
|
|
|
return SCSI_SENSE_LEN;
|
|
|
|
}
|
|
|
|
|
2011-08-03 12:49:14 +04:00
|
|
|
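/*
 * Handle commands addressed to the target rather than to a logical unit:
 * REPORT LUNS, INQUIRY, REQUEST SENSE and TEST UNIT READY are emulated here;
 * anything else fails with LUN NOT SUPPORTED or INVALID OPCODE.
 */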
static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
|
|
|
|
{
|
|
|
|
SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
|
2017-08-18 12:37:02 +03:00
|
|
|
int fixed_sense = (req->cmd.buf[1] & 1) == 0;
|
2011-08-03 12:49:14 +04:00
|
|
|
|
2017-08-18 12:37:02 +03:00
|
|
|
if (req->lun != 0 &&
|
|
|
|
buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
|
2017-08-04 11:36:34 +03:00
|
|
|
scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
|
|
|
|
scsi_req_complete(req, CHECK_CONDITION);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-08-03 12:49:14 +04:00
|
|
|
switch (buf[0]) {
|
|
|
|
case REPORT_LUNS:
|
|
|
|
if (!scsi_target_emulate_report_luns(r)) {
|
|
|
|
goto illegal_request;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case INQUIRY:
|
|
|
|
if (!scsi_target_emulate_inquiry(r)) {
|
|
|
|
goto illegal_request;
|
|
|
|
}
|
|
|
|
break;
|
2011-08-03 12:49:15 +04:00
|
|
|
case REQUEST_SENSE:
|
2016-06-29 03:11:46 +03:00
|
|
|
scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
|
2017-08-18 12:37:02 +03:00
|
|
|
if (req->lun != 0) {
|
|
|
|
const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
|
|
|
|
|
2017-11-27 15:27:41 +03:00
|
|
|
r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
|
|
|
|
sense, fixed_sense);
|
2017-08-18 12:37:02 +03:00
|
|
|
} else {
|
|
|
|
r->len = scsi_device_get_sense(r->req.dev, r->buf,
|
|
|
|
MIN(req->cmd.xfer, r->buf_len),
|
|
|
|
fixed_sense);
|
|
|
|
}
|
2011-09-13 18:19:53 +04:00
|
|
|
if (r->req.dev->sense_is_ua) {
|
2011-12-16 00:50:08 +04:00
|
|
|
scsi_device_unit_attention_reported(req->dev);
|
2011-09-13 18:19:53 +04:00
|
|
|
r->req.dev->sense_len = 0;
|
|
|
|
r->req.dev->sense_is_ua = false;
|
|
|
|
}
|
2011-08-03 12:49:15 +04:00
|
|
|
break;
|
2014-01-16 16:06:13 +04:00
|
|
|
case TEST_UNIT_READY:
|
|
|
|
break;
|
2011-08-03 12:49:14 +04:00
|
|
|
default:
|
2017-08-04 11:36:34 +03:00
|
|
|
scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
|
2011-08-03 12:49:14 +04:00
|
|
|
scsi_req_complete(req, CHECK_CONDITION);
|
|
|
|
return 0;
|
|
|
|
illegal_request:
|
|
|
|
scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
|
|
|
|
scsi_req_complete(req, CHECK_CONDITION);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!r->len) {
|
|
|
|
scsi_req_complete(req, GOOD);
|
|
|
|
}
|
|
|
|
return r->len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void scsi_target_read_data(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
|
|
|
|
uint32_t n;
|
|
|
|
|
|
|
|
n = r->len;
|
|
|
|
if (n > 0) {
|
|
|
|
r->len = 0;
|
|
|
|
scsi_req_data(&r->req, n);
|
|
|
|
} else {
|
|
|
|
scsi_req_complete(&r->req, GOOD);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint8_t *scsi_target_get_buf(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
|
|
|
|
|
|
|
|
return r->buf;
|
|
|
|
}
|
|
|
|
|
2013-10-09 11:41:03 +04:00
|
|
|
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
|
|
|
|
{
|
|
|
|
SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
|
|
|
|
|
|
|
|
r->buf = g_malloc(len);
|
|
|
|
r->buf_len = len;
|
|
|
|
|
|
|
|
return r->buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void scsi_target_free_buf(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
|
|
|
|
|
|
|
|
g_free(r->buf);
|
|
|
|
}
|
|
|
|
|
2011-10-12 14:57:59 +04:00
|
|
|
static const struct SCSIReqOps reqops_target_command = {
|
2011-08-03 12:49:14 +04:00
|
|
|
.size = sizeof(SCSITargetReq),
|
|
|
|
.send_command = scsi_target_send_command,
|
|
|
|
.read_data = scsi_target_read_data,
|
|
|
|
.get_buf = scsi_target_get_buf,
|
2013-10-09 11:41:03 +04:00
|
|
|
.free_req = scsi_target_free_buf,
|
2011-08-03 12:49:14 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-10-12 14:57:59 +04:00
|
|
|
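/*
 * Allocate a request of the size demanded by the reqops, taking a reference
 * on both the device and the HBA so that neither goes away while the request
 * is in flight.
 */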
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
|
|
|
|
uint32_t tag, uint32_t lun, void *hba_private)
|
2009-11-26 17:33:50 +03:00
|
|
|
{
|
|
|
|
SCSIRequest *req;
|
2013-06-03 16:09:39 +04:00
|
|
|
SCSIBus *bus = scsi_bus_from_device(d);
|
|
|
|
BusState *qbus = BUS(bus);
|
2014-09-16 11:20:17 +04:00
|
|
|
const int memset_off = offsetof(SCSIRequest, sense)
|
|
|
|
+ sizeof(req->sense);
|
2009-11-26 17:33:50 +03:00
|
|
|
|
2015-10-01 13:59:01 +03:00
|
|
|
req = g_malloc(reqops->size);
|
2014-09-16 11:20:17 +04:00
|
|
|
memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
|
2011-04-18 14:35:39 +04:00
|
|
|
req->refcount = 1;
|
2013-06-03 16:09:39 +04:00
|
|
|
req->bus = bus;
|
2009-11-26 17:33:50 +03:00
|
|
|
req->dev = d;
|
|
|
|
req->tag = tag;
|
|
|
|
req->lun = lun;
|
2011-07-11 17:02:24 +04:00
|
|
|
req->hba_private = hba_private;
|
2009-11-26 17:34:00 +03:00
|
|
|
req->status = -1;
|
2021-02-24 21:14:50 +03:00
|
|
|
req->host_status = -1;
|
2011-08-03 12:49:08 +04:00
|
|
|
req->ops = reqops;
|
2013-06-03 16:09:39 +04:00
|
|
|
object_ref(OBJECT(d));
|
|
|
|
object_ref(OBJECT(qbus->parent));
|
2014-09-28 05:48:00 +04:00
|
|
|
notifier_list_init(&req->cancel_notifiers);
|
2023-07-12 16:43:50 +03:00
|
|
|
|
|
|
|
if (reqops->init_req) {
|
|
|
|
reqops->init_req(req);
|
|
|
|
}
|
|
|
|
|
2011-04-15 13:51:13 +04:00
|
|
|
trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
|
2009-11-26 17:33:50 +03:00
|
|
|
return req;
|
|
|
|
}
|
|
|
|
|
2011-07-11 17:02:24 +04:00
|
|
|
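/*
 * Parse a CDB and create the appropriate request: a unit attention report,
 * a target (non-LUN) command, or a device-specific request allocated through
 * the SCSIDeviceClass.
 */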
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
|
2022-08-17 08:34:58 +03:00
|
|
|
uint8_t *buf, size_t buf_len, void *hba_private)
|
2011-04-18 21:09:55 +04:00
|
|
|
{
|
2011-08-03 12:49:17 +04:00
|
|
|
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
|
2014-07-16 12:23:43 +04:00
|
|
|
const SCSIReqOps *ops;
|
2014-07-16 12:39:05 +04:00
|
|
|
SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
|
2011-08-03 12:49:10 +04:00
|
|
|
SCSIRequest *req;
|
2014-07-16 12:23:43 +04:00
|
|
|
SCSICommand cmd = { .len = 0 };
|
|
|
|
int ret;
|
|
|
|
|
2022-08-17 08:35:00 +03:00
|
|
|
if (buf_len == 0) {
|
|
|
|
trace_scsi_req_parse_bad(d->id, lun, tag, 0);
|
|
|
|
goto invalid_opcode;
|
|
|
|
}
|
|
|
|
|
2014-07-16 12:23:43 +04:00
|
|
|
if ((d->unit_attention.key == UNIT_ATTENTION ||
|
|
|
|
bus->unit_attention.key == UNIT_ATTENTION) &&
|
|
|
|
(buf[0] != INQUIRY &&
|
|
|
|
buf[0] != REPORT_LUNS &&
|
|
|
|
buf[0] != GET_CONFIGURATION &&
|
|
|
|
buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we already have a pending unit attention condition,
|
|
|
|
* report this one before triggering another one.
|
|
|
|
*/
|
|
|
|
!(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
|
|
|
|
ops = &reqops_unit_attention;
|
|
|
|
} else if (lun != d->lun ||
|
|
|
|
buf[0] == REPORT_LUNS ||
|
|
|
|
(buf[0] == REQUEST_SENSE && d->sense_len)) {
|
|
|
|
ops = &reqops_target_command;
|
|
|
|
} else {
|
|
|
|
ops = NULL;
|
|
|
|
}
|
2011-08-03 12:49:13 +04:00
|
|
|
|
2014-07-16 12:39:05 +04:00
|
|
|
if (ops != NULL || !sc->parse_cdb) {
|
2022-08-17 08:34:58 +03:00
|
|
|
ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
|
2014-07-16 12:39:05 +04:00
|
|
|
} else {
|
2022-08-17 08:34:58 +03:00
|
|
|
ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
|
2014-07-16 12:39:05 +04:00
|
|
|
}
|
|
|
|
|
2014-07-16 12:23:43 +04:00
|
|
|
if (ret != 0) {
|
2011-08-03 12:49:13 +04:00
|
|
|
trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
|
2022-08-17 08:35:00 +03:00
|
|
|
invalid_opcode:
|
2011-08-03 12:49:13 +04:00
|
|
|
req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
|
|
|
|
} else {
|
2014-07-16 12:23:43 +04:00
|
|
|
assert(cmd.len != 0);
|
2011-08-03 12:49:13 +04:00
|
|
|
trace_scsi_req_parsed(d->id, lun, tag, buf[0],
|
|
|
|
cmd.mode, cmd.xfer);
|
2011-08-12 20:49:36 +04:00
|
|
|
if (cmd.lba != -1) {
|
2011-08-03 12:49:13 +04:00
|
|
|
trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
|
|
|
|
cmd.lba);
|
|
|
|
}
|
2011-08-03 12:49:14 +04:00
|
|
|
|
2012-02-08 14:49:43 +04:00
|
|
|
if (cmd.xfer > INT32_MAX) {
|
|
|
|
req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
|
2014-07-16 12:23:43 +04:00
|
|
|
} else if (ops) {
|
|
|
|
req = scsi_req_alloc(ops, d, tag, lun, hba_private);
|
2011-08-03 12:49:14 +04:00
|
|
|
} else {
|
2011-12-16 00:50:08 +04:00
|
|
|
req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
|
2011-08-03 12:49:14 +04:00
|
|
|
}
|
2011-08-03 12:49:13 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
req->cmd = cmd;
|
2021-12-31 13:13:34 +03:00
|
|
|
req->residual = req->cmd.xfer;
|
2011-07-06 13:55:37 +04:00
|
|
|
|
2011-08-03 12:49:19 +04:00
|
|
|
switch (buf[0]) {
|
|
|
|
case INQUIRY:
|
|
|
|
trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
|
|
|
|
break;
|
|
|
|
case TEST_UNIT_READY:
|
|
|
|
trace_scsi_test_unit_ready(d->id, lun, tag);
|
|
|
|
break;
|
|
|
|
case REPORT_LUNS:
|
|
|
|
trace_scsi_report_luns(d->id, lun, tag);
|
|
|
|
break;
|
|
|
|
case REQUEST_SENSE:
|
|
|
|
trace_scsi_request_sense(d->id, lun, tag);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-08-03 12:49:10 +04:00
|
|
|
return req;
|
2011-04-18 21:09:55 +04:00
|
|
|
}
|
|
|
|
|
2011-04-21 15:21:02 +04:00
|
|
|
uint8_t *scsi_req_get_buf(SCSIRequest *req)
|
|
|
|
{
|
2011-08-03 12:49:09 +04:00
|
|
|
return req->ops->get_buf(req);
|
2011-04-21 15:21:02 +04:00
|
|
|
}
|
|
|
|
|
2023-07-12 16:43:52 +03:00
|
|
|
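/*
 * Clear a pending REPORTED LUNS DATA HAS CHANGED unit attention, on either
 * the device or the bus, once REPORT LUNS has been serviced.
 */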
static void scsi_clear_reported_luns_changed(SCSIRequest *req)
|
2011-08-03 12:49:17 +04:00
|
|
|
{
|
|
|
|
SCSISense *ua;
|
2023-07-12 16:43:50 +03:00
|
|
|
|
2011-08-03 12:49:17 +04:00
|
|
|
if (req->dev->unit_attention.key == UNIT_ATTENTION) {
|
|
|
|
ua = &req->dev->unit_attention;
|
2023-07-12 16:43:51 +03:00
|
|
|
} else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
|
2011-08-03 12:49:17 +04:00
|
|
|
ua = &req->bus->unit_attention;
|
2023-07-12 16:43:51 +03:00
|
|
|
} else {
|
|
|
|
return;
|
2011-08-03 12:49:17 +04:00
|
|
|
}
|
|
|
|
|
2023-07-12 16:43:52 +03:00
|
|
|
if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
|
2023-07-12 16:43:51 +03:00
|
|
|
ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
|
|
|
|
*ua = SENSE_CODE(NO_SENSE);
|
2011-08-03 12:49:17 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-18 15:36:02 +04:00
|
|
|
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
|
|
|
|
{
|
2011-08-03 12:49:17 +04:00
|
|
|
int ret;
|
|
|
|
|
2011-08-03 12:49:07 +04:00
|
|
|
assert(len >= 14);
|
|
|
|
if (!req->sense_len) {
|
2011-04-18 15:36:02 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2011-08-03 12:49:17 +04:00
|
|
|
|
2017-08-22 10:31:36 +03:00
|
|
|
ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
|
2011-08-03 12:49:17 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* FIXME: clearing unit attention conditions upon autosense should be done
|
|
|
|
* only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
|
|
|
|
* (SAM-5, 5.14).
|
|
|
|
*
|
|
|
|
* We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
|
|
|
|
* 10b for HBAs that do not support it (do not call scsi_req_get_sense).
|
2011-09-13 18:19:53 +04:00
|
|
|
* Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
|
2011-08-03 12:49:17 +04:00
|
|
|
*/
|
2011-09-13 18:19:53 +04:00
|
|
|
if (req->dev->sense_is_ua) {
|
2011-12-16 00:50:08 +04:00
|
|
|
scsi_device_unit_attention_reported(req->dev);
|
2011-09-13 18:19:53 +04:00
|
|
|
req->dev->sense_len = 0;
|
|
|
|
req->dev->sense_is_ua = false;
|
|
|
|
}
|
2011-08-03 12:49:17 +04:00
|
|
|
return ret;
|
2011-08-03 12:49:07 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
|
|
|
|
{
|
2017-08-22 10:31:36 +03:00
|
|
|
return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
|
2011-08-03 12:49:07 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
|
|
|
|
{
|
|
|
|
trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
|
|
|
|
sense.key, sense.asc, sense.ascq);
|
2017-08-22 10:42:59 +03:00
|
|
|
req->sense_len = scsi_build_sense(req->sense, sense);
|
2011-04-18 15:36:02 +04:00
|
|
|
}
|
|
|
|
|
2011-12-02 19:27:02 +04:00
|
|
|
static void scsi_req_enqueue_internal(SCSIRequest *req)
|
2009-11-26 17:33:50 +03:00
|
|
|
{
|
2011-04-18 14:35:39 +04:00
|
|
|
assert(!req->enqueued);
|
|
|
|
scsi_req_ref(req);
|
2011-07-06 13:26:47 +04:00
|
|
|
if (req->bus->info->get_sg_list) {
|
|
|
|
req->sg = req->bus->info->get_sg_list(req);
|
|
|
|
} else {
|
|
|
|
req->sg = NULL;
|
|
|
|
}
|
2011-04-18 14:35:39 +04:00
|
|
|
req->enqueued = true;
|
|
|
|
QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
|
2011-12-02 19:27:02 +04:00
|
|
|
}
|
2011-04-18 21:07:23 +04:00
|
|
|
|
2011-12-02 19:27:02 +04:00
|
|
|
int32_t scsi_req_enqueue(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
int32_t rc;
|
|
|
|
|
|
|
|
assert(!req->retry);
|
|
|
|
scsi_req_enqueue_internal(req);
|
2011-04-18 21:07:23 +04:00
|
|
|
scsi_req_ref(req);
|
2011-08-03 12:49:10 +04:00
|
|
|
rc = req->ops->send_command(req, req->cmd.buf);
|
2011-04-18 21:07:23 +04:00
|
|
|
scsi_req_unref(req);
|
|
|
|
return rc;
|
2009-11-26 17:33:50 +03:00
|
|
|
}
|
|
|
|
|
2011-04-18 14:53:14 +04:00
|
|
|
static void scsi_req_dequeue(SCSIRequest *req)
|
2010-07-13 16:13:45 +04:00
|
|
|
{
|
2011-04-15 13:51:13 +04:00
|
|
|
trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
|
2011-10-25 14:53:36 +04:00
|
|
|
req->retry = false;
|
2010-07-13 16:13:45 +04:00
|
|
|
if (req->enqueued) {
|
|
|
|
QTAILQ_REMOVE(&req->dev->requests, req, next);
|
|
|
|
req->enqueued = false;
|
2011-04-18 18:01:56 +04:00
|
|
|
scsi_req_unref(req);
|
2010-07-13 16:13:45 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-14 17:31:49 +04:00
|
|
|
static int scsi_get_performance_length(int num_desc, int type, int data_type)
|
|
|
|
{
|
|
|
|
/* MMC-6, paragraph 6.7. */
|
|
|
|
switch (type) {
|
|
|
|
case 0:
|
|
|
|
if ((data_type & 3) == 0) {
|
|
|
|
/* Each descriptor is as in Table 295 - Nominal performance. */
|
|
|
|
return 16 * num_desc + 8;
|
|
|
|
} else {
|
|
|
|
/* Each descriptor is as in Table 296 - Exceptions. */
|
|
|
|
return 6 * num_desc + 8;
|
|
|
|
}
|
|
|
|
case 1:
|
|
|
|
case 4:
|
|
|
|
case 5:
|
|
|
|
return 8 * num_desc + 8;
|
|
|
|
case 2:
|
|
|
|
return 2048 * num_desc + 8;
|
|
|
|
case 3:
|
|
|
|
return 16 * num_desc + 8;
|
|
|
|
default:
|
|
|
|
return 8;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-03 08:02:19 +04:00
|
|
|
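/*
 * ATA PASS-THROUGH: the transfer length is counted in bytes, in 512-byte
 * units or in logical blocks, depending on bits 2 and 4 of CDB byte 2.
 */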
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
|
|
|
|
{
|
|
|
|
int byte_block = (buf[2] >> 2) & 0x1;
|
|
|
|
int type = (buf[2] >> 4) & 0x1;
|
|
|
|
int xfer_unit;
|
|
|
|
|
|
|
|
if (byte_block) {
|
|
|
|
if (type) {
|
|
|
|
xfer_unit = dev->blocksize;
|
|
|
|
} else {
|
|
|
|
xfer_unit = 512;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
xfer_unit = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return xfer_unit;
|
|
|
|
}
|
|
|
|
|
2014-10-29 15:00:04 +03:00
|
|
|
static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
|
2012-08-03 08:02:19 +04:00
|
|
|
{
|
|
|
|
int length = buf[2] & 0x3;
|
|
|
|
int xfer;
|
|
|
|
int unit = ata_passthrough_xfer_unit(dev, buf);
|
|
|
|
|
|
|
|
switch (length) {
|
|
|
|
case 0:
|
|
|
|
case 3: /* USB-specific. */
|
2012-08-13 15:05:44 +04:00
|
|
|
default:
|
2012-08-03 08:02:19 +04:00
|
|
|
xfer = 0;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
xfer = buf[3];
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
xfer = buf[4];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return xfer * unit;
|
|
|
|
}
|
|
|
|
|
2014-10-29 15:00:04 +03:00
|
|
|
static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
|
2012-08-03 08:02:19 +04:00
|
|
|
{
|
|
|
|
int extend = buf[1] & 0x1;
|
|
|
|
int length = buf[2] & 0x3;
|
|
|
|
int xfer;
|
|
|
|
int unit = ata_passthrough_xfer_unit(dev, buf);
|
|
|
|
|
|
|
|
switch (length) {
|
|
|
|
case 0:
|
|
|
|
case 3: /* USB-specific. */
|
2012-08-13 15:05:44 +04:00
|
|
|
default:
|
2012-08-03 08:02:19 +04:00
|
|
|
xfer = 0;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
xfer = buf[4];
|
|
|
|
xfer |= (extend ? buf[3] << 8 : 0);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
xfer = buf[6];
|
|
|
|
xfer |= (extend ? buf[5] << 8 : 0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return xfer * unit;
|
|
|
|
}
|
|
|
|
|
2014-10-29 15:00:04 +03:00
|
|
|
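/*
 * Default transfer-length computation, used directly for block devices and
 * as a fallback by the device-type specific helpers below.  The value from
 * scsi_cdb_xfer() is overridden for commands whose length field has a
 * different meaning or unit.
 */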
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
|
2012-09-05 19:57:19 +04:00
|
|
|
{
|
2014-10-29 15:00:04 +03:00
|
|
|
cmd->xfer = scsi_cdb_xfer(buf);
|
2011-08-03 12:49:11 +04:00
|
|
|
switch (buf[0]) {
|
2009-11-26 17:33:55 +03:00
|
|
|
case TEST_UNIT_READY:
|
2011-07-22 18:51:15 +04:00
|
|
|
case REWIND:
|
2009-11-26 17:33:55 +03:00
|
|
|
case START_STOP:
|
2011-11-14 17:31:48 +04:00
|
|
|
case SET_CAPACITY:
|
2009-11-26 17:33:55 +03:00
|
|
|
case WRITE_FILEMARKS:
|
2011-11-14 17:31:49 +04:00
|
|
|
case WRITE_FILEMARKS_16:
|
2009-11-26 17:33:55 +03:00
|
|
|
case SPACE:
|
2010-09-06 18:58:44 +04:00
|
|
|
case RESERVE:
|
|
|
|
case RELEASE:
|
2009-11-26 17:33:55 +03:00
|
|
|
case ERASE:
|
|
|
|
case ALLOW_MEDIUM_REMOVAL:
|
|
|
|
case SEEK_10:
|
|
|
|
case SYNCHRONIZE_CACHE:
|
2011-11-14 17:31:49 +04:00
|
|
|
case SYNCHRONIZE_CACHE_16:
|
|
|
|
case LOCATE_16:
|
2009-11-26 17:33:55 +03:00
|
|
|
case LOCK_UNLOCK_CACHE:
|
|
|
|
case SET_CD_SPEED:
|
|
|
|
case SET_LIMITS:
|
2011-07-22 18:51:15 +04:00
|
|
|
case WRITE_LONG_10:
|
2009-11-26 17:33:55 +03:00
|
|
|
case UPDATE_BLOCK:
|
2011-11-14 17:31:49 +04:00
|
|
|
case RESERVE_TRACK:
|
|
|
|
case SET_READ_AHEAD:
|
|
|
|
case PRE_FETCH:
|
|
|
|
case PRE_FETCH_16:
|
|
|
|
case ALLOW_OVERWRITE:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = 0;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2013-11-28 14:01:13 +04:00
|
|
|
case VERIFY_10:
|
|
|
|
case VERIFY_12:
|
|
|
|
case VERIFY_16:
|
|
|
|
if ((buf[1] & 2) == 0) {
|
|
|
|
cmd->xfer = 0;
|
2014-01-29 21:47:39 +04:00
|
|
|
} else if ((buf[1] & 4) != 0) {
|
2013-11-28 14:01:13 +04:00
|
|
|
cmd->xfer = 1;
|
|
|
|
}
|
|
|
|
cmd->xfer *= dev->blocksize;
|
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
case MODE_SENSE:
|
|
|
|
break;
|
2011-07-22 18:51:15 +04:00
|
|
|
case WRITE_SAME_10:
|
2012-02-08 13:40:37 +04:00
|
|
|
case WRITE_SAME_16:
|
2018-03-08 18:17:59 +03:00
|
|
|
cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2011-07-22 18:51:15 +04:00
|
|
|
case READ_CAPACITY_10:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = 8;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
|
|
|
case READ_BLOCK_LIMITS:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = 6;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
|
|
|
case SEND_VOLUME_TAG:
|
2011-11-14 17:31:49 +04:00
|
|
|
/* GPCMD_SET_STREAMING from multimedia commands. */
|
|
|
|
if (dev->type == TYPE_ROM) {
|
|
|
|
cmd->xfer = buf[10] | (buf[9] << 8);
|
|
|
|
} else {
|
|
|
|
cmd->xfer = buf[9] | (buf[8] << 8);
|
|
|
|
}
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2012-05-03 17:28:05 +04:00
|
|
|
case WRITE_6:
|
|
|
|
/* length 0 means 256 blocks */
|
|
|
|
if (cmd->xfer == 0) {
|
|
|
|
cmd->xfer = 256;
|
|
|
|
}
|
2014-05-16 19:44:05 +04:00
|
|
|
/* fall through */
|
2009-11-26 17:33:55 +03:00
|
|
|
case WRITE_10:
|
2011-07-22 18:51:15 +04:00
|
|
|
case WRITE_VERIFY_10:
|
2009-11-26 17:33:55 +03:00
|
|
|
case WRITE_12:
|
|
|
|
case WRITE_VERIFY_12:
|
2009-11-26 17:34:16 +03:00
|
|
|
case WRITE_16:
|
|
|
|
case WRITE_VERIFY_16:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer *= dev->blocksize;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
|
|
|
case READ_6:
|
|
|
|
case READ_REVERSE:
|
2012-05-03 17:28:05 +04:00
|
|
|
/* length 0 means 256 blocks */
|
|
|
|
if (cmd->xfer == 0) {
|
|
|
|
cmd->xfer = 256;
|
|
|
|
}
|
2014-05-16 19:44:05 +04:00
|
|
|
/* fall through */
|
2012-05-03 17:28:05 +04:00
|
|
|
case READ_10:
|
2009-11-26 17:33:55 +03:00
|
|
|
case READ_12:
|
2009-11-26 17:34:16 +03:00
|
|
|
case READ_16:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer *= dev->blocksize;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2011-11-14 17:31:49 +04:00
|
|
|
case FORMAT_UNIT:
|
|
|
|
/* MMC mandates the parameter list to be 12 bytes long. Parameters
|
|
|
|
* for block devices are restricted to the header right now. */
|
|
|
|
if (dev->type == TYPE_ROM && (buf[1] & 16)) {
|
|
|
|
cmd->xfer = 12;
|
|
|
|
} else {
|
|
|
|
cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
|
|
|
|
}
|
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
case INQUIRY:
|
2011-11-14 17:31:49 +04:00
|
|
|
case RECEIVE_DIAGNOSTIC:
|
|
|
|
case SEND_DIAGNOSTIC:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = buf[4] | (buf[3] << 8);
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2011-11-14 17:31:49 +04:00
|
|
|
case READ_CD:
|
|
|
|
case READ_BUFFER:
|
|
|
|
case WRITE_BUFFER:
|
|
|
|
case SEND_CUE_SHEET:
|
|
|
|
cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
|
|
|
|
break;
|
|
|
|
case PERSISTENT_RESERVE_OUT:
|
|
|
|
cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
|
|
|
|
break;
|
|
|
|
case ERASE_12:
|
|
|
|
if (dev->type == TYPE_ROM) {
|
|
|
|
/* MMC command GET PERFORMANCE. */
|
|
|
|
cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
|
|
|
|
buf[10], buf[1] & 0x1f);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case MECHANISM_STATUS:
|
|
|
|
case READ_DVD_STRUCTURE:
|
|
|
|
case SEND_DVD_STRUCTURE:
|
2010-06-16 17:43:06 +04:00
|
|
|
case MAINTENANCE_OUT:
|
|
|
|
case MAINTENANCE_IN:
|
2011-08-03 12:49:11 +04:00
|
|
|
if (dev->type == TYPE_ROM) {
|
2010-06-16 17:43:06 +04:00
|
|
|
/* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = buf[9] | (buf[8] << 8);
|
2010-06-16 17:43:06 +04:00
|
|
|
}
|
|
|
|
break;
|
2012-08-03 08:02:19 +04:00
|
|
|
case ATA_PASSTHROUGH_12:
|
|
|
|
if (dev->type == TYPE_ROM) {
|
|
|
|
/* BLANK command of MMC */
|
|
|
|
cmd->xfer = 0;
|
|
|
|
} else {
|
2014-10-29 15:00:04 +03:00
|
|
|
cmd->xfer = ata_passthrough_12_xfer(dev, buf);
|
2012-08-03 08:02:19 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ATA_PASSTHROUGH_16:
|
2014-10-29 15:00:04 +03:00
|
|
|
cmd->xfer = ata_passthrough_16_xfer(dev, buf);
|
2012-08-03 08:02:19 +04:00
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-29 15:00:04 +03:00
|
|
|
static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
|
2009-11-26 17:33:55 +03:00
|
|
|
{
|
2011-08-03 12:49:11 +04:00
|
|
|
switch (buf[0]) {
|
2009-11-26 17:33:55 +03:00
|
|
|
/* stream commands */
|
2011-11-14 17:31:49 +04:00
|
|
|
case ERASE_12:
|
|
|
|
case ERASE_16:
|
|
|
|
cmd->xfer = 0;
|
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
case READ_6:
|
|
|
|
case READ_REVERSE:
|
|
|
|
case RECOVER_BUFFERED_DATA:
|
|
|
|
case WRITE_6:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
|
|
|
|
if (buf[1] & 0x01) { /* fixed */
|
|
|
|
cmd->xfer *= dev->blocksize;
|
|
|
|
}
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2012-05-04 12:28:55 +04:00
|
|
|
case READ_16:
|
|
|
|
case READ_REVERSE_16:
|
|
|
|
case VERIFY_16:
|
|
|
|
case WRITE_16:
|
|
|
|
cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
|
|
|
|
if (buf[1] & 0x01) { /* fixed */
|
|
|
|
cmd->xfer *= dev->blocksize;
|
|
|
|
}
|
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
case REWIND:
|
2012-06-14 17:55:26 +04:00
|
|
|
case LOAD_UNLOAD:
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->xfer = 0;
|
2009-11-26 17:33:55 +03:00
|
|
|
break;
|
2011-11-14 17:31:49 +04:00
|
|
|
case SPACE_16:
|
|
|
|
cmd->xfer = buf[13] | (buf[12] << 8);
|
|
|
|
break;
|
|
|
|
case READ_POSITION:
|
2012-06-14 17:55:28 +04:00
|
|
|
switch (buf[1] & 0x1f) /* operation code */ {
|
|
|
|
case SHORT_FORM_BLOCK_ID:
|
|
|
|
case SHORT_FORM_VENDOR_SPECIFIC:
|
|
|
|
cmd->xfer = 20;
|
|
|
|
break;
|
|
|
|
case LONG_FORM:
|
|
|
|
cmd->xfer = 32;
|
|
|
|
break;
|
|
|
|
case EXTENDED_FORM:
|
|
|
|
cmd->xfer = buf[8] | (buf[7] << 8);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-11-14 17:31:49 +04:00
|
|
|
break;
|
|
|
|
case FORMAT_UNIT:
|
|
|
|
cmd->xfer = buf[4] | (buf[3] << 8);
|
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
/* generic commands */
|
|
|
|
default:
|
2014-10-29 15:00:04 +03:00
|
|
|
return scsi_req_xfer(cmd, dev, buf);
|
2009-11-26 17:33:55 +03:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-29 15:00:04 +03:00
|
|
|
static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
|
2012-06-14 17:55:27 +04:00
|
|
|
{
|
|
|
|
switch (buf[0]) {
|
|
|
|
/* medium changer commands */
|
|
|
|
case EXCHANGE_MEDIUM:
|
|
|
|
case INITIALIZE_ELEMENT_STATUS:
|
|
|
|
case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
|
|
|
|
case MOVE_MEDIUM:
|
|
|
|
case POSITION_TO_ELEMENT:
|
|
|
|
cmd->xfer = 0;
|
|
|
|
break;
|
|
|
|
case READ_ELEMENT_STATUS:
|
|
|
|
cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* generic commands */
|
|
|
|
default:
|
2014-10-29 15:00:04 +03:00
|
|
|
return scsi_req_xfer(cmd, dev, buf);
|
2012-06-14 17:55:27 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-28 21:51:52 +03:00
|
|
|
static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
|
|
|
|
{
|
|
|
|
switch (buf[0]) {
|
|
|
|
/* Scanner commands */
|
|
|
|
case OBJECT_POSITION:
|
|
|
|
cmd->xfer = 0;
|
|
|
|
break;
|
|
|
|
case SCAN:
|
|
|
|
cmd->xfer = buf[4];
|
|
|
|
break;
|
|
|
|
case READ_10:
|
|
|
|
case SEND:
|
|
|
|
case GET_WINDOW:
|
|
|
|
case SET_WINDOW:
|
|
|
|
cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
|
|
|
|
return scsi_req_xfer(cmd, dev, buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2012-06-14 17:55:27 +04:00
|
|
|
|
2011-08-03 12:49:11 +04:00
|
|
|
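/* Classify the data transfer direction of a CDB: none, to or from the device. */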
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
|
2009-11-26 17:33:57 +03:00
|
|
|
{
|
2012-02-08 13:40:37 +04:00
|
|
|
if (!cmd->xfer) {
|
|
|
|
cmd->mode = SCSI_XFER_NONE;
|
|
|
|
return;
|
|
|
|
}
|
2011-08-03 12:49:11 +04:00
|
|
|
switch (cmd->buf[0]) {
|
2009-11-26 17:33:57 +03:00
|
|
|
case WRITE_6:
|
|
|
|
case WRITE_10:
|
2011-07-22 18:51:15 +04:00
|
|
|
case WRITE_VERIFY_10:
|
2009-11-26 17:33:57 +03:00
|
|
|
case WRITE_12:
|
|
|
|
case WRITE_VERIFY_12:
|
2009-11-26 17:34:16 +03:00
|
|
|
case WRITE_16:
|
|
|
|
case WRITE_VERIFY_16:
|
2013-11-28 14:01:13 +04:00
|
|
|
case VERIFY_10:
|
|
|
|
case VERIFY_12:
|
|
|
|
case VERIFY_16:
|
2009-11-26 17:33:57 +03:00
|
|
|
case COPY:
|
|
|
|
case COPY_VERIFY:
|
|
|
|
case COMPARE:
|
|
|
|
case CHANGE_DEFINITION:
|
|
|
|
case LOG_SELECT:
|
|
|
|
case MODE_SELECT:
|
|
|
|
case MODE_SELECT_10:
|
|
|
|
case SEND_DIAGNOSTIC:
|
|
|
|
case WRITE_BUFFER:
|
|
|
|
case FORMAT_UNIT:
|
|
|
|
case REASSIGN_BLOCKS:
|
|
|
|
case SEARCH_EQUAL:
|
|
|
|
case SEARCH_HIGH:
|
|
|
|
case SEARCH_LOW:
|
|
|
|
case UPDATE_BLOCK:
|
2011-07-22 18:51:15 +04:00
|
|
|
case WRITE_LONG_10:
|
|
|
|
case WRITE_SAME_10:
|
2012-02-08 13:40:37 +04:00
|
|
|
case WRITE_SAME_16:
|
2012-04-28 17:49:36 +04:00
|
|
|
case UNMAP:
|
2009-11-26 17:33:57 +03:00
|
|
|
case SEARCH_HIGH_12:
|
|
|
|
case SEARCH_EQUAL_12:
|
|
|
|
case SEARCH_LOW_12:
|
|
|
|
case MEDIUM_SCAN:
|
|
|
|
case SEND_VOLUME_TAG:
|
2011-11-14 17:31:49 +04:00
|
|
|
case SEND_CUE_SHEET:
|
|
|
|
case SEND_DVD_STRUCTURE:
|
2010-06-16 17:42:58 +04:00
|
|
|
case PERSISTENT_RESERVE_OUT:
|
2010-06-16 17:43:06 +04:00
|
|
|
case MAINTENANCE_OUT:
|
2016-06-28 21:51:52 +03:00
|
|
|
case SET_WINDOW:
|
|
|
|
case SCAN:
|
|
|
|
/* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
|
|
|
|
* non-scanner devices, so we only get here for SCAN and not for START_STOP.
|
|
|
|
*/
|
2011-08-03 12:49:11 +04:00
|
|
|
cmd->mode = SCSI_XFER_TO_DEV;
|
2009-11-26 17:33:57 +03:00
|
|
|
break;
|
2012-08-03 08:02:19 +04:00
|
|
|
case ATA_PASSTHROUGH_12:
|
|
|
|
case ATA_PASSTHROUGH_16:
|
|
|
|
/* T_DIR */
|
|
|
|
cmd->mode = (cmd->buf[2] & 0x8) ?
|
|
|
|
SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
|
|
|
|
break;
|
2009-11-26 17:33:57 +03:00
|
|
|
default:
|
2012-02-08 13:40:37 +04:00
|
|
|
cmd->mode = SCSI_XFER_FROM_DEV;
|
2009-11-26 17:33:57 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-17 08:34:58 +03:00
|
|
|
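/*
 * Fill in a SCSICommand from a raw CDB: length, transfer size and direction,
 * and LBA.  The transfer size computation depends on the device type.
 */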
int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
|
|
|
|
size_t buf_len)
|
2014-10-29 15:00:04 +03:00
|
|
|
{
|
|
|
|
int rc;
|
2015-07-21 09:59:39 +03:00
|
|
|
int len;
|
2014-10-29 15:00:04 +03:00
|
|
|
|
|
|
|
cmd->lba = -1;
|
2015-07-21 09:59:39 +03:00
|
|
|
len = scsi_cdb_length(buf);
|
2022-08-17 08:35:00 +03:00
|
|
|
if (len < 0 || len > buf_len) {
|
2015-07-21 09:59:39 +03:00
|
|
|
return -1;
|
|
|
|
}
|
2012-06-14 18:13:49 +04:00
|
|
|
|
2015-07-21 09:59:39 +03:00
|
|
|
cmd->len = len;
|
2012-06-14 17:55:27 +04:00
|
|
|
switch (dev->type) {
|
|
|
|
case TYPE_TAPE:
|
2014-10-29 15:00:04 +03:00
|
|
|
rc = scsi_req_stream_xfer(cmd, dev, buf);
|
2012-06-14 17:55:27 +04:00
|
|
|
break;
|
|
|
|
case TYPE_MEDIUM_CHANGER:
|
2014-10-29 15:00:04 +03:00
|
|
|
rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
|
2012-06-14 17:55:27 +04:00
|
|
|
break;
|
2016-06-28 21:51:52 +03:00
|
|
|
case TYPE_SCANNER:
|
|
|
|
rc = scsi_req_scanner_length(cmd, dev, buf);
|
|
|
|
break;
|
2012-06-14 17:55:27 +04:00
|
|
|
default:
|
2014-10-29 15:00:04 +03:00
|
|
|
rc = scsi_req_xfer(cmd, dev, buf);
|
2012-06-14 17:55:27 +04:00
|
|
|
break;
|
2009-11-26 17:33:55 +03:00
|
|
|
}
|
2012-06-14 17:55:27 +04:00
|
|
|
|
2009-11-26 17:33:55 +03:00
|
|
|
if (rc != 0)
|
|
|
|
return rc;
|
|
|
|
|
2011-08-03 12:49:13 +04:00
|
|
|
memcpy(cmd->buf, buf, cmd->len);
|
|
|
|
scsi_cmd_xfer_mode(cmd);
|
|
|
|
cmd->lba = scsi_cmd_lba(cmd);
|
2009-11-26 17:33:55 +03:00
|
|
|
return 0;
|
|
|
|
}
|
2009-11-26 17:34:00 +03:00
|
|
|
|
2012-07-16 16:22:36 +04:00
|
|
|
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
|
|
|
|
{
|
|
|
|
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
|
|
|
|
|
|
|
|
scsi_device_set_ua(dev, sense);
|
|
|
|
if (bus->info->change) {
|
|
|
|
bus->info->change(bus, dev, sense);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-18 18:01:56 +04:00
|
|
|
SCSIRequest *scsi_req_ref(SCSIRequest *req)
|
|
|
|
{
|
2012-07-09 14:06:28 +04:00
|
|
|
assert(req->refcount > 0);
|
2011-04-18 18:01:56 +04:00
|
|
|
req->refcount++;
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
|
|
|
|
void scsi_req_unref(SCSIRequest *req)
|
|
|
|
{
|
2012-05-04 10:51:16 +04:00
|
|
|
assert(req->refcount > 0);
|
2011-04-18 18:01:56 +04:00
|
|
|
if (--req->refcount == 0) {
|
2013-06-03 16:09:39 +04:00
|
|
|
BusState *qbus = req->dev->qdev.parent_bus;
|
|
|
|
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
|
|
|
|
|
2012-07-09 14:06:28 +04:00
|
|
|
if (bus->info->free_request && req->hba_private) {
|
|
|
|
bus->info->free_request(bus, req->hba_private);
|
|
|
|
}
|
2011-08-03 12:49:09 +04:00
|
|
|
if (req->ops->free_req) {
|
|
|
|
req->ops->free_req(req);
|
2011-04-18 18:01:56 +04:00
|
|
|
}
|
2013-06-03 16:09:39 +04:00
|
|
|
object_unref(OBJECT(req->dev));
|
|
|
|
object_unref(OBJECT(qbus->parent));
|
2015-10-01 13:59:01 +03:00
|
|
|
g_free(req);
|
2011-04-18 18:01:56 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-18 17:28:11 +04:00
|
|
|
/* Tell the device that we finished processing this chunk of I/O. It
|
|
|
|
will start the next chunk or complete the command. */
|
|
|
|
void scsi_req_continue(SCSIRequest *req)
|
|
|
|
{
|
2013-02-25 15:12:58 +04:00
|
|
|
if (req->io_canceled) {
|
|
|
|
trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
|
|
|
|
return;
|
|
|
|
}
|
2011-04-18 17:28:11 +04:00
|
|
|
trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
|
|
|
|
if (req->cmd.mode == SCSI_XFER_TO_DEV) {
|
2011-08-03 12:49:09 +04:00
|
|
|
req->ops->write_data(req);
|
2011-04-18 17:28:11 +04:00
|
|
|
} else {
|
2011-08-03 12:49:09 +04:00
|
|
|
req->ops->read_data(req);
|
2011-04-18 17:28:11 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-18 16:59:13 +04:00
|
|
|
/* Called by the devices when data is ready for the HBA. The HBA should
|
|
|
|
start a DMA operation to read or fill the device's data buffer.
|
2011-04-18 17:28:11 +04:00
|
|
|
Once it completes, calling scsi_req_continue will restart I/O. */
|
2011-04-18 16:59:13 +04:00
|
|
|
void scsi_req_data(SCSIRequest *req, int len)
|
|
|
|
{
|
2011-07-06 13:26:47 +04:00
|
|
|
uint8_t *buf;
|
2011-10-25 14:53:33 +04:00
|
|
|
if (req->io_canceled) {
|
|
|
|
trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
|
2011-07-06 13:55:37 +04:00
|
|
|
return;
|
2011-10-25 14:53:33 +04:00
|
|
|
}
|
2011-07-06 13:55:37 +04:00
|
|
|
trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
|
|
|
|
assert(req->cmd.mode != SCSI_XFER_NONE);
|
2011-07-06 13:26:47 +04:00
|
|
|
if (!req->sg) {
|
2021-12-31 13:13:34 +03:00
|
|
|
req->residual -= len;
|
2011-07-06 13:26:47 +04:00
|
|
|
req->bus->info->transfer_data(req, len);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the device calls scsi_req_data and the HBA specified a
|
|
|
|
* scatter/gather list, the transfer has to happen in a single
|
|
|
|
* step. */
|
|
|
|
assert(!req->dma_started);
|
|
|
|
req->dma_started = true;
|
|
|
|
|
|
|
|
buf = scsi_req_get_buf(req);
|
|
|
|
if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
|
2021-12-16 11:36:38 +03:00
|
|
|
dma_buf_read(buf, len, &req->residual, req->sg,
|
|
|
|
MEMTXATTRS_UNSPECIFIED);
|
2011-07-06 13:26:47 +04:00
|
|
|
} else {
|
2021-12-16 11:36:38 +03:00
|
|
|
dma_buf_write(buf, len, &req->residual, req->sg,
|
|
|
|
MEMTXATTRS_UNSPECIFIED);
|
2011-07-06 13:26:47 +04:00
|
|
|
}
|
|
|
|
scsi_req_continue(req);
|
2011-04-18 16:59:13 +04:00
|
|
|
}
|
|
|
|
|
2009-11-26 17:34:01 +03:00
|
|
|
void scsi_req_print(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
FILE *fp = stderr;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
fprintf(fp, "[%s id=%d] %s",
|
|
|
|
req->dev->qdev.parent_bus->name,
|
|
|
|
req->dev->id,
|
|
|
|
scsi_command_name(req->cmd.buf[0]));
|
|
|
|
for (i = 1; i < req->cmd.len; i++) {
|
|
|
|
fprintf(fp, " 0x%02x", req->cmd.buf[i]);
|
|
|
|
}
|
|
|
|
switch (req->cmd.mode) {
|
|
|
|
case SCSI_XFER_NONE:
|
|
|
|
fprintf(fp, " - none\n");
|
|
|
|
break;
|
|
|
|
case SCSI_XFER_FROM_DEV:
|
|
|
|
fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
|
|
|
|
break;
|
|
|
|
case SCSI_XFER_TO_DEV:
|
|
|
|
fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
fprintf(fp, " - Oops\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-24 21:14:50 +03:00
|
|
|
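/*
 * Complete a request that failed with a transport (host) error.  HBAs that
 * do not implement the fail callback get the error translated into a SCSI
 * status and sense instead.
 */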
void scsi_req_complete_failed(SCSIRequest *req, int host_status)
|
|
|
|
{
|
|
|
|
SCSISense sense;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
assert(req->status == -1 && req->host_status == -1);
|
|
|
|
assert(req->ops != &reqops_unit_attention);
|
|
|
|
|
|
|
|
if (!req->bus->info->fail) {
|
|
|
|
status = scsi_sense_from_host_status(req->host_status, &sense);
|
|
|
|
if (status == CHECK_CONDITION) {
|
|
|
|
scsi_req_build_sense(req, sense);
|
|
|
|
}
|
|
|
|
scsi_req_complete(req, status);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
req->host_status = host_status;
|
|
|
|
scsi_req_ref(req);
|
|
|
|
scsi_req_dequeue(req);
|
|
|
|
req->bus->info->fail(req);
|
|
|
|
|
|
|
|
/* Cancelled requests might end up being completed instead of cancelled */
|
|
|
|
notifier_list_notify(&req->cancel_notifiers, req);
|
|
|
|
scsi_req_unref(req);
|
|
|
|
}
|
|
|
|
|
2011-08-03 12:49:06 +04:00
|
|
|
void scsi_req_complete(SCSIRequest *req, int status)
|
2009-11-26 17:34:00 +03:00
|
|
|
{
|
2021-02-24 21:14:50 +03:00
|
|
|
assert(req->status == -1 && req->host_status == -1);
|
2011-08-03 12:49:06 +04:00
|
|
|
req->status = status;
|
2021-02-24 21:14:50 +03:00
|
|
|
req->host_status = SCSI_HOST_OK;
|
2011-08-03 12:49:07 +04:00
|
|
|
|
2012-06-14 17:55:25 +04:00
|
|
|
assert(req->sense_len <= sizeof(req->sense));
|
2011-08-03 12:49:07 +04:00
|
|
|
if (status == GOOD) {
|
|
|
|
req->sense_len = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req->sense_len) {
|
|
|
|
memcpy(req->dev->sense, req->sense, req->sense_len);
|
2011-09-13 18:19:53 +04:00
|
|
|
req->dev->sense_len = req->sense_len;
|
|
|
|
req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
|
|
|
|
} else {
|
|
|
|
req->dev->sense_len = 0;
|
|
|
|
req->dev->sense_is_ua = false;
|
2011-08-03 12:49:07 +04:00
|
|
|
}
|
|
|
|
|
2011-04-18 18:01:56 +04:00
|
|
|
scsi_req_ref(req);
|
2010-07-13 16:13:45 +04:00
|
|
|
scsi_req_dequeue(req);
|
2021-12-31 13:13:34 +03:00
|
|
|
req->bus->info->complete(req, req->residual);
|
2014-09-28 05:48:00 +04:00
|
|
|
|
|
|
|
/* Cancelled requests might end up being completed instead of cancelled */
|
|
|
|
notifier_list_notify(&req->cancel_notifiers, req);
|
2011-04-18 18:01:56 +04:00
|
|
|
scsi_req_unref(req);
|
2009-11-26 17:34:00 +03:00
|
|
|
}
|
2010-12-08 14:35:04 +03:00
|
|
|
|
2014-09-25 06:20:47 +04:00
|
|
|
/* Called by the devices when the request is canceled. */
|
|
|
|
void scsi_req_cancel_complete(SCSIRequest *req)
|
|
|
|
{
|
|
|
|
assert(req->io_canceled);
|
|
|
|
if (req->bus->info->cancel) {
|
|
|
|
req->bus->info->cancel(req);
|
|
|
|
}
|
2014-09-28 05:48:00 +04:00
|
|
|
notifier_list_notify(&req->cancel_notifiers, req);
|
2014-09-25 06:20:47 +04:00
|
|
|
scsi_req_unref(req);
|
|
|
|
}
|
|
|
|
|
2014-09-28 05:48:00 +04:00
|
|
|
/* Cancel @req asynchronously. @notifier is added to @req's cancellation
|
|
|
|
* notifier list; the bus will be notified when the request's cancellation is
|
|
|
|
* completed.
|
|
|
|
*/
|
|
|
|
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
|
|
|
|
{
|
|
|
|
trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
|
|
|
|
if (notifier) {
|
|
|
|
notifier_list_add(&req->cancel_notifiers, notifier);
|
|
|
|
}
|
2015-12-18 11:54:53 +03:00
|
|
|
if (req->io_canceled) {
|
|
|
|
/* A blk_aio_cancel_async is pending; when it finishes,
|
|
|
|
* scsi_req_cancel_complete will be called and will
|
|
|
|
* call the notifier we just added. Just wait for that.
|
|
|
|
*/
|
|
|
|
assert(req->aiocb);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* Dropped in scsi_req_cancel_complete. */
|
2014-09-28 05:48:00 +04:00
|
|
|
scsi_req_ref(req);
|
|
|
|
scsi_req_dequeue(req);
|
|
|
|
req->io_canceled = true;
|
|
|
|
if (req->aiocb) {
|
2014-10-07 15:59:18 +04:00
|
|
|
blk_aio_cancel_async(req->aiocb);
|
2015-01-27 12:16:59 +03:00
|
|
|
} else {
|
|
|
|
scsi_req_cancel_complete(req);
|
2014-09-28 05:48:00 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-19 00:53:08 +04:00
|
|
|
void scsi_req_cancel(SCSIRequest *req)
|
|
|
|
{
|
2012-07-27 10:23:07 +04:00
|
|
|
trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
|
2011-10-25 14:53:33 +04:00
|
|
|
if (!req->enqueued) {
|
|
|
|
return;
|
2011-04-19 00:53:08 +04:00
|
|
|
}
|
2015-12-18 11:54:53 +03:00
|
|
|
assert(!req->io_canceled);
|
|
|
|
/* Dropped in scsi_req_cancel_complete. */
|
2011-04-19 00:53:08 +04:00
|
|
|
scsi_req_ref(req);
|
|
|
|
scsi_req_dequeue(req);
|
2011-10-25 14:53:33 +04:00
|
|
|
req->io_canceled = true;
|
2014-09-24 12:27:55 +04:00
|
|
|
if (req->aiocb) {
|
2014-10-07 15:59:18 +04:00
|
|
|
blk_aio_cancel(req->aiocb);
|
2015-01-12 13:47:30 +03:00
|
|
|
} else {
|
|
|
|
scsi_req_cancel_complete(req);
|
2011-10-25 14:53:33 +04:00
|
|
|
}
|
2011-04-19 00:53:08 +04:00
|
|
|
}
|
|
|
|
|
2012-07-16 16:18:58 +04:00
|
|
|
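/*
 * Rank unit attention conditions; lower values take precedence when deciding
 * whether a new condition overrides a pending one.
 */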
static int scsi_ua_precedence(SCSISense sense)
|
|
|
|
{
|
|
|
|
if (sense.key != UNIT_ATTENTION) {
|
|
|
|
return INT_MAX;
|
|
|
|
}
|
|
|
|
if (sense.asc == 0x29 && sense.ascq == 0x04) {
|
|
|
|
/* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
|
|
|
|
return 1;
|
|
|
|
} else if (sense.asc == 0x3F && sense.ascq == 0x01) {
|
|
|
|
/* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
|
|
|
|
return 2;
|
|
|
|
} else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
|
|
|
|
/* These two go with "all others". */
|
|
|
|
;
|
|
|
|
} else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
|
|
|
|
/* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
|
|
|
|
* POWER ON OCCURRED = 1
|
|
|
|
* SCSI BUS RESET OCCURRED = 2
|
|
|
|
* BUS DEVICE RESET FUNCTION OCCURRED = 3
|
|
|
|
* I_T NEXUS LOSS OCCURRED = 7
|
|
|
|
*/
|
|
|
|
return sense.ascq;
|
|
|
|
} else if (sense.asc == 0x2F && sense.ascq == 0x01) {
|
|
|
|
/* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
|
|
|
|
return 8;
|
|
|
|
}
|
|
|
|
return (sense.asc << 8) | sense.ascq;
|
|
|
|
}
|
|
|
|
|
2022-10-06 22:49:46 +03:00
|
|
|
void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
|
|
|
|
{
|
|
|
|
int prec1, prec2;
|
|
|
|
if (sense.key != UNIT_ATTENTION) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Override a pre-existing unit attention condition, except for a more
|
|
|
|
* important reset condition.
|
|
|
|
*/
|
|
|
|
prec1 = scsi_ua_precedence(bus->unit_attention);
|
|
|
|
prec2 = scsi_ua_precedence(sense);
|
|
|
|
if (prec2 < prec1) {
|
|
|
|
bus->unit_attention = sense;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-16 16:18:58 +04:00
|
|
|
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
|
|
|
|
{
|
|
|
|
int prec1, prec2;
|
|
|
|
if (sense.key != UNIT_ATTENTION) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
|
|
|
|
sense.asc, sense.ascq);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Override a pre-existing unit attention condition, except for a more
|
|
|
|
* important reset condition.
|
|
|
|
*/
|
|
|
|
prec1 = scsi_ua_precedence(sdev->unit_attention);
|
|
|
|
prec2 = scsi_ua_precedence(sense);
|
|
|
|
if (prec2 < prec1) {
|
|
|
|
sdev->unit_attention = sense;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-04 19:42:56 +03:00
|
|
|
static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
|
|
|
|
{
|
|
|
|
scsi_req_cancel_async(req, NULL);
|
|
|
|
}
|
|
|
|
|
2024-02-02 17:47:55 +03:00
|
|
|
/**
|
|
|
|
* Cancel all requests, and block until they are deleted.
|
|
|
|
*/
|
2011-08-03 12:49:18 +04:00
|
|
|
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
|
2011-04-18 20:47:12 +04:00
|
|
|
{
|
2023-12-04 19:42:56 +03:00
|
|
|
scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
|
2011-04-18 20:47:12 +04:00
|
|
|
|
2024-02-02 17:47:55 +03:00
|
|
|
/*
|
|
|
|
* Await all the scsi_device_purge_one_req() calls scheduled by
|
|
|
|
* scsi_device_for_each_req_async(), and all I/O requests that were
|
|
|
|
* cancelled this way, but may still take a bit of time to settle.
|
|
|
|
*/
|
2015-12-16 21:33:43 +03:00
|
|
|
blk_drain(sdev->conf.blk);
|
2024-02-02 17:47:55 +03:00
|
|
|
|
2012-07-16 16:18:58 +04:00
|
|
|
scsi_device_set_ua(sdev, sense);
|
2011-04-18 20:47:12 +04:00
|
|
|
}
|
|
|
|
|
2023-05-16 22:02:36 +03:00
|
|
|
void scsi_device_drained_begin(SCSIDevice *sdev)
|
|
|
|
{
|
|
|
|
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
|
|
|
|
if (!bus) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
|
|
|
assert(bus->drain_count < INT_MAX);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Multiple BlockBackends can be on a SCSIBus and each may begin/end
|
|
|
|
* draining at any time. Keep a counter so HBAs only see begin/end once.
|
|
|
|
*/
|
|
|
|
if (bus->drain_count++ == 0) {
|
|
|
|
trace_scsi_bus_drained_begin(bus, sdev);
|
|
|
|
if (bus->info->drained_begin) {
|
|
|
|
bus->info->drained_begin(bus);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void scsi_device_drained_end(SCSIDevice *sdev)
|
|
|
|
{
|
|
|
|
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
|
|
|
|
if (!bus) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
|
|
|
assert(bus->drain_count > 0);
|
|
|
|
|
|
|
|
if (bus->drain_count-- == 1) {
|
|
|
|
trace_scsi_bus_drained_end(bus, sdev);
|
|
|
|
if (bus->info->drained_end) {
|
|
|
|
bus->info->drained_end(bus);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-19 18:30:40 +04:00
|
|
|
static char *scsibus_get_dev_path(DeviceState *dev)
|
|
|
|
{
|
2016-01-06 12:37:46 +03:00
|
|
|
SCSIDevice *d = SCSI_DEVICE(dev);
|
2012-03-19 18:30:40 +04:00
|
|
|
DeviceState *hba = dev->parent_bus->parent;
|
2012-02-03 22:28:43 +04:00
|
|
|
char *id;
|
2012-04-06 16:12:42 +04:00
|
|
|
char *path;
|
2012-03-19 18:30:40 +04:00
|
|
|
|
2012-02-03 22:28:43 +04:00
|
|
|
id = qdev_get_dev_path(hba);
|
2012-03-19 18:30:40 +04:00
|
|
|
if (id) {
|
2012-04-06 16:12:42 +04:00
|
|
|
path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
|
2012-03-19 18:30:40 +04:00
|
|
|
} else {
|
2012-04-06 16:12:42 +04:00
|
|
|
path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
|
2012-03-19 18:30:40 +04:00
|
|
|
}
|
2012-04-06 16:12:42 +04:00
|
|
|
g_free(id);
|
|
|
|
return path;
|
2012-03-19 18:30:40 +04:00
|
|
|
}
|
|
|
|
|
2010-12-08 14:35:04 +03:00
|
|
|
static char *scsibus_get_fw_dev_path(DeviceState *dev)
|
|
|
|
{
|
2011-12-16 00:50:08 +04:00
|
|
|
SCSIDevice *d = SCSI_DEVICE(dev);
|
2012-10-04 15:09:44 +04:00
|
|
|
return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
|
|
|
|
qdev_fw_name(dev), d->id, d->lun);
|
2011-07-28 20:02:13 +04:00
|
|
|
}
|
|
|
|
|
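As an illustration of the two formats above: for a device with channel=0,
scsi-id=3, lun=0, scsibus_get_dev_path() yields "<hba-path>/0:3:0" (or just
"0:3:0" if the HBA has no qdev path), while scsibus_get_fw_dev_path() yields
"channel@0/<fw_name>@3,0", with the numbers printed in hex and <fw_name>
being whatever qdev_fw_name() returns for the concrete device type.
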
2011-12-02 19:27:02 +04:00
/* SCSI request list. For simplicity, pv points to the whole device */

2023-12-04 19:42:56 +03:00
static void put_scsi_req(SCSIRequest *req, void *opaque)
{
    QEMUFile *f = opaque;

    assert(!req->io_canceled);
    assert(req->status == -1 && req->host_status == -1);
    assert(req->enqueued);

    qemu_put_sbyte(f, req->retry ? 1 : 2);
    qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
    qemu_put_be32s(f, &req->tag);
    qemu_put_be32s(f, &req->lun);
    if (req->bus->info->save_request) {
        req->bus->info->save_request(f, req);
    }
    if (req->ops->save_request) {
        req->ops->save_request(f, req);
    }
}

2017-01-19 22:00:50 +03:00
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
2020-12-11 20:11:48 +03:00
                             const VMStateField *field, JSONWriter *vmdesc)
2011-12-02 19:27:02 +04:00
{
    SCSIDevice *s = pv;

2023-12-04 19:42:56 +03:00
    scsi_device_for_each_req_sync(s, put_scsi_req, f);
2011-12-02 19:27:02 +04:00
    qemu_put_sbyte(f, 0);
2017-01-19 22:00:50 +03:00
    return 0;
2011-12-02 19:27:02 +04:00
}

2017-01-19 22:00:50 +03:00
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
2018-11-14 16:29:30 +03:00
                             const VMStateField *field)
2011-12-02 19:27:02 +04:00
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
2012-05-16 16:20:03 +04:00
    int8_t sbyte;
2011-12-02 19:27:02 +04:00

2012-05-16 16:20:03 +04:00
    while ((sbyte = qemu_get_sbyte(f)) > 0) {
2011-12-02 19:27:02 +04:00
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
2022-08-17 08:34:58 +03:00
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
2012-05-16 16:20:03 +04:00
        req->retry = (sbyte == 1);
2011-12-02 19:27:02 +04:00
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later. */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

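A hedged sketch of the HBA half of this protocol, assuming a hypothetical
per-request structure MyHBAReq: save_request appends the controller's own
state after the generic fields written above, and load_request reads it back,
optionally taking the extra SCSIRequest reference mentioned in the comment in
get_scsi_requests.

typedef struct MyHBAReq {
    SCSIRequest *sreq;
    uint32_t queue_index;
} MyHBAReq;

static void my_hba_save_request(QEMUFile *f, SCSIRequest *req)
{
    MyHBAReq *r = req->hba_private;

    qemu_put_be32(f, r->queue_index);
}

static void *my_hba_load_request(QEMUFile *f, SCSIRequest *req)
{
    MyHBAReq *r = g_new0(MyHBAReq, 1);

    r->queue_index = qemu_get_be32(f);
    r->sreq = scsi_req_ref(req);    /* keep the SCSIRequest alive */
    return r;                       /* stored into req->hba_private by the caller */
}
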
2012-05-21 14:03:10 +04:00
static const VMStateInfo vmstate_info_scsi_requests = {
2011-12-02 19:27:02 +04:00
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};

2014-03-06 12:26:02 +04:00
static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
2014-09-23 16:09:54 +04:00
    .needed = scsi_sense_state_needed,
2023-12-21 06:16:32 +03:00
    .fields = (const VMStateField[]) {
2014-03-06 12:26:02 +04:00
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

2011-12-02 19:27:02 +04:00
const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
2023-12-21 06:16:32 +03:00
    .fields = (const VMStateField[]) {
2011-12-02 19:27:02 +04:00
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
2014-03-06 12:26:02 +04:00
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
2011-12-02 19:27:02 +04:00
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
2014-03-06 12:26:02 +04:00
    },
2023-12-21 06:16:32 +03:00
    .subsections = (const VMStateDescription * const []) {
2014-09-23 16:09:54 +04:00
        &vmstate_scsi_sense_state,
        NULL
2011-12-02 19:27:02 +04:00
    }
};

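Concrete device models embed this description through the
VMSTATE_SCSI_DEVICE() macro from hw/scsi/scsi.h. A hedged sketch, assuming a
hypothetical MySCSIDiskState whose embedded SCSIDevice member is called qdev:

static const VMStateDescription vmstate_my_scsi_disk = {
    .name = "my-scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, MySCSIDiskState), /* embeds vmstate_scsi_device */
        /* ... device-specific fields would follow ... */
        VMSTATE_END_OF_LIST()
    }
};
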
2020-10-06 15:38:56 +03:00
static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

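These are the properties that -device scsi-hd,channel=...,scsi-id=...,lun=...
sets from the command line. Programmatically, attaching a device to a SCSIBus
looks roughly like the following sketch (illustrative only; a real caller must
also configure a block backend, e.g. via the "drive" property, before realize
can succeed):

static void example_attach_disk(SCSIBus *bus)
{
    DeviceState *dev = qdev_new("scsi-hd");   /* any concrete SCSI device type */

    qdev_prop_set_uint32(dev, "channel", 0);
    qdev_prop_set_uint32(dev, "scsi-id", 3);
    qdev_prop_set_uint32(dev, "lun", 0);
    qdev_realize_and_unref(dev, &bus->qbus, &error_fatal);
}
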
2011-12-08 07:34:16 +04:00
static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
2013-07-29 18:17:45 +04:00
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
2014-08-12 06:12:55 +04:00
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
2020-01-10 18:30:32 +03:00
    device_class_set_props(k, scsi_props);
2011-12-08 07:34:16 +04:00
}

2014-10-07 12:00:28 +04:00
static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
2016-01-06 12:37:46 +03:00
    SCSIDevice *s = SCSI_DEVICE(dev);
2014-10-07 12:00:28 +04:00

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
2020-05-05 18:29:23 +03:00
                                  &s->qdev);
2014-10-07 12:00:28 +04:00
}

2013-01-10 19:19:07 +04:00
static const TypeInfo scsi_device_type_info = {
2011-12-16 00:50:08 +04:00
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
2011-12-08 07:34:16 +04:00
    .class_init = scsi_device_class_init,
2014-10-07 12:00:28 +04:00
    .instance_init = scsi_dev_instance_init,
2011-12-16 00:50:08 +04:00
};

2020-10-06 15:38:56 +03:00
static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

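An HBA instantiates a bus of this type with scsi_bus_init() from its realize
function. A hedged sketch (MyHBAState and MY_HBA are hypothetical, and
my_hba_scsi_info refers to the earlier SCSIBusInfo sketch):

static void my_hba_realize(DeviceState *dev, Error **errp)
{
    MyHBAState *s = MY_HBA(dev);   /* hypothetical HBA state/type */

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &my_hba_scsi_info);
    /* SCSI devices created with bus=<this bus> now attach to TYPE_SCSI_BUS. */
}
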
2012-02-09 18:20:55 +04:00
static void scsi_register_types(void)
2011-12-16 00:50:08 +04:00
{
2012-05-02 11:00:20 +04:00
    type_register_static(&scsi_bus_info);
2011-12-16 00:50:08 +04:00
    type_register_static(&scsi_device_type_info);
}

2012-02-09 18:20:55 +04:00
type_init(scsi_register_types)