/*
 * Sharing QEMU block devices via vhost-user protocol
 *
 * Parts of the code based on nbd/server.c.
 *
 * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "block/block.h"
#include "subprojects/libvhost-user/libvhost-user.h" /* only for the type definitions */
#include "standard-headers/linux/virtio_blk.h"
#include "qemu/vhost-user-server.h"
#include "vhost-user-blk-server.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "util/block-helpers.h"
#include "virtio-blk-handler.h"

enum {
    VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
};

typedef struct VuBlkReq {
    VuVirtqElement elem;
    VuServer *server;
    struct VuVirtq *vq;
} VuBlkReq;

/* vhost user block device */
typedef struct {
    BlockExport export;
    VuServer vu_server;
    VirtioBlkHandler handler;
    QIOChannelSocket *sioc;
    struct virtio_blk_config blkcfg;
} VuBlkExport;

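/*
 * Complete a request: push the descriptor chain onto the used ring with
 * in_len bytes written, notify the vhost-user front-end and free the
 * request.
 */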
static void vu_blk_req_complete(VuBlkReq *req, size_t in_len)
{
    VuDev *vu_dev = &req->server->vu_dev;

    vu_queue_push(vu_dev, req->vq, &req->elem, in_len);
    vu_queue_notify(vu_dev, req->vq);

    free(req);
}

/*
 * Called with server in_flight counter increased, must decrease before
 * returning.
 */
static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
{
    VuBlkReq *req = opaque;
    VuServer *server = req->server;
    VuVirtqElement *elem = &req->elem;
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    VirtioBlkHandler *handler = &vexp->handler;
    struct iovec *in_iov = elem->in_sg;
    struct iovec *out_iov = elem->out_sg;
    unsigned in_num = elem->in_num;
    unsigned out_num = elem->out_num;
    int in_len;

    in_len = virtio_blk_process_req(handler, in_iov, out_iov,
                                    in_num, out_num);
    if (in_len < 0) {
        free(req);
        vhost_user_server_dec_in_flight(server);
        return;
    }

    vu_blk_req_complete(req, in_len);
    vhost_user_server_dec_in_flight(server);
}

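/*
 * Virtqueue handler: pop all available requests and process each one in its
 * own coroutine. The server's in-flight counter is incremented per request
 * and decremented again in vu_blk_virtio_process_req().
 */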
static void vu_blk_process_vq(VuDev *vu_dev, int idx)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuVirtq *vq = vu_get_queue(vu_dev, idx);

    while (1) {
        VuBlkReq *req;

        req = vu_queue_pop(vu_dev, vq, sizeof(VuBlkReq));
        if (!req) {
            break;
        }

        req->server = server;
        req->vq = vq;

        Coroutine *co =
            qemu_coroutine_create(vu_blk_virtio_process_req, req);

        vhost_user_server_inc_in_flight(server);
        qemu_coroutine_enter(co);
    }
}

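/* Install or remove the virtqueue handler when a queue is started/stopped */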
static void vu_blk_queue_set_started(VuDev *vu_dev, int idx, bool started)
{
    VuVirtq *vq;

    assert(vu_dev);

    vq = vu_get_queue(vu_dev, idx);
    vu_set_queue_handler(vu_dev, vq, started ? vu_blk_process_vq : NULL);
}

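/*
 * Advertise the supported virtio-blk and vhost-user feature bits. The device
 * is exposed read-only (VIRTIO_BLK_F_RO) when the export is not writable.
 */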
static uint64_t vu_blk_get_features(VuDev *dev)
{
    uint64_t features;
    VuServer *server = container_of(dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    features = 1ull << VIRTIO_BLK_F_SIZE_MAX |
               1ull << VIRTIO_BLK_F_SEG_MAX |
               1ull << VIRTIO_BLK_F_TOPOLOGY |
               1ull << VIRTIO_BLK_F_BLK_SIZE |
               1ull << VIRTIO_BLK_F_FLUSH |
               1ull << VIRTIO_BLK_F_DISCARD |
               1ull << VIRTIO_BLK_F_WRITE_ZEROES |
               1ull << VIRTIO_BLK_F_CONFIG_WCE |
               1ull << VIRTIO_BLK_F_MQ |
               1ull << VIRTIO_F_VERSION_1 |
               1ull << VIRTIO_RING_F_INDIRECT_DESC |
               1ull << VIRTIO_RING_F_EVENT_IDX |
               1ull << VHOST_USER_F_PROTOCOL_FEATURES;

    if (!vexp->handler.writable) {
        features |= 1ull << VIRTIO_BLK_F_RO;
    }

    return features;
}

static uint64_t vu_blk_get_protocol_features(VuDev *dev)
{
    return 1ull << VHOST_USER_PROTOCOL_F_CONFIG;
}

static int
vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);

    if (len > sizeof(struct virtio_blk_config)) {
        return -1;
    }

    memcpy(config, &vexp->blkcfg, len);
    return 0;
}

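/*
 * Only the writeback cache enable (wce) byte of the config space is
 * writable; any other config space write is rejected.
 */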
static int
vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
                  uint32_t offset, uint32_t size, uint32_t flags)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    uint8_t wce;

    /* don't support live migration */
    if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
        return -EINVAL;
    }

    if (offset != offsetof(struct virtio_blk_config, wce) ||
        size != 1) {
        return -EINVAL;
    }

    wce = *data;
    vexp->blkcfg.wce = wce;
    blk_set_enable_write_cache(vexp->export.blk, wce);
    return 0;
}

/*
 * When the client disconnects, it sends a VHOST_USER_NONE request
 * and vu_process_message will simply call exit, which causes the VM
 * to exit abruptly.
 * To avoid this issue, process the VHOST_USER_NONE request ahead
 * of vu_process_message.
 */
static int vu_blk_process_msg(VuDev *dev, VhostUserMsg *vmsg, int *do_reply)
{
    if (vmsg->request == VHOST_USER_NONE) {
        dev->panic(dev, "disconnect");
        return true;
    }
    return false;
}

static const VuDevIface vu_blk_iface = {
    .get_features = vu_blk_get_features,
    .queue_set_started = vu_blk_queue_set_started,
    .get_protocol_features = vu_blk_get_protocol_features,
    .get_config = vu_blk_get_config,
    .set_config = vu_blk_set_config,
    .process_msg = vu_blk_process_msg,
};

static void blk_aio_attached(AioContext *ctx, void *opaque)
{
    VuBlkExport *vexp = opaque;

    /*
     * The actual attach will happen in vu_blk_drained_end() and we just
     * restore ctx here.
     */
    vexp->export.ctx = ctx;
}

static void blk_aio_detach(void *opaque)
{
    VuBlkExport *vexp = opaque;

    /*
     * The actual detach already happened in vu_blk_drained_begin() but from
     * this point on we must not access ctx anymore.
     */
    vexp->export.ctx = NULL;
}

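/*
 * Fill in the virtio-blk config space advertised to the vhost-user
 * front-end: capacity, block size, queue count and discard/write-zeroes
 * limits.
 */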
static void
vu_blk_initialize_config(BlockDriverState *bs,
                         struct virtio_blk_config *config,
                         uint32_t blk_size,
                         uint16_t num_queues)
{
    config->capacity =
        cpu_to_le64(bdrv_getlength(bs) >> VIRTIO_BLK_SECTOR_BITS);
    config->blk_size = cpu_to_le32(blk_size);
    config->size_max = cpu_to_le32(0);
    config->seg_max = cpu_to_le32(128 - 2);
    config->min_io_size = cpu_to_le16(1);
    config->opt_io_size = cpu_to_le32(1);
    config->num_queues = cpu_to_le16(num_queues);
    config->max_discard_sectors =
        cpu_to_le32(VIRTIO_BLK_MAX_DISCARD_SECTORS);
    config->max_discard_seg = cpu_to_le32(1);
    config->discard_sector_alignment =
        cpu_to_le32(blk_size >> VIRTIO_BLK_SECTOR_BITS);
    config->max_write_zeroes_sectors =
        cpu_to_le32(VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS);
    config->max_write_zeroes_seg = cpu_to_le32(1);
}

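/* Called when the export is asked to shut down: stop the vhost-user server */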
static void vu_blk_exp_request_shutdown(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    vhost_user_server_stop(&vexp->vu_server);
}

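/*
 * Resize callback: refresh the advertised capacity and send a config change
 * notification to the vhost-user front-end.
 */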
static void vu_blk_exp_resize(void *opaque)
{
    VuBlkExport *vexp = opaque;
    BlockDriverState *bs = blk_bs(vexp->handler.blk);
    int64_t new_size = bdrv_getlength(bs);

    if (new_size < 0) {
        error_printf("Failed to get length of block node '%s'",
                     bdrv_get_node_name(bs));
        return;
    }

    vexp->blkcfg.capacity = cpu_to_le64(new_size >> VIRTIO_BLK_SECTOR_BITS);

    vu_config_change_msg(&vexp->vu_server.vu_dev);
}

static void vu_blk_drained_begin(void *opaque)
{
    VuBlkExport *vexp = opaque;

    vexp->vu_server.quiescing = true;
    vhost_user_server_detach_aio_context(&vexp->vu_server);
}

static void vu_blk_drained_end(void *opaque)
{
    VuBlkExport *vexp = opaque;

    vexp->vu_server.quiescing = false;
    vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx);
}

/*
 * Ensures that bdrv_drained_begin() waits until in-flight requests complete
 * and the server->co_trip coroutine has terminated. It will be restarted in
 * vhost_user_server_attach_aio_context().
 */
static bool vu_blk_drained_poll(void *opaque)
{
    VuBlkExport *vexp = opaque;
    VuServer *server = &vexp->vu_server;

    return server->co_trip || vhost_user_server_has_in_flight(server);
}

static const BlockDevOps vu_blk_dev_ops = {
    .drained_begin = vu_blk_drained_begin,
    .drained_end = vu_blk_drained_end,
    .drained_poll = vu_blk_drained_poll,
    .resize_cb = vu_blk_exp_resize,
};

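/*
 * Create the vhost-user-blk export: validate the options, initialize the
 * virtio-blk config space and request handler, then start the vhost-user
 * server on the given socket address.
 */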
static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
                             Error **errp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
    BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
    uint64_t logical_block_size;
    uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;

    vexp->blkcfg.wce = 0;

    if (vu_opts->has_logical_block_size) {
        logical_block_size = vu_opts->logical_block_size;
    } else {
        logical_block_size = VIRTIO_BLK_SECTOR_SIZE;
    }
    if (!check_block_size("logical-block-size", logical_block_size, errp)) {
        return -EINVAL;
    }

    if (vu_opts->has_num_queues) {
        num_queues = vu_opts->num_queues;
    }
    if (num_queues == 0) {
        error_setg(errp, "num-queues must be greater than 0");
        return -EINVAL;
    }
    vexp->handler.blk = exp->blk;
    vexp->handler.serial = g_strdup("vhost_user_blk");
    vexp->handler.logical_block_size = logical_block_size;
    vexp->handler.writable = opts->writable;

    vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
                             logical_block_size, num_queues);

    blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                 vexp);

    blk_set_dev_ops(exp->blk, &vu_blk_dev_ops, vexp);

    if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
                                 num_queues, &vu_blk_iface, errp)) {
        blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
                                        blk_aio_detach, vexp);
        g_free(vexp->handler.serial);
        return -EADDRNOTAVAIL;
    }

    return 0;
}

static void vu_blk_exp_delete(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                    vexp);
    g_free(vexp->handler.serial);
}

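/* Block export driver for BLOCK_EXPORT_TYPE_VHOST_USER_BLK */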
const BlockExportDriver blk_exp_vhost_user_blk = {
    .type = BLOCK_EXPORT_TYPE_VHOST_USER_BLK,
    .instance_size = sizeof(VuBlkExport),
    .create = vu_blk_exp_create,
    .delete = vu_blk_exp_delete,
    .request_shutdown = vu_blk_exp_request_shutdown,
};