Block layer patches
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJWKmeMAAoJEH8JsnLIjy/WUYYP/0hrpnuE14QBAW+RjV/40+fh
II3+RBX2Avz8ERWD29aAftqmNIigxPhdvzEdOP1/IRBnYNzBMUw6BTsqrV3IgA1X
ODIRFht3horyL6w5rfJLLbAVOyRPWTGHZNgxBN+GGy3Z/jLK+VH+1dK26rSd6p7o
QqsmBUPi5UQvSd89r+X1tVwFjT5Miw7CyFaijXdnVzs1LNpbtg49t4YpQH1eG5bf
aP4GXWn4g5/Ht8LSByuViDG3CpLjysSYSFPn/4HIP41BU6u3P6yD++g6nbdkvIsn
yDezoVpCEvKoYXfc1xGY3Q7+lwzV8wa5mzdtpy6eg2889dHoJuUePI6Yfza9TNJI
XzBJmYaBZx+289nxeAX2K3dRe0ilCEdWyujlhoonDuYOS9xbDiaouWcVZEw/0ky5
SUsRZYTZGGc1BOoFeBE4JpopFCPZ4a//bzi5GrlyEiwl7kpKPTMxFWvjSQpQ/Gzz
sPLxnn1y1AA4jAqgQNLFpCciJ1sH1WNmb00WjQkoEomIdpuvLvK1GUKfcwEERTWb
Ae8wlCbofkIJgQOwa9DTS/yDPfl3pUc/NgmRc+Qz/0snrtvmmsS+huJQQfCH1JDQ
p3jvurvQ7G5RkTzdOIbSkzfKaW8ZHq6ENWRP5HY/y8LontAVdYzT+DRLeyTpGfKL
ncgMgK6fT3rE+3lA8Acz
=xcrS
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches

# gpg: Signature made Fri 23 Oct 2015 17:59:56 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (37 commits)
  tests: Add test case for aio_disable_external
  block: Add "drained begin/end" for internal snapshot
  block: Add "drained begin/end" for transactional blockdev-backup
  block: Add "drained begin/end" for transactional backup
  block: Add "drained begin/end" for transactional external snapshot
  block: Introduce "drained begin/end" API
  aio: introduce aio_{disable,enable}_external
  dataplane: Mark host notifiers' client type as "external"
  nbd: Mark fd handlers client type as "external"
  aio: Add "is_external" flag for event handlers
  throttle: Remove throttle_group_lock/unlock()
  blockdev: Allow more options for BB-less BDS tree
  blockdev: Pull out blockdev option extraction
  blockdev: Do not create BDS for empty drive
  block: Prepare for NULL BDS
  block: Add blk_insert_bs()
  block: Prepare remaining BB functions for NULL BDS
  block: Fail requests to empty BlockBackend
  block: Make some BB functions fall back to BBRS
  block: Add BlockBackendRootState
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit af25e7277d
aio-posix.c

@@ -25,6 +25,7 @@ struct AioHandler
     IOHandler *io_write;
     int deleted;
     void *opaque;
+    bool is_external;
     QLIST_ENTRY(AioHandler) node;
 };

@@ -43,6 +44,7 @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)

 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
+                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         void *opaque)
@@ -82,6 +84,7 @@ void aio_set_fd_handler(AioContext *ctx,
         node->io_read = io_read;
         node->io_write = io_write;
         node->opaque = opaque;
+        node->is_external = is_external;

         node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
@@ -92,10 +95,11 @@ void aio_set_fd_handler(AioContext *ctx,

 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
+                            bool is_external,
                             EventNotifierHandler *io_read)
 {
     aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
-                       (IOHandler *)io_read, NULL, notifier);
+                       is_external, (IOHandler *)io_read, NULL, notifier);
 }

 bool aio_prepare(AioContext *ctx)
@@ -257,7 +261,8 @@ bool aio_poll(AioContext *ctx, bool blocking)

     /* fill pollfds */
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->pfd.events) {
+        if (!node->deleted && node->pfd.events
+            && aio_node_check(ctx, node->is_external)) {
             add_pollfd(node);
         }
     }
aio-win32.c

@@ -28,11 +28,13 @@ struct AioHandler {
     GPollFD pfd;
     int deleted;
     void *opaque;
+    bool is_external;
     QLIST_ENTRY(AioHandler) node;
 };

 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
+                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         void *opaque)
@@ -86,6 +88,7 @@ void aio_set_fd_handler(AioContext *ctx,
         node->opaque = opaque;
         node->io_read = io_read;
         node->io_write = io_write;
+        node->is_external = is_external;

         event = event_notifier_get_handle(&ctx->notifier);
         WSAEventSelect(node->pfd.fd, event,
@@ -98,6 +101,7 @@ void aio_set_fd_handler(AioContext *ctx,

 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
+                            bool is_external,
                             EventNotifierHandler *io_notify)
 {
     AioHandler *node;
@@ -133,6 +137,7 @@ void aio_set_event_notifier(AioContext *ctx,
         node->e = e;
         node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
         node->pfd.events = G_IO_IN;
+        node->is_external = is_external;
         QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

         g_source_add_poll(&ctx->source, &node->pfd);
@@ -304,7 +309,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* fill fd sets */
     count = 0;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_notify) {
+        if (!node->deleted && node->io_notify
+            && aio_node_check(ctx, node->is_external)) {
             events[count++] = event_notifier_get_handle(node->e);
         }
     }
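A minimal usage sketch of the new parameter (not part of the diff): handlers registered with is_external set to true are the ones that aio_disable_external() temporarily keeps out of aio_poll(), while internal handlers stay active. The fd, callbacks and opaque names below are hypothetical placeholders.

/* Sketch under the new API; my_device_fd and my_device_read are hypothetical. */
static void my_device_read(void *opaque)
{
    /* handle a guest-initiated request */
}

static void attach_device_fd(AioContext *ctx, int my_device_fd, void *opaque)
{
    /* Guest-facing fd: register it as an "external" client. */
    aio_set_fd_handler(ctx, my_device_fd, true, my_device_read, NULL, opaque);
}

static void quiesce_external_io(AioContext *ctx)
{
    aio_disable_external(ctx);  /* aio_poll() now skips external handlers */
    /* ... operate on the block graph without new guest requests ... */
    aio_enable_external(ctx);   /* external handlers are polled again */
}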
async.c (3 changes)

@@ -247,7 +247,7 @@ aio_ctx_finalize(GSource *source)
     }
     qemu_mutex_unlock(&ctx->bh_lock);

-    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
     rfifolock_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
@@ -329,6 +329,7 @@ AioContext *aio_context_new(Error **errp)
     }
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
+                           false,
                            (EventNotifierHandler *)
                            event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
block.c (180 changes)

@@ -257,7 +257,6 @@ BlockDriverState *bdrv_new(void)
     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
         QLIST_INIT(&bs->op_blockers[i]);
     }
-    bdrv_iostatus_disable(bs);
     notifier_list_init(&bs->close_notifiers);
     notifier_with_return_list_init(&bs->before_write_notifiers);
     qemu_co_queue_init(&bs->throttled_reqs[0]);
@@ -857,7 +856,6 @@ static int bdrv_open_common(BlockDriverState *bs, BdrvChild *file,
         goto fail_opts;
     }

-    bs->guest_block_size = 512;
     bs->request_alignment = 512;
     bs->zero_beyond_eof = true;
     open_flags = bdrv_open_flags(bs, flags);
@@ -1081,6 +1079,10 @@ static int bdrv_fill_options(QDict **options, const char **pfilename,
         }
     }

+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        *flags |= BDRV_O_INCOMING;
+    }
+
     return 0;
 }

@@ -1908,6 +1910,10 @@ void bdrv_close(BlockDriverState *bs)
     bdrv_drain(bs); /* in case flush left pending I/O */
     notifier_list_notify(&bs->close_notifiers, bs);

+    if (bs->blk) {
+        blk_dev_change_media_cb(bs->blk, false);
+    }
+
     if (bs->drv) {
         BdrvChild *child, *next;

@@ -1946,10 +1952,6 @@ void bdrv_close(BlockDriverState *bs)
         bs->full_open_options = NULL;
     }

-    if (bs->blk) {
-        blk_dev_change_media_cb(bs->blk, false);
-    }
-
     QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
         g_free(ban);
     }
@@ -1998,19 +2000,10 @@ static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
     /* move some fields that need to stay attached to the device */

     /* dev info */
-    bs_dest->guest_block_size = bs_src->guest_block_size;
     bs_dest->copy_on_read = bs_src->copy_on_read;

     bs_dest->enable_write_cache = bs_src->enable_write_cache;

-    /* r/w error */
-    bs_dest->on_read_error = bs_src->on_read_error;
-    bs_dest->on_write_error = bs_src->on_write_error;
-
-    /* i/o status */
-    bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
-    bs_dest->iostatus = bs_src->iostatus;
-
     /* dirty bitmap */
     bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
 }
@@ -2497,82 +2490,6 @@ void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
     *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
 }

-void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
-                       BlockdevOnError on_write_error)
-{
-    bs->on_read_error = on_read_error;
-    bs->on_write_error = on_write_error;
-}
-
-BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
-{
-    return is_read ? bs->on_read_error : bs->on_write_error;
-}
-
-BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
-{
-    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
-
-    switch (on_err) {
-    case BLOCKDEV_ON_ERROR_ENOSPC:
-        return (error == ENOSPC) ?
-               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
-    case BLOCKDEV_ON_ERROR_STOP:
-        return BLOCK_ERROR_ACTION_STOP;
-    case BLOCKDEV_ON_ERROR_REPORT:
-        return BLOCK_ERROR_ACTION_REPORT;
-    case BLOCKDEV_ON_ERROR_IGNORE:
-        return BLOCK_ERROR_ACTION_IGNORE;
-    default:
-        abort();
-    }
-}
-
-static void send_qmp_error_event(BlockDriverState *bs,
-                                 BlockErrorAction action,
-                                 bool is_read, int error)
-{
-    IoOperationType optype;
-
-    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
-    qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
-                                   bdrv_iostatus_is_enabled(bs),
-                                   error == ENOSPC, strerror(error),
-                                   &error_abort);
-}
-
-/* This is done by device models because, while the block layer knows
- * about the error, it does not know whether an operation comes from
- * the device or the block layer (from a job, for example).
- */
-void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
-                       bool is_read, int error)
-{
-    assert(error >= 0);
-
-    if (action == BLOCK_ERROR_ACTION_STOP) {
-        /* First set the iostatus, so that "info block" returns an iostatus
-         * that matches the events raised so far (an additional error iostatus
-         * is fine, but not a lost one).
-         */
-        bdrv_iostatus_set_err(bs, error);
-
-        /* Then raise the request to stop the VM and the event.
-         * qemu_system_vmstop_request_prepare has two effects. First,
-         * it ensures that the STOP event always comes after the
-         * BLOCK_IO_ERROR event. Second, it ensures that even if management
-         * can observe the STOP event and do a "cont" before the STOP
-         * event is issued, the VM will not stop. In this case, vm_start()
-         * also ensures that the STOP/RESUME pair of events is emitted.
-         */
-        qemu_system_vmstop_request_prepare();
-        send_qmp_error_event(bs, action, is_read, error);
-        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
-    } else {
-        send_qmp_error_event(bs, action, is_read, error);
-    }
-}
-
 int bdrv_is_read_only(BlockDriverState *bs)
 {
     return bs->read_only;
@@ -2766,6 +2683,11 @@ BlockDriverState *bdrv_lookup_bs(const char *device,
         blk = blk_by_name(device);

         if (blk) {
+            if (!blk_bs(blk)) {
+                error_setg(errp, "Device '%s' has no medium", device);
+                return NULL;
+            }
+
             return blk_bs(blk);
         }
     }
@@ -3136,15 +3058,23 @@ void bdrv_invalidate_cache_all(Error **errp)
 /**
  * Return TRUE if the media is present
  */
-int bdrv_is_inserted(BlockDriverState *bs)
+bool bdrv_is_inserted(BlockDriverState *bs)
 {
     BlockDriver *drv = bs->drv;
+    BdrvChild *child;

-    if (!drv)
-        return 0;
-    if (!drv->bdrv_is_inserted)
-        return 1;
-    return drv->bdrv_is_inserted(bs);
+    if (!drv) {
+        return false;
+    }
+    if (drv->bdrv_is_inserted) {
+        return drv->bdrv_is_inserted(bs);
+    }
+    QLIST_FOREACH(child, &bs->children, next) {
+        if (!bdrv_is_inserted(child->bs)) {
+            return false;
+        }
+    }
+    return true;
 }

 /**
@@ -3195,11 +3125,6 @@ void bdrv_lock_medium(BlockDriverState *bs, bool locked)
     }
 }

-void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
-{
-    bs->guest_block_size = align;
-}
-
 BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs, const char *name)
 {
     BdrvDirtyBitmap *bm;
@@ -3597,46 +3522,6 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
     return true;
 }

-void bdrv_iostatus_enable(BlockDriverState *bs)
-{
-    bs->iostatus_enabled = true;
-    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
-}
-
-/* The I/O status is only enabled if the drive explicitly
- * enables it _and_ the VM is configured to stop on errors */
-bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
-{
-    return (bs->iostatus_enabled &&
-           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
-            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
-            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
-}
-
-void bdrv_iostatus_disable(BlockDriverState *bs)
-{
-    bs->iostatus_enabled = false;
-}
-
-void bdrv_iostatus_reset(BlockDriverState *bs)
-{
-    if (bdrv_iostatus_is_enabled(bs)) {
-        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
-        if (bs->job) {
-            block_job_iostatus_reset(bs->job);
-        }
-    }
-}
-
-void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
-{
-    assert(bdrv_iostatus_is_enabled(bs));
-    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
-        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
-                                         BLOCK_DEVICE_IO_STATUS_FAILED;
-    }
-}
-
 void bdrv_img_create(const char *filename, const char *fmt,
                      const char *base_filename, const char *base_fmt,
                      char *options, uint64_t img_size, int flags,
@@ -4148,14 +4033,3 @@ void bdrv_refresh_filename(BlockDriverState *bs)
         QDECREF(json);
     }
 }
-
-/* This accessor function purpose is to allow the device models to access the
- * BlockAcctStats structure embedded inside a BlockDriverState without being
- * aware of the BlockDriverState structure layout.
- * It will go away when the BlockAcctStats structure will be moved inside
- * the device models.
- */
-BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
-{
-    return &bs->stats;
-}
block/accounting.c

@@ -47,14 +47,6 @@ void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie)
 }

-
-void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
-                               unsigned int nb_sectors)
-{
-    if (stats->wr_highest_sector < sector_num + nb_sectors - 1) {
-        stats->wr_highest_sector = sector_num + nb_sectors - 1;
-    }
-}
-
 void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
                            int num_requests)
 {
block/backup.c

@@ -21,6 +21,7 @@
 #include "block/blockjob.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/ratelimit.h"
+#include "sysemu/block-backend.h"

 #define BACKUP_CLUSTER_BITS 16
 #define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)
@@ -215,7 +216,9 @@ static void backup_iostatus_reset(BlockJob *job)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common);

-    bdrv_iostatus_reset(s->target);
+    if (s->target->blk) {
+        blk_iostatus_reset(s->target->blk);
+    }
 }

 static const BlockJobDriver backup_job_driver = {
@@ -360,8 +363,10 @@ static void coroutine_fn backup_run(void *opaque)
     job->bitmap = hbitmap_alloc(end, 0);

     bdrv_set_enable_write_cache(target, true);
-    bdrv_set_on_error(target, on_target_error, on_target_error);
-    bdrv_iostatus_enable(target);
+    if (target->blk) {
+        blk_set_on_error(target->blk, on_target_error, on_target_error);
+        blk_iostatus_enable(target->blk);
+    }

     bdrv_add_before_write_notifier(bs, &before_write);

@@ -451,7 +456,9 @@ static void coroutine_fn backup_run(void *opaque)
     }
     hbitmap_free(job->bitmap);

-    bdrv_iostatus_disable(target);
+    if (target->blk) {
+        blk_iostatus_disable(target->blk);
+    }
     bdrv_op_unblock_all(target, job->common.blocker);

     data = g_malloc(sizeof(*data));
@@ -480,7 +487,7 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,

     if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
          on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
-        !bdrv_iostatus_is_enabled(bs)) {
+        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
         error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
         return;
     }
block/block-backend.c

@@ -12,12 +12,17 @@

 #include "sysemu/block-backend.h"
 #include "block/block_int.h"
+#include "block/blockjob.h"
+#include "block/throttle-groups.h"
 #include "sysemu/blockdev.h"
+#include "sysemu/sysemu.h"
 #include "qapi-event.h"

 /* Number of coroutines to reserve per attached device model */
 #define COROUTINE_POOL_RESERVATION 64

+static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
+
 struct BlockBackend {
     char *name;
     int refcnt;
@@ -29,15 +34,31 @@ struct BlockBackend {
     /* TODO change to DeviceState when all users are qdevified */
     const BlockDevOps *dev_ops;
     void *dev_opaque;
+
+    /* the block size for which the guest device expects atomicity */
+    int guest_block_size;
+
+    /* If the BDS tree is removed, some of its options are stored here (which
+     * can be used to restore those options in the new BDS on insert) */
+    BlockBackendRootState root_state;
+
+    /* I/O stats (display with "info blockstats"). */
+    BlockAcctStats stats;
+
+    BlockdevOnError on_read_error, on_write_error;
+    bool iostatus_enabled;
+    BlockDeviceIoStatus iostatus;
 };

 typedef struct BlockBackendAIOCB {
     BlockAIOCB common;
     QEMUBH *bh;
+    BlockBackend *blk;
     int ret;
 } BlockBackendAIOCB;

 static const AIOCBInfo block_backend_aiocb_info = {
+    .get_aio_context = blk_aiocb_get_aio_context,
     .aiocb_size = sizeof(BlockBackendAIOCB),
 };

@@ -145,6 +166,10 @@ static void blk_delete(BlockBackend *blk)
         bdrv_unref(blk->bs);
         blk->bs = NULL;
     }
+    if (blk->root_state.throttle_state) {
+        g_free(blk->root_state.throttle_group);
+        throttle_group_unref(blk->root_state.throttle_state);
+    }
     /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
     if (blk->name[0]) {
         QTAILQ_REMOVE(&blk_backends, blk, link);
@@ -308,6 +333,17 @@ void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
     }
 }

+/*
+ * Associates a new BlockDriverState with @blk.
+ */
+void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
+{
+    assert(!blk->bs && !bs->blk);
+    bdrv_ref(bs);
+    blk->bs = bs;
+    bs->blk = blk;
+}
+
 /*
  * Attach device model @dev to @blk.
  * Return 0 on success, -EBUSY when a device model is attached already.
@@ -320,7 +356,7 @@ int blk_attach_dev(BlockBackend *blk, void *dev)
     }
     blk_ref(blk);
     blk->dev = dev;
-    bdrv_iostatus_reset(blk->bs);
+    blk_iostatus_reset(blk);
     return 0;
 }

@@ -347,7 +383,7 @@ void blk_detach_dev(BlockBackend *blk, void *dev)
     blk->dev = NULL;
     blk->dev_ops = NULL;
     blk->dev_opaque = NULL;
-    bdrv_set_guest_block_size(blk->bs, 512);
+    blk->guest_block_size = 512;
     blk_unref(blk);
 }

@@ -452,7 +488,47 @@ void blk_dev_resize_cb(BlockBackend *blk)

 void blk_iostatus_enable(BlockBackend *blk)
 {
-    bdrv_iostatus_enable(blk->bs);
+    blk->iostatus_enabled = true;
+    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
+}
+
+/* The I/O status is only enabled if the drive explicitly
+ * enables it _and_ the VM is configured to stop on errors */
+bool blk_iostatus_is_enabled(const BlockBackend *blk)
+{
+    return (blk->iostatus_enabled &&
+           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
+            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
+            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
+}
+
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
+{
+    return blk->iostatus;
+}
+
+void blk_iostatus_disable(BlockBackend *blk)
+{
+    blk->iostatus_enabled = false;
+}
+
+void blk_iostatus_reset(BlockBackend *blk)
+{
+    if (blk_iostatus_is_enabled(blk)) {
+        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
+        if (blk->bs && blk->bs->job) {
+            block_job_iostatus_reset(blk->bs->job);
+        }
+    }
+}
+
+void blk_iostatus_set_err(BlockBackend *blk, int error)
+{
+    assert(blk_iostatus_is_enabled(blk));
+    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
+        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
+                                          BLOCK_DEVICE_IO_STATUS_FAILED;
+    }
 }

 static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
@@ -464,7 +540,7 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
         return -EIO;
     }

-    if (!blk_is_inserted(blk)) {
+    if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }

@@ -558,6 +634,7 @@ static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
     QEMUBH *bh;

     acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
+    acb->blk = blk;
     acb->ret = ret;

     bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
@@ -602,16 +679,28 @@ int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)

 int64_t blk_getlength(BlockBackend *blk)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_getlength(blk->bs);
 }

 void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
 {
-    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
+    if (!blk->bs) {
+        *nb_sectors_ptr = 0;
+    } else {
+        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
+    }
 }

 int64_t blk_nb_sectors(BlockBackend *blk)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_nb_sectors(blk->bs);
 }

@@ -642,6 +731,10 @@ BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                           BlockCompletionFunc *cb, void *opaque)
 {
+    if (!blk_is_available(blk)) {
+        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
+    }
+
     return bdrv_aio_flush(blk->bs, cb, opaque);
 }

@@ -683,12 +776,20 @@ int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)

 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_ioctl(blk->bs, req, buf);
 }

 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
 {
+    if (!blk_is_available(blk)) {
+        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
+    }
+
     return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
 }

@@ -704,11 +805,19 @@ int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)

 int blk_co_flush(BlockBackend *blk)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_co_flush(blk->bs);
 }

 int blk_flush(BlockBackend *blk)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_flush(blk->bs);
 }

@@ -719,7 +828,9 @@ int blk_flush_all(void)

 void blk_drain(BlockBackend *blk)
 {
-    bdrv_drain(blk->bs);
+    if (blk->bs) {
+        bdrv_drain(blk->bs);
+    }
 }

 void blk_drain_all(void)
@@ -727,76 +838,178 @@ void blk_drain_all(void)
     bdrv_drain_all();
 }

+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+                      BlockdevOnError on_write_error)
+{
+    blk->on_read_error = on_read_error;
+    blk->on_write_error = on_write_error;
+}
+
 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
 {
-    return bdrv_get_on_error(blk->bs, is_read);
+    return is_read ? blk->on_read_error : blk->on_write_error;
 }

 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                       int error)
 {
-    return bdrv_get_error_action(blk->bs, is_read, error);
+    BlockdevOnError on_err = blk_get_on_error(blk, is_read);
+
+    switch (on_err) {
+    case BLOCKDEV_ON_ERROR_ENOSPC:
+        return (error == ENOSPC) ?
+               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
+    case BLOCKDEV_ON_ERROR_STOP:
+        return BLOCK_ERROR_ACTION_STOP;
+    case BLOCKDEV_ON_ERROR_REPORT:
+        return BLOCK_ERROR_ACTION_REPORT;
+    case BLOCKDEV_ON_ERROR_IGNORE:
+        return BLOCK_ERROR_ACTION_IGNORE;
+    default:
+        abort();
+    }
+}
+
+static void send_qmp_error_event(BlockBackend *blk,
+                                 BlockErrorAction action,
+                                 bool is_read, int error)
+{
+    IoOperationType optype;
+
+    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
+    qapi_event_send_block_io_error(blk_name(blk), optype, action,
+                                   blk_iostatus_is_enabled(blk),
+                                   error == ENOSPC, strerror(error),
+                                   &error_abort);
+}
+
+/* This is done by device models because, while the block layer knows
+ * about the error, it does not know whether an operation comes from
+ * the device or the block layer (from a job, for example).
+ */
 void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                       bool is_read, int error)
 {
-    bdrv_error_action(blk->bs, action, is_read, error);
+    assert(error >= 0);
+
+    if (action == BLOCK_ERROR_ACTION_STOP) {
+        /* First set the iostatus, so that "info block" returns an iostatus
+         * that matches the events raised so far (an additional error iostatus
+         * is fine, but not a lost one).
+         */
+        blk_iostatus_set_err(blk, error);
+
+        /* Then raise the request to stop the VM and the event.
+         * qemu_system_vmstop_request_prepare has two effects. First,
+         * it ensures that the STOP event always comes after the
+         * BLOCK_IO_ERROR event. Second, it ensures that even if management
+         * can observe the STOP event and do a "cont" before the STOP
+         * event is issued, the VM will not stop. In this case, vm_start()
+         * also ensures that the STOP/RESUME pair of events is emitted.
+         */
+        qemu_system_vmstop_request_prepare();
+        send_qmp_error_event(blk, action, is_read, error);
+        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
+    } else {
+        send_qmp_error_event(blk, action, is_read, error);
+    }
 }

 int blk_is_read_only(BlockBackend *blk)
 {
-    return bdrv_is_read_only(blk->bs);
+    if (blk->bs) {
+        return bdrv_is_read_only(blk->bs);
+    } else {
+        return blk->root_state.read_only;
+    }
 }

 int blk_is_sg(BlockBackend *blk)
 {
+    if (!blk->bs) {
+        return 0;
+    }
+
     return bdrv_is_sg(blk->bs);
 }

 int blk_enable_write_cache(BlockBackend *blk)
 {
-    return bdrv_enable_write_cache(blk->bs);
+    if (blk->bs) {
+        return bdrv_enable_write_cache(blk->bs);
+    } else {
+        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
+    }
 }

 void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
 {
-    bdrv_set_enable_write_cache(blk->bs, wce);
+    if (blk->bs) {
+        bdrv_set_enable_write_cache(blk->bs, wce);
+    } else {
+        if (wce) {
+            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
+        } else {
+            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
+        }
+    }
 }

 void blk_invalidate_cache(BlockBackend *blk, Error **errp)
 {
+    if (!blk->bs) {
+        error_setg(errp, "Device '%s' has no medium", blk->name);
+        return;
+    }
+
     bdrv_invalidate_cache(blk->bs, errp);
 }

-int blk_is_inserted(BlockBackend *blk)
+bool blk_is_inserted(BlockBackend *blk)
 {
-    return bdrv_is_inserted(blk->bs);
+    return blk->bs && bdrv_is_inserted(blk->bs);
+}
+
+bool blk_is_available(BlockBackend *blk)
+{
+    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
 }

 void blk_lock_medium(BlockBackend *blk, bool locked)
 {
-    bdrv_lock_medium(blk->bs, locked);
+    if (blk->bs) {
+        bdrv_lock_medium(blk->bs, locked);
+    }
 }

 void blk_eject(BlockBackend *blk, bool eject_flag)
 {
-    bdrv_eject(blk->bs, eject_flag);
+    if (blk->bs) {
+        bdrv_eject(blk->bs, eject_flag);
+    }
 }

 int blk_get_flags(BlockBackend *blk)
 {
-    return bdrv_get_flags(blk->bs);
+    if (blk->bs) {
+        return bdrv_get_flags(blk->bs);
+    } else {
+        return blk->root_state.open_flags;
+    }
 }

 int blk_get_max_transfer_length(BlockBackend *blk)
 {
-    return blk->bs->bl.max_transfer_length;
+    if (blk->bs) {
+        return blk->bs->bl.max_transfer_length;
+    } else {
+        return 0;
+    }
 }

 void blk_set_guest_block_size(BlockBackend *blk, int align)
 {
-    bdrv_set_guest_block_size(blk->bs, align);
+    blk->guest_block_size = align;
 }

 void *blk_blockalign(BlockBackend *blk, size_t size)
@@ -806,40 +1019,64 @@ void *blk_blockalign(BlockBackend *blk, size_t size)

 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
 {
+    if (!blk->bs) {
+        return false;
+    }
+
     return bdrv_op_is_blocked(blk->bs, op, errp);
 }

 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
 {
-    bdrv_op_unblock(blk->bs, op, reason);
+    if (blk->bs) {
+        bdrv_op_unblock(blk->bs, op, reason);
+    }
 }

 void blk_op_block_all(BlockBackend *blk, Error *reason)
 {
-    bdrv_op_block_all(blk->bs, reason);
+    if (blk->bs) {
+        bdrv_op_block_all(blk->bs, reason);
+    }
 }

 void blk_op_unblock_all(BlockBackend *blk, Error *reason)
 {
-    bdrv_op_unblock_all(blk->bs, reason);
+    if (blk->bs) {
+        bdrv_op_unblock_all(blk->bs, reason);
+    }
 }

 AioContext *blk_get_aio_context(BlockBackend *blk)
 {
-    return bdrv_get_aio_context(blk->bs);
+    if (blk->bs) {
+        return bdrv_get_aio_context(blk->bs);
+    } else {
+        return qemu_get_aio_context();
+    }
+}
+
+static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
+{
+    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
+    return blk_get_aio_context(blk_acb->blk);
 }

 void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
 {
-    bdrv_set_aio_context(blk->bs, new_context);
+    if (blk->bs) {
+        bdrv_set_aio_context(blk->bs, new_context);
+    }
 }

 void blk_add_aio_context_notifier(BlockBackend *blk,
         void (*attached_aio_context)(AioContext *new_context, void *opaque),
         void (*detach_aio_context)(void *opaque), void *opaque)
 {
-    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
-                                  detach_aio_context, opaque);
+    if (blk->bs) {
+        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
+                                      detach_aio_context, opaque);
+    }
 }

 void blk_remove_aio_context_notifier(BlockBackend *blk,
@@ -848,28 +1085,36 @@ void blk_remove_aio_context_notifier(BlockBackend *blk,
                                      void (*detach_aio_context)(void *),
                                      void *opaque)
 {
-    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
-                                     detach_aio_context, opaque);
+    if (blk->bs) {
+        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
+                                         detach_aio_context, opaque);
+    }
 }

 void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
 {
-    bdrv_add_close_notifier(blk->bs, notify);
+    if (blk->bs) {
+        bdrv_add_close_notifier(blk->bs, notify);
+    }
 }

 void blk_io_plug(BlockBackend *blk)
 {
-    bdrv_io_plug(blk->bs);
+    if (blk->bs) {
+        bdrv_io_plug(blk->bs);
+    }
 }

 void blk_io_unplug(BlockBackend *blk)
 {
-    bdrv_io_unplug(blk->bs);
+    if (blk->bs) {
+        bdrv_io_unplug(blk->bs);
+    }
 }

 BlockAcctStats *blk_get_stats(BlockBackend *blk)
 {
-    return bdrv_get_stats(blk->bs);
+    return &blk->stats;
 }

 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
@@ -902,6 +1147,10 @@ int blk_write_compressed(BlockBackend *blk, int64_t sector_num,

 int blk_truncate(BlockBackend *blk, int64_t offset)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_truncate(blk->bs, offset);
 }

@@ -918,20 +1167,67 @@ int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                      int64_t pos, int size)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_save_vmstate(blk->bs, buf, pos, size);
 }

 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_load_vmstate(blk->bs, buf, pos, size);
 }

 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_probe_blocksizes(blk->bs, bsz);
 }

 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
 {
+    if (!blk_is_available(blk)) {
+        return -ENOMEDIUM;
+    }
+
     return bdrv_probe_geometry(blk->bs, geo);
 }
+
+/*
+ * Updates the BlockBackendRootState object with data from the currently
+ * attached BlockDriverState.
+ */
+void blk_update_root_state(BlockBackend *blk)
+{
+    assert(blk->bs);
+
+    blk->root_state.open_flags = blk->bs->open_flags;
+    blk->root_state.read_only = blk->bs->read_only;
+    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;
+
+    if (blk->root_state.throttle_group) {
+        g_free(blk->root_state.throttle_group);
+        throttle_group_unref(blk->root_state.throttle_state);
+    }
+    if (blk->bs->throttle_state) {
+        const char *name = throttle_group_get_name(blk->bs);
+        blk->root_state.throttle_group = g_strdup(name);
+        blk->root_state.throttle_state = throttle_group_incref(name);
+    } else {
+        blk->root_state.throttle_group = NULL;
+        blk->root_state.throttle_state = NULL;
+    }
+}
+
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
+{
+    return &blk->root_state;
+}
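A rough sketch of the behaviour these BlockBackend hunks introduce (not part of the diff): with no BlockDriverState attached, request functions fail with -ENOMEDIUM, and blk_insert_bs() associates a newly opened BDS with the backend. The function and variable names below are illustrative only.

/* Sketch only; assumes "blk" currently has no medium and "new_bs" is an
 * already opened BlockDriverState. */
static void reinsert_medium(BlockBackend *blk, BlockDriverState *new_bs)
{
    if (blk_getlength(blk) == -ENOMEDIUM) {   /* empty backend: no BDS attached */
        blk_insert_bs(blk, new_bs);           /* attach the new BDS to the backend */
    }

    if (blk_is_available(blk)) {              /* medium inserted and tray not open */
        /* I/O requests can be submitted to blk again */
    }
}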
block/commit.c

@@ -17,6 +17,7 @@
 #include "block/blockjob.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/ratelimit.h"
+#include "sysemu/block-backend.h"

 enum {
     /*
@@ -213,7 +214,7 @@ void commit_start(BlockDriverState *bs, BlockDriverState *base,

     if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
          on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
-        !bdrv_iostatus_is_enabled(bs)) {
+        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
         error_setg(errp, "Invalid parameter combination");
         return;
     }
block/curl.c (14 changes)

@@ -154,18 +154,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            aio_set_fd_handler(s->aio_context, fd, curl_multi_read,
-                               NULL, state);
+            aio_set_fd_handler(s->aio_context, fd, false,
+                               curl_multi_read, NULL, state);
             break;
         case CURL_POLL_OUT:
-            aio_set_fd_handler(s->aio_context, fd, NULL, curl_multi_do, state);
+            aio_set_fd_handler(s->aio_context, fd, false,
+                               NULL, curl_multi_do, state);
             break;
         case CURL_POLL_INOUT:
-            aio_set_fd_handler(s->aio_context, fd, curl_multi_read,
-                               curl_multi_do, state);
+            aio_set_fd_handler(s->aio_context, fd, false,
+                               curl_multi_read, curl_multi_do, state);
            break;
        case CURL_POLL_REMOVE:
-            aio_set_fd_handler(s->aio_context, fd, NULL, NULL, NULL);
+            aio_set_fd_handler(s->aio_context, fd, false,
+                               NULL, NULL, NULL);
            break;
    }

block/io.c (27 changes)

@@ -23,6 +23,7 @@
 */

 #include "trace.h"
+#include "sysemu/block-backend.h"
 #include "block/blockjob.h"
 #include "block/block_int.h"
 #include "block/throttle-groups.h"
@@ -1151,7 +1152,9 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,

     bdrv_set_dirty(bs, sector_num, nb_sectors);

-    block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
+    if (bs->wr_highest_offset < offset + bytes) {
+        bs->wr_highest_offset = offset + bytes;
+    }

     if (ret >= 0) {
         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
@@ -1903,7 +1906,10 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
         }
     }

-    block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
+    if (bs->blk) {
+        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
+                              num_reqs - outidx - 1);
+    }

     return outidx + 1;
 }
@@ -2618,3 +2624,20 @@ void bdrv_flush_io_queue(BlockDriverState *bs)
     }
     bdrv_start_throttled_reqs(bs);
 }
+
+void bdrv_drained_begin(BlockDriverState *bs)
+{
+    if (!bs->quiesce_counter++) {
+        aio_disable_external(bdrv_get_aio_context(bs));
+    }
+    bdrv_drain(bs);
+}
+
+void bdrv_drained_end(BlockDriverState *bs)
+{
+    assert(bs->quiesce_counter > 0);
+    if (--bs->quiesce_counter > 0) {
+        return;
+    }
+    aio_enable_external(bdrv_get_aio_context(bs));
+}
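The "drained begin/end" callers added elsewhere in this series (internal snapshot, transactional backup, external snapshot) follow roughly this pattern; a hedged sketch, with take_internal_snapshot() standing in for whatever operation must not race with new guest I/O.

/* Sketch of a drained section; take_internal_snapshot() is a placeholder. */
static void take_internal_snapshot(BlockDriverState *bs); /* hypothetical */

static void with_drained_section(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);      /* disable external clients, drain in-flight I/O */

    take_internal_snapshot(bs);  /* no new external I/O is dispatched here */

    bdrv_drained_end(bs);        /* re-enable external clients */
}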
block/iscsi.c

@@ -291,8 +291,8 @@ iscsi_set_events(IscsiLun *iscsilun)
     int ev = iscsi_which_events(iscsi);

     if (ev != iscsilun->events) {
-        aio_set_fd_handler(iscsilun->aio_context,
-                           iscsi_get_fd(iscsi),
+        aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
+                           false,
                            (ev & POLLIN) ? iscsi_process_read : NULL,
                            (ev & POLLOUT) ? iscsi_process_write : NULL,
                            iscsilun);
@@ -1280,9 +1280,8 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
 {
     IscsiLun *iscsilun = bs->opaque;

-    aio_set_fd_handler(iscsilun->aio_context,
-                       iscsi_get_fd(iscsilun->iscsi),
-                       NULL, NULL, NULL);
+    aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
+                       false, NULL, NULL, NULL);
     iscsilun->events = 0;

     if (iscsilun->nop_timer) {
block/linux-aio.c

@@ -287,7 +287,7 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
 {
     struct qemu_laio_state *s = s_;

-    aio_set_event_notifier(old_context, &s->e, NULL);
+    aio_set_event_notifier(old_context, &s->e, false, NULL);
     qemu_bh_delete(s->completion_bh);
 }

@@ -296,7 +296,8 @@ void laio_attach_aio_context(void *s_, AioContext *new_context)
     struct qemu_laio_state *s = s_;

     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
-    aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
+    aio_set_event_notifier(new_context, &s->e, false,
+                           qemu_laio_completion_cb);
 }

 void *laio_init(void)
block/mirror.c

@@ -14,6 +14,7 @@
 #include "trace.h"
 #include "block/blockjob.h"
 #include "block/block_int.h"
+#include "sysemu/block-backend.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/ratelimit.h"
 #include "qemu/bitmap.h"
@@ -599,7 +600,9 @@ immediate_exit:
     g_free(s->cow_bitmap);
     g_free(s->in_flight_bitmap);
     bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
-    bdrv_iostatus_disable(s->target);
+    if (s->target->blk) {
+        blk_iostatus_disable(s->target->blk);
+    }

     data = g_malloc(sizeof(*data));
     data->ret = ret;
@@ -621,7 +624,9 @@ static void mirror_iostatus_reset(BlockJob *job)
 {
     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

-    bdrv_iostatus_reset(s->target);
+    if (s->target->blk) {
+        blk_iostatus_reset(s->target->blk);
+    }
 }

 static void mirror_complete(BlockJob *job, Error **errp)
@@ -704,7 +709,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,

     if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
          on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
-        !bdrv_iostatus_is_enabled(bs)) {
+        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
         error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
         return;
     }
@@ -740,8 +745,10 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
         return;
     }
     bdrv_set_enable_write_cache(s->target, true);
-    bdrv_set_on_error(s->target, on_target_error, on_target_error);
-    bdrv_iostatus_enable(s->target);
+    if (s->target->blk) {
+        blk_set_on_error(s->target->blk, on_target_error, on_target_error);
+        blk_iostatus_enable(s->target->blk);
+    }
     s->common.co = qemu_coroutine_create(mirror_run);
     trace_mirror_start(bs, s, s->common.co, opaque);
     qemu_coroutine_enter(s->common.co, s);
@@ -124,7 +124,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
     s->send_coroutine = qemu_coroutine_self();
     aio_context = bdrv_get_aio_context(bs);

-    aio_set_fd_handler(aio_context, s->sock,
+    aio_set_fd_handler(aio_context, s->sock, false,
                        nbd_reply_ready, nbd_restart_write, bs);
     if (qiov) {
         if (!s->is_unix) {
@@ -144,7 +144,8 @@ static int nbd_co_send_request(BlockDriverState *bs,
     } else {
         rc = nbd_send_request(s->sock, request);
     }
-    aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, bs);
+    aio_set_fd_handler(aio_context, s->sock, false,
+                       nbd_reply_ready, NULL, bs);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
     return rc;
@@ -348,14 +349,15 @@ int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
 void nbd_client_detach_aio_context(BlockDriverState *bs)
 {
     aio_set_fd_handler(bdrv_get_aio_context(bs),
-                       nbd_get_client_session(bs)->sock, NULL, NULL, NULL);
+                       nbd_get_client_session(bs)->sock,
+                       false, NULL, NULL, NULL);
 }

 void nbd_client_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
 {
     aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sock,
-                       nbd_reply_ready, NULL, bs);
+                       false, nbd_reply_ready, NULL, bs);
 }

 void nbd_client_close(BlockDriverState *bs)
block/nfs.c
@@ -63,11 +63,10 @@ static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
     if (ev != client->events) {
-        aio_set_fd_handler(client->aio_context,
-                           nfs_get_fd(client->context),
+        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
+                           false,
                            (ev & POLLIN) ? nfs_process_read : NULL,
-                           (ev & POLLOUT) ? nfs_process_write : NULL,
-                           client);
+                           (ev & POLLOUT) ? nfs_process_write : NULL, client);

     }
     client->events = ev;
@@ -242,9 +241,8 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;

-    aio_set_fd_handler(client->aio_context,
-                       nfs_get_fd(client->context),
-                       NULL, NULL, NULL);
+    aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
+                       false, NULL, NULL, NULL);
     client->events = 0;
 }

@@ -263,9 +261,8 @@ static void nfs_client_close(NFSClient *client)
         if (client->fh) {
             nfs_close(client->context, client->fh);
         }
-        aio_set_fd_handler(client->aio_context,
-                           nfs_get_fd(client->context),
-                           NULL, NULL, NULL);
+        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
+                           false, NULL, NULL, NULL);
         nfs_destroy_context(client->context);
     }
     memset(client, 0, sizeof(NFSClient));
block/qapi.c
@@ -301,17 +301,17 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
         info->tray_open = blk_dev_is_tray_open(blk);
     }

-    if (bdrv_iostatus_is_enabled(bs)) {
+    if (blk_iostatus_is_enabled(blk)) {
         info->has_io_status = true;
-        info->io_status = bs->iostatus;
+        info->io_status = blk_iostatus(blk);
     }

-    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
+    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
         info->has_dirty_bitmaps = true;
         info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
     }

-    if (bs->drv) {
+    if (bs && bs->drv) {
         info->has_inserted = true;
         info->inserted = bdrv_block_device_info(bs, errp);
         if (info->inserted == NULL) {
@@ -344,18 +344,22 @@ static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
     }

     s->stats = g_malloc0(sizeof(*s->stats));
-    s->stats->rd_bytes = bs->stats.nr_bytes[BLOCK_ACCT_READ];
-    s->stats->wr_bytes = bs->stats.nr_bytes[BLOCK_ACCT_WRITE];
-    s->stats->rd_operations = bs->stats.nr_ops[BLOCK_ACCT_READ];
-    s->stats->wr_operations = bs->stats.nr_ops[BLOCK_ACCT_WRITE];
-    s->stats->rd_merged = bs->stats.merged[BLOCK_ACCT_READ];
-    s->stats->wr_merged = bs->stats.merged[BLOCK_ACCT_WRITE];
-    s->stats->wr_highest_offset =
-        bs->stats.wr_highest_sector * BDRV_SECTOR_SIZE;
-    s->stats->flush_operations = bs->stats.nr_ops[BLOCK_ACCT_FLUSH];
-    s->stats->wr_total_time_ns = bs->stats.total_time_ns[BLOCK_ACCT_WRITE];
-    s->stats->rd_total_time_ns = bs->stats.total_time_ns[BLOCK_ACCT_READ];
-    s->stats->flush_total_time_ns = bs->stats.total_time_ns[BLOCK_ACCT_FLUSH];
+    if (bs->blk) {
+        BlockAcctStats *stats = blk_get_stats(bs->blk);
+
+        s->stats->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
+        s->stats->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
+        s->stats->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
+        s->stats->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
+        s->stats->rd_merged = stats->merged[BLOCK_ACCT_READ];
+        s->stats->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
+        s->stats->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
+        s->stats->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
+        s->stats->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
+        s->stats->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
+    }
+
+    s->stats->wr_highest_offset = bs->wr_highest_offset;

     if (bs->file) {
         s->has_parent = true;
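The direction of the hunk above, reduced to a sketch (illustrative only, not code from this series): I/O accounting now hangs off the BlockBackend, so counters are read through blk_get_stats(), and only when a BlockBackend is attached.

    /* Sketch: read one accounting counter through the BlockBackend, if any.
     * Assumes block_int.h and sysemu/block-backend.h are included. */
    static uint64_t queried_rd_bytes(BlockDriverState *bs)
    {
        if (bs->blk) {
            BlockAcctStats *stats = blk_get_stats(bs->blk);
            return stats->nr_bytes[BLOCK_ACCT_READ];
        }
        return 0;
    }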
@@ -127,11 +127,6 @@ do { \

 #define FTYPE_FILE   0
 #define FTYPE_CD     1
-#define FTYPE_FD     2
-
-/* if the FD is not accessed during that time (in ns), we try to
-   reopen it to see if the disk has been changed */
-#define FD_OPEN_TIMEOUT (1000000000)

 #define MAX_BLOCKSIZE 4096

@@ -141,13 +136,6 @@ typedef struct BDRVRawState {
     int open_flags;
     size_t buf_align;

-#if defined(__linux__)
-    /* linux floppy specific */
-    int64_t fd_open_time;
-    int64_t fd_error_time;
-    int fd_got_error;
-    int fd_media_changed;
-#endif
 #ifdef CONFIG_LINUX_AIO
     int use_aio;
     void *aio_ctx;
@@ -635,7 +623,7 @@ static int raw_reopen_prepare(BDRVReopenState *state,
     }
 #endif

-    if (s->type == FTYPE_FD || s->type == FTYPE_CD) {
+    if (s->type == FTYPE_CD) {
         raw_s->open_flags |= O_NONBLOCK;
     }

@@ -2187,47 +2175,6 @@ static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
 }

 #if defined(__linux__)
-/* Note: we do not have a reliable method to detect if the floppy is
-   present. The current method is to try to open the floppy at every
-   I/O and to keep it opened during a few hundreds of ms. */
-static int fd_open(BlockDriverState *bs)
-{
-    BDRVRawState *s = bs->opaque;
-    int last_media_present;
-
-    if (s->type != FTYPE_FD)
-        return 0;
-    last_media_present = (s->fd >= 0);
-    if (s->fd >= 0 &&
-        (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
-        qemu_close(s->fd);
-        s->fd = -1;
-        DPRINTF("Floppy closed\n");
-    }
-    if (s->fd < 0) {
-        if (s->fd_got_error &&
-            (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->fd_error_time) < FD_OPEN_TIMEOUT) {
-            DPRINTF("No floppy (open delayed)\n");
-            return -EIO;
-        }
-        s->fd = qemu_open(bs->filename, s->open_flags & ~O_NONBLOCK);
-        if (s->fd < 0) {
-            s->fd_error_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-            s->fd_got_error = 1;
-            if (last_media_present)
-                s->fd_media_changed = 1;
-            DPRINTF("No floppy\n");
-            return -EIO;
-        }
-        DPRINTF("Floppy opened\n");
-    }
-    if (!last_media_present)
-        s->fd_media_changed = 1;
-    s->fd_open_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-    s->fd_got_error = 0;
-    return 0;
-}
-
 static int hdev_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
 {
     BDRVRawState *s = bs->opaque;
@@ -2256,8 +2203,8 @@ static BlockAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
     return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
+#endif /* linux */

-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 static int fd_open(BlockDriverState *bs)
 {
     BDRVRawState *s = bs->opaque;
@@ -2267,14 +2214,6 @@ static int fd_open(BlockDriverState *bs)
         return 0;
     return -EIO;
 }
-#else /* !linux && !FreeBSD */
-
-static int fd_open(BlockDriverState *bs)
-{
-    return 0;
-}
-
-#endif /* !linux && !FreeBSD */

 static coroutine_fn BlockAIOCB *hdev_aio_discard(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors,
@@ -2318,14 +2257,13 @@ static int hdev_create(const char *filename, QemuOpts *opts,
     int64_t total_size = 0;
     bool has_prefix;

-    /* This function is used by all three protocol block drivers and therefore
-     * any of these three prefixes may be given.
+    /* This function is used by both protocol block drivers and therefore either
+     * of these prefixes may be given.
      * The return value has to be stored somewhere, otherwise this is an error
      * due to -Werror=unused-value. */
     has_prefix =
         strstart(filename, "host_device:", &filename) ||
-        strstart(filename, "host_cdrom:" , &filename) ||
-        strstart(filename, "host_floppy:", &filename);
+        strstart(filename, "host_cdrom:" , &filename);

     (void)has_prefix;

@@ -2405,155 +2343,6 @@ static BlockDriver bdrv_host_device = {
 #endif
 };

-#ifdef __linux__
-static void floppy_parse_filename(const char *filename, QDict *options,
-                                  Error **errp)
-{
-    /* The prefix is optional, just as for "file". */
-    strstart(filename, "host_floppy:", &filename);
-
-    qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
-}
-
-static int floppy_open(BlockDriverState *bs, QDict *options, int flags,
-                       Error **errp)
-{
-    BDRVRawState *s = bs->opaque;
-    Error *local_err = NULL;
-    int ret;
-
-    s->type = FTYPE_FD;
-
-    /* open will not fail even if no floppy is inserted, so add O_NONBLOCK */
-    ret = raw_open_common(bs, options, flags, O_NONBLOCK, &local_err);
-    if (ret) {
-        if (local_err) {
-            error_propagate(errp, local_err);
-        }
-        return ret;
-    }
-
-    /* close fd so that we can reopen it as needed */
-    qemu_close(s->fd);
-    s->fd = -1;
-    s->fd_media_changed = 1;
-
-    error_report("Host floppy pass-through is deprecated");
-    error_printf("Support for it will be removed in a future release.\n");
-    return 0;
-}
-
-static int floppy_probe_device(const char *filename)
-{
-    int fd, ret;
-    int prio = 0;
-    struct floppy_struct fdparam;
-    struct stat st;
-
-    if (strstart(filename, "/dev/fd", NULL) &&
-        !strstart(filename, "/dev/fdset/", NULL) &&
-        !strstart(filename, "/dev/fd/", NULL)) {
-        prio = 50;
-    }
-
-    fd = qemu_open(filename, O_RDONLY | O_NONBLOCK);
-    if (fd < 0) {
-        goto out;
-    }
-    ret = fstat(fd, &st);
-    if (ret == -1 || !S_ISBLK(st.st_mode)) {
-        goto outc;
-    }
-
-    /* Attempt to detect via a floppy specific ioctl */
-    ret = ioctl(fd, FDGETPRM, &fdparam);
-    if (ret >= 0)
-        prio = 100;
-
-outc:
-    qemu_close(fd);
-out:
-    return prio;
-}
-
-
-static int floppy_is_inserted(BlockDriverState *bs)
-{
-    return fd_open(bs) >= 0;
-}
-
-static int floppy_media_changed(BlockDriverState *bs)
-{
-    BDRVRawState *s = bs->opaque;
-    int ret;
-
-    /*
-     * XXX: we do not have a true media changed indication.
-     * It does not work if the floppy is changed without trying to read it.
-     */
-    fd_open(bs);
-    ret = s->fd_media_changed;
-    s->fd_media_changed = 0;
-    DPRINTF("Floppy changed=%d\n", ret);
-    return ret;
-}
-
-static void floppy_eject(BlockDriverState *bs, bool eject_flag)
-{
-    BDRVRawState *s = bs->opaque;
-    int fd;
-
-    if (s->fd >= 0) {
-        qemu_close(s->fd);
-        s->fd = -1;
-    }
-    fd = qemu_open(bs->filename, s->open_flags | O_NONBLOCK);
-    if (fd >= 0) {
-        if (ioctl(fd, FDEJECT, 0) < 0)
-            perror("FDEJECT");
-        qemu_close(fd);
-    }
-}
-
-static BlockDriver bdrv_host_floppy = {
-    .format_name        = "host_floppy",
-    .protocol_name      = "host_floppy",
-    .instance_size      = sizeof(BDRVRawState),
-    .bdrv_needs_filename = true,
-    .bdrv_probe_device  = floppy_probe_device,
-    .bdrv_parse_filename = floppy_parse_filename,
-    .bdrv_file_open     = floppy_open,
-    .bdrv_close         = raw_close,
-    .bdrv_reopen_prepare = raw_reopen_prepare,
-    .bdrv_reopen_commit  = raw_reopen_commit,
-    .bdrv_reopen_abort   = raw_reopen_abort,
-    .bdrv_create        = hdev_create,
-    .create_opts        = &raw_create_opts,
-
-    .bdrv_aio_readv     = raw_aio_readv,
-    .bdrv_aio_writev    = raw_aio_writev,
-    .bdrv_aio_flush     = raw_aio_flush,
-    .bdrv_refresh_limits = raw_refresh_limits,
-    .bdrv_io_plug       = raw_aio_plug,
-    .bdrv_io_unplug     = raw_aio_unplug,
-    .bdrv_flush_io_queue = raw_aio_flush_io_queue,
-
-    .bdrv_truncate      = raw_truncate,
-    .bdrv_getlength     = raw_getlength,
-    .has_variable_length = true,
-    .bdrv_get_allocated_file_size
-                        = raw_get_allocated_file_size,
-
-    .bdrv_detach_aio_context = raw_detach_aio_context,
-    .bdrv_attach_aio_context = raw_attach_aio_context,
-
-    /* removable device support */
-    .bdrv_is_inserted   = floppy_is_inserted,
-    .bdrv_media_changed = floppy_media_changed,
-    .bdrv_eject         = floppy_eject,
-};
-#endif
-
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 static void cdrom_parse_filename(const char *filename, QDict *options,
                                  Error **errp)
@@ -2609,15 +2398,13 @@ out:
     return prio;
 }

-static int cdrom_is_inserted(BlockDriverState *bs)
+static bool cdrom_is_inserted(BlockDriverState *bs)
 {
     BDRVRawState *s = bs->opaque;
     int ret;

     ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
-    if (ret == CDS_DISC_OK)
-        return 1;
-    return 0;
+    return ret == CDS_DISC_OK;
 }

 static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
@@ -2743,7 +2530,7 @@ static int cdrom_reopen(BlockDriverState *bs)
     return 0;
 }

-static int cdrom_is_inserted(BlockDriverState *bs)
+static bool cdrom_is_inserted(BlockDriverState *bs)
 {
     return raw_getlength(bs) > 0;
 }
@@ -2831,7 +2618,6 @@ static void bdrv_file_init(void)
     bdrv_register(&bdrv_file);
     bdrv_register(&bdrv_host_device);
 #ifdef __linux__
-    bdrv_register(&bdrv_host_floppy);
    bdrv_register(&bdrv_host_cdrom);
 #endif
 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
@@ -154,11 +154,6 @@ static int raw_truncate(BlockDriverState *bs, int64_t offset)
     return bdrv_truncate(bs->file->bs, offset);
 }

-static int raw_is_inserted(BlockDriverState *bs)
-{
-    return bdrv_is_inserted(bs->file->bs);
-}
-
 static int raw_media_changed(BlockDriverState *bs)
 {
     return bdrv_media_changed(bs->file->bs);
@@ -264,7 +259,6 @@ BlockDriver bdrv_raw = {
     .bdrv_refresh_limits  = &raw_refresh_limits,
     .bdrv_probe_blocksizes = &raw_probe_blocksizes,
     .bdrv_probe_geometry  = &raw_probe_geometry,
-    .bdrv_is_inserted     = &raw_is_inserted,
     .bdrv_media_changed   = &raw_media_changed,
     .bdrv_eject           = &raw_eject,
     .bdrv_lock_medium     = &raw_lock_medium,
@@ -651,14 +651,16 @@ static coroutine_fn void do_co_req(void *opaque)
     unsigned int *rlen = srco->rlen;

     co = qemu_coroutine_self();
-    aio_set_fd_handler(srco->aio_context, sockfd, NULL, restart_co_req, co);
+    aio_set_fd_handler(srco->aio_context, sockfd, false,
+                       NULL, restart_co_req, co);

     ret = send_co_req(sockfd, hdr, data, wlen);
     if (ret < 0) {
         goto out;
     }

-    aio_set_fd_handler(srco->aio_context, sockfd, restart_co_req, NULL, co);
+    aio_set_fd_handler(srco->aio_context, sockfd, false,
+                       restart_co_req, NULL, co);

     ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
     if (ret != sizeof(*hdr)) {
@@ -683,7 +685,8 @@ static coroutine_fn void do_co_req(void *opaque)
 out:
     /* there is at most one request for this sockfd, so it is safe to
      * set each handler to NULL. */
-    aio_set_fd_handler(srco->aio_context, sockfd, NULL, NULL, NULL);
+    aio_set_fd_handler(srco->aio_context, sockfd, false,
+                       NULL, NULL, NULL);

     srco->ret = ret;
     srco->finished = true;
@@ -735,7 +738,8 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
     BDRVSheepdogState *s = opaque;
     AIOReq *aio_req, *next;

-    aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
+                       NULL, NULL);
     close(s->fd);
     s->fd = -1;

@@ -938,7 +942,8 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
         return fd;
     }

-    aio_set_fd_handler(s->aio_context, fd, co_read_response, NULL, s);
+    aio_set_fd_handler(s->aio_context, fd, false,
+                       co_read_response, NULL, s);
     return fd;
 }

@@ -1199,7 +1204,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,

     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
-    aio_set_fd_handler(s->aio_context, s->fd,
+    aio_set_fd_handler(s->aio_context, s->fd, false,
                        co_read_response, co_write_request, s);
     socket_set_cork(s->fd, 1);

@@ -1218,7 +1223,8 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 out:
     socket_set_cork(s->fd, 0);
-    aio_set_fd_handler(s->aio_context, s->fd, co_read_response, NULL, s);
+    aio_set_fd_handler(s->aio_context, s->fd, false,
+                       co_read_response, NULL, s);
     s->co_send = NULL;
     qemu_co_mutex_unlock(&s->lock);
 }
@@ -1368,7 +1374,8 @@ static void sd_detach_aio_context(BlockDriverState *bs)
 {
     BDRVSheepdogState *s = bs->opaque;

-    aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
+                       NULL, NULL);
 }

 static void sd_attach_aio_context(BlockDriverState *bs,
@@ -1377,7 +1384,8 @@ static void sd_attach_aio_context(BlockDriverState *bs,
     BDRVSheepdogState *s = bs->opaque;

     s->aio_context = new_context;
-    aio_set_fd_handler(new_context, s->fd, co_read_response, NULL, s);
+    aio_set_fd_handler(new_context, s->fd, false,
+                       co_read_response, NULL, s);
 }

 /* TODO Convert to fine grained options */
@@ -1490,7 +1498,8 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     g_free(buf);
     return 0;
 out:
-    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
+                       false, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1528,7 +1537,8 @@ static void sd_reopen_commit(BDRVReopenState *state)
     BDRVSheepdogState *s = state->bs->opaque;

     if (s->fd) {
-        aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
+        aio_set_fd_handler(s->aio_context, s->fd, false,
+                           NULL, NULL, NULL);
         closesocket(s->fd);
     }

@@ -1551,7 +1561,8 @@ static void sd_reopen_abort(BDRVReopenState *state)
     }

     if (re_s->fd) {
-        aio_set_fd_handler(s->aio_context, re_s->fd, NULL, NULL, NULL);
+        aio_set_fd_handler(s->aio_context, re_s->fd, false,
+                           NULL, NULL, NULL);
         closesocket(re_s->fd);
     }

@@ -1935,7 +1946,8 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }

-    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
+                       false, NULL, NULL, NULL);
     closesocket(s->fd);
     g_free(s->host_spec);
 }
@@ -800,14 +800,15 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
             rd_handler, wr_handler);

     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       rd_handler, wr_handler, co);
+                       false, rd_handler, wr_handler, co);
 }

 static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
                                           BlockDriverState *bs)
 {
     DPRINTF("s->sock=%d", s->sock);
-    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
+                       false, NULL, NULL, NULL);
 }

 /* A non-blocking call returned EAGAIN, so yield, ensuring the
@@ -16,6 +16,7 @@
 #include "block/blockjob.h"
 #include "qapi/qmp/qerror.h"
 #include "qemu/ratelimit.h"
+#include "sysemu/block-backend.h"

 enum {
     /*
@@ -222,7 +223,7 @@ void stream_start(BlockDriverState *bs, BlockDriverState *base,

     if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
          on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
-        !bdrv_iostatus_is_enabled(bs)) {
+        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
         error_setg(errp, QERR_INVALID_PARAMETER, "on-error");
         return;
     }
@@ -33,8 +33,7 @@
  * its own locking.
  *
  * This locking is however handled internally in this file, so it's
- * mostly transparent to outside users (but see the documentation in
- * throttle_groups_lock()).
+ * transparent to outside users.
  *
  * The whole ThrottleGroup structure is private and invisible to
  * outside users, that only use it through its ThrottleState.
@@ -76,9 +75,9 @@ static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
 * created.
 *
 * @name: the name of the ThrottleGroup
- * @ret: the ThrottleGroup
+ * @ret: the ThrottleState member of the ThrottleGroup
 */
-static ThrottleGroup *throttle_group_incref(const char *name)
+ThrottleState *throttle_group_incref(const char *name)
 {
     ThrottleGroup *tg = NULL;
     ThrottleGroup *iter;
@@ -108,7 +107,7 @@ static ThrottleGroup *throttle_group_incref(const char *name)

     qemu_mutex_unlock(&throttle_groups_lock);

-    return tg;
+    return &tg->ts;
 }

 /* Decrease the reference count of a ThrottleGroup.
@@ -116,10 +115,12 @@ static ThrottleGroup *throttle_group_incref(const char *name)
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed.
 *
- * @tg: The ThrottleGroup to unref
+ * @ts: The ThrottleGroup to unref, given by its ThrottleState member
 */
-static void throttle_group_unref(ThrottleGroup *tg)
+void throttle_group_unref(ThrottleState *ts)
 {
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+
     qemu_mutex_lock(&throttle_groups_lock);
     if (--tg->refcount == 0) {
         QTAILQ_REMOVE(&throttle_groups, tg, list);
@@ -401,7 +402,8 @@ static void write_timer_cb(void *opaque)
 void throttle_group_register_bs(BlockDriverState *bs, const char *groupname)
 {
     int i;
-    ThrottleGroup *tg = throttle_group_incref(groupname);
+    ThrottleState *ts = throttle_group_incref(groupname);
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     int clock_type = QEMU_CLOCK_REALTIME;

     if (qtest_enabled()) {
@@ -409,7 +411,7 @@ void throttle_group_register_bs(BlockDriverState *bs, const char *groupname)
         clock_type = QEMU_CLOCK_VIRTUAL;
     }

-    bs->throttle_state = &tg->ts;
+    bs->throttle_state = ts;

     qemu_mutex_lock(&tg->lock);
     /* If the ThrottleGroup is new set this BlockDriverState as the token */
@@ -461,38 +463,10 @@ void throttle_group_unregister_bs(BlockDriverState *bs)
     throttle_timers_destroy(&bs->throttle_timers);
     qemu_mutex_unlock(&tg->lock);

-    throttle_group_unref(tg);
+    throttle_group_unref(&tg->ts);
     bs->throttle_state = NULL;
 }

-/* Acquire the lock of this throttling group.
- *
- * You won't normally need to use this. None of the functions from the
- * ThrottleGroup API require you to acquire the lock since all of them
- * deal with it internally.
- *
- * This should only be used in exceptional cases when you want to
- * access the protected fields of a BlockDriverState directly
- * (e.g. bdrv_swap()).
- *
- * @bs: a BlockDriverState that is member of the group
- */
-void throttle_group_lock(BlockDriverState *bs)
-{
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
-    qemu_mutex_lock(&tg->lock);
-}
-
-/* Release the lock of this throttling group.
- *
- * See the comments in throttle_group_lock().
- */
-void throttle_group_unlock(BlockDriverState *bs)
-{
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
-    qemu_mutex_unlock(&tg->lock);
-}
-
 static void throttle_groups_init(void)
 {
     qemu_mutex_init(&throttle_groups_lock);
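To make the reworked reference counting above concrete, here is a sketch of the pattern throttle_group_register_bs() now follows (illustrative only; the group name is an arbitrary example, and ThrottleGroup is only visible inside throttle-groups.c, so this mirrors in-file usage rather than a public API):

    /* Sketch: take a reference on a named group, use its lock, drop the reference. */
    static void throttle_group_example(void)
    {
        ThrottleState *ts = throttle_group_incref("example-group");
        ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

        qemu_mutex_lock(&tg->lock);
        /* ... touch state protected by the group lock ... */
        qemu_mutex_unlock(&tg->lock);

        throttle_group_unref(ts);
    }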
@@ -174,7 +174,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
 void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &aio->e, NULL);
+    aio_set_event_notifier(old_context, &aio->e, false, NULL);
     aio->is_aio_context_attached = false;
 }

@@ -182,7 +182,8 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *new_context)
 {
     aio->is_aio_context_attached = true;
-    aio_set_event_notifier(new_context, &aio->e, win32_aio_completion_cb);
+    aio_set_event_notifier(new_context, &aio->e, false,
+                           win32_aio_completion_cb);
 }

 QEMUWin32AIOState *win32_aio_init(void)
blockdev.c — 642 lines changed (file diff suppressed because it is too large)
@@ -29,6 +29,7 @@
 #include "block/block.h"
 #include "block/blockjob.h"
 #include "block/block_int.h"
+#include "sysemu/block-backend.h"
 #include "qapi/qmp/qerror.h"
 #include "qapi/qmp/qjson.h"
 #include "qemu/coroutine.h"
@@ -354,8 +355,8 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockDriverState *bs,
         job->user_paused = true;
         block_job_pause(job);
         block_job_iostatus_set_err(job, error);
-        if (bs != job->bs) {
-            bdrv_iostatus_set_err(bs, error);
+        if (bs->blk && bs != job->bs) {
+            blk_iostatus_set_err(bs->blk, error);
         }
     }
     return action;
@@ -283,7 +283,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)

     /* Get this show started by hooking up our callbacks */
     aio_context_acquire(s->ctx);
-    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
+    aio_set_event_notifier(s->ctx, &s->host_notifier, true,
+                           handle_notify);
     aio_context_release(s->ctx);
     return;

@@ -319,7 +320,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     aio_context_acquire(s->ctx);

     /* Stop notifications for new requests from guest */
-    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
+    aio_set_event_notifier(s->ctx, &s->host_notifier, true, NULL);

     /* Drain and switch bs back to the QEMU main loop */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
@@ -192,6 +192,8 @@ typedef struct FDrive {
     uint8_t ro;               /* Is read-only           */
     uint8_t media_changed;    /* Is media changed       */
     uint8_t media_rate;       /* Data rate of medium    */
+
+    bool media_inserted;      /* Is there a medium in the tray */
 } FDrive;

 static void fd_init(FDrive *drv)
@@ -261,7 +263,7 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect,
 #endif
         drv->head = head;
         if (drv->track != track) {
-            if (drv->blk != NULL && blk_is_inserted(drv->blk)) {
+            if (drv->media_inserted) {
                 drv->media_changed = 0;
             }
             ret = 1;
@@ -270,7 +272,7 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect,
         drv->sect = sect;
     }

-    if (drv->blk == NULL || !blk_is_inserted(drv->blk)) {
+    if (!drv->media_inserted) {
         ret = 2;
     }

@@ -296,7 +298,7 @@ static void fd_revalidate(FDrive *drv)
         ro = blk_is_read_only(drv->blk);
         pick_geometry(drv->blk, &nb_heads, &max_track,
                       &last_sect, drv->drive, &drive, &rate);
-        if (!blk_is_inserted(drv->blk)) {
+        if (!drv->media_inserted) {
             FLOPPY_DPRINTF("No disk in drive\n");
         } else {
             FLOPPY_DPRINTF("Floppy disk (%d h %d t %d s) %s\n", nb_heads,
@@ -692,7 +694,7 @@ static bool fdrive_media_changed_needed(void *opaque)
 {
     FDrive *drive = opaque;

-    return (drive->blk != NULL && drive->media_changed != 1);
+    return (drive->media_inserted && drive->media_changed != 1);
 }

 static const VMStateDescription vmstate_fdrive_media_changed = {
@@ -2184,12 +2186,21 @@ static void fdctrl_change_cb(void *opaque, bool load)
 {
     FDrive *drive = opaque;

+    drive->media_inserted = load && drive->blk && blk_is_inserted(drive->blk);
+
     drive->media_changed = 1;
     fd_revalidate(drive);
 }

+static bool fdctrl_is_tray_open(void *opaque)
+{
+    FDrive *drive = opaque;
+    return !drive->media_inserted;
+}
+
 static const BlockDevOps fdctrl_block_ops = {
     .change_media_cb = fdctrl_change_cb,
+    .is_tray_open = fdctrl_is_tray_open,
 };

 /* Init functions */
@@ -2217,6 +2228,7 @@ static void fdctrl_connect_drives(FDCtrl *fdctrl, Error **errp)
         fdctrl_change_cb(drive, 0);
         if (drive->blk) {
             blk_set_dev_ops(drive->blk, &fdctrl_block_ops, drive);
+            drive->media_inserted = blk_is_inserted(drive->blk);
         }
     }
 }
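The fdc change follows the usual BlockDevOps pattern; a generic sketch (hypothetical device type, not from this patch) of how a device publishes media-change and tray state to the block layer:

    /* Sketch: a hypothetical device wiring change_media_cb and is_tray_open. */
    typedef struct ExampleDev {
        BlockBackend *blk;
        bool media_inserted;
    } ExampleDev;

    static void example_change_media_cb(void *opaque, bool load)
    {
        ExampleDev *d = opaque;
        d->media_inserted = load && d->blk && blk_is_inserted(d->blk);
    }

    static bool example_is_tray_open(void *opaque)
    {
        ExampleDev *d = opaque;
        return !d->media_inserted;
    }

    static const BlockDevOps example_block_ops = {
        .change_media_cb = example_change_media_cb,
        .is_tray_open    = example_is_tray_open,
    };

    /* Registered with: blk_set_dev_ops(d->blk, &example_block_ops, d); */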
@@ -931,9 +931,11 @@ static int blk_connect(struct XenDevice *xendev)
     blk_attach_dev_nofail(blkdev->blk, blkdev);
     blkdev->file_size = blk_getlength(blkdev->blk);
     if (blkdev->file_size < 0) {
+        BlockDriverState *bs = blk_bs(blkdev->blk);
+        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
         xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                       (int)blkdev->file_size, strerror(-blkdev->file_size),
-                      bdrv_get_format_name(blk_bs(blkdev->blk)) ?: "-");
+                      drv_name ?: "-");
         blkdev->file_size = 0;
     }

@@ -60,7 +60,7 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     r = g_new(VirtIOSCSIVring, 1);
     r->host_notifier = *virtio_queue_get_host_notifier(vq);
     r->guest_notifier = *virtio_queue_get_guest_notifier(vq);
-    aio_set_event_notifier(s->ctx, &r->host_notifier, handler);
+    aio_set_event_notifier(s->ctx, &r->host_notifier, true, handler);

     r->parent = s;

@@ -71,7 +71,7 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     return r;

 fail_vring:
-    aio_set_event_notifier(s->ctx, &r->host_notifier, NULL);
+    aio_set_event_notifier(s->ctx, &r->host_notifier, true, NULL);
     k->set_host_notifier(qbus->parent, n, false);
     g_free(r);
     return NULL;
@@ -162,14 +162,17 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
     int i;

     if (s->ctrl_vring) {
-        aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier, NULL);
+        aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
+                               true, NULL);
     }
     if (s->event_vring) {
-        aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier, NULL);
+        aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
+                               true, NULL);
     }
     if (s->cmd_vrings) {
         for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
-            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
+            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+                                   true, NULL);
         }
     }
 }
@@ -290,10 +293,13 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)

     aio_context_acquire(s->ctx);

-    aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier, NULL);
-    aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier, NULL);
+    aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
+                           true, NULL);
+    aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
+                           true, NULL);
     for (i = 0; i < vs->conf.num_queues; i++) {
-        aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
+        aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+                               true, NULL);
     }

     blk_drain_all(); /* ensure there are no in-flight requests */
@@ -613,20 +613,22 @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
         return;
     }

-    bdrv_add_key(blk_bs(blk), NULL, &err);
-    if (err) {
-        if (monitor_cur_is_qmp()) {
-            error_propagate(errp, err);
-            return;
-        }
-        error_free(err);
-        err = NULL;
-        if (cur_mon) {
-            monitor_read_bdrv_key_start(cur_mon, blk_bs(blk),
-                                        usb_msd_password_cb, s);
-            s->dev.auto_attach = 0;
-        } else {
-            autostart = 0;
+    if (blk_bs(blk)) {
+        bdrv_add_key(blk_bs(blk), NULL, &err);
+        if (err) {
+            if (monitor_cur_is_qmp()) {
+                error_propagate(errp, err);
+                return;
+            }
+            error_free(err);
+            err = NULL;
+            if (cur_mon) {
+                monitor_read_bdrv_key_start(cur_mon, blk_bs(blk),
+                                            usb_msd_password_cb, s);
+                s->dev.auto_attach = 0;
+            } else {
+                autostart = 0;
+            }
         }
     }

@@ -40,7 +40,6 @@ typedef struct BlockAcctStats {
     uint64_t nr_ops[BLOCK_MAX_IOTYPE];
     uint64_t total_time_ns[BLOCK_MAX_IOTYPE];
     uint64_t merged[BLOCK_MAX_IOTYPE];
-    uint64_t wr_highest_sector;
 } BlockAcctStats;

 typedef struct BlockAcctCookie {
@@ -52,8 +51,6 @@ typedef struct BlockAcctCookie {
 void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
                       int64_t bytes, enum BlockAcctType type);
 void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie);
-void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
-                               unsigned int nb_sectors);
 void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
                            int num_requests);

@@ -122,6 +122,8 @@ struct AioContext {

     /* TimerLists for calling timers - one per clock type */
     QEMUTimerListGroup tlg;
+
+    int external_disable_cnt;
 };

 /**
@@ -299,6 +301,7 @@ bool aio_poll(AioContext *ctx, bool blocking);
 */
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
+                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         void *opaque);
@@ -312,6 +315,7 @@ void aio_set_fd_handler(AioContext *ctx,
 */
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
+                            bool is_external,
                             EventNotifierHandler *io_read);

 /* Return a GSource that lets the main loop poll the file descriptors attached
@@ -373,4 +377,40 @@ static inline void aio_timer_init(AioContext *ctx,
 */
 int64_t aio_compute_timeout(AioContext *ctx);

+/**
+ * aio_disable_external:
+ * @ctx: the aio context
+ *
+ * Disable the further processing of external clients.
+ */
+static inline void aio_disable_external(AioContext *ctx)
+{
+    atomic_inc(&ctx->external_disable_cnt);
+}
+
+/**
+ * aio_enable_external:
+ * @ctx: the aio context
+ *
+ * Enable the processing of external clients.
+ */
+static inline void aio_enable_external(AioContext *ctx)
+{
+    assert(ctx->external_disable_cnt > 0);
+    atomic_dec(&ctx->external_disable_cnt);
+}
+
+/**
+ * aio_node_check:
+ * @ctx: the aio context
+ * @is_external: Whether or not the checked node is an external event source.
+ *
+ * Check if the node's is_external flag is okay to be polled by the ctx at this
+ * moment. True means green light.
+ */
+static inline bool aio_node_check(AioContext *ctx, bool is_external)
+{
+    return !is_external || !atomic_read(&ctx->external_disable_cnt);
+}
+
 #endif
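A short sketch of how the pieces added to this header are meant to fit together (illustrative, not from the series; the callback and opaque pointer are placeholders): handlers registered with is_external == true are skipped by the event loop while the disable counter is non-zero.

    /* Sketch: register an external fd handler and temporarily suppress it. */
    static void external_fd_example(AioContext *ctx, int fd,
                                    IOHandler *read_cb, void *opaque)
    {
        /* true: this fd belongs to an external client (e.g. a device model) */
        aio_set_fd_handler(ctx, fd, true, read_cb, NULL, opaque);

        aio_disable_external(ctx);
        /* here aio_node_check(ctx, true) returns false, so the fd is not polled */
        aio_enable_external(ctx);
    }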
@ -174,11 +174,6 @@ typedef enum BlockOpType {
|
|||||||
BLOCK_OP_TYPE_MAX,
|
BLOCK_OP_TYPE_MAX,
|
||||||
} BlockOpType;
|
} BlockOpType;
|
||||||
|
|
||||||
void bdrv_iostatus_enable(BlockDriverState *bs);
|
|
||||||
void bdrv_iostatus_reset(BlockDriverState *bs);
|
|
||||||
void bdrv_iostatus_disable(BlockDriverState *bs);
|
|
||||||
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs);
|
|
||||||
void bdrv_iostatus_set_err(BlockDriverState *bs, int error);
|
|
||||||
void bdrv_info_print(Monitor *mon, const QObject *data);
|
 void bdrv_info_print(Monitor *mon, const QObject *data);
 void bdrv_info(Monitor *mon, QObject **ret_data);
 void bdrv_stats_print(Monitor *mon, const QObject *data);
@@ -389,17 +384,11 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
 int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                             int64_t sector_num, int nb_sectors, int *pnum);
 
-void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
-                       BlockdevOnError on_write_error);
-BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read);
-BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error);
-void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
-                       bool is_read, int error);
 int bdrv_is_read_only(BlockDriverState *bs);
 int bdrv_is_sg(BlockDriverState *bs);
 int bdrv_enable_write_cache(BlockDriverState *bs);
 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce);
-int bdrv_is_inserted(BlockDriverState *bs);
+bool bdrv_is_inserted(BlockDriverState *bs);
 int bdrv_media_changed(BlockDriverState *bs);
 void bdrv_lock_medium(BlockDriverState *bs, bool locked);
 void bdrv_eject(BlockDriverState *bs, bool eject_flag);
@@ -466,7 +455,6 @@ void bdrv_img_create(const char *filename, const char *fmt,
 size_t bdrv_min_mem_align(BlockDriverState *bs);
 /* Returns optimal alignment in bytes for bounce buffer */
 size_t bdrv_opt_mem_align(BlockDriverState *bs);
-void bdrv_set_guest_block_size(BlockDriverState *bs, int align);
 void *qemu_blockalign(BlockDriverState *bs, size_t size);
 void *qemu_blockalign0(BlockDriverState *bs, size_t size);
 void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
@@ -622,6 +610,23 @@ void bdrv_io_plug(BlockDriverState *bs);
 void bdrv_io_unplug(BlockDriverState *bs);
 void bdrv_flush_io_queue(BlockDriverState *bs);
 
-BlockAcctStats *bdrv_get_stats(BlockDriverState *bs);
+/**
+ * bdrv_drained_begin:
+ *
+ * Begin a quiesced section for exclusive access to the BDS, by disabling
+ * external request sources including NBD server and device model. Note that
+ * this doesn't block timers or coroutines from submitting more requests, which
+ * means block_job_pause is still necessary.
+ *
+ * This function can be recursive.
+ */
+void bdrv_drained_begin(BlockDriverState *bs);
+
+/**
+ * bdrv_drained_end:
+ *
+ * End a quiescent section started by bdrv_drained_begin().
+ */
+void bdrv_drained_end(BlockDriverState *bs);
 
 #endif
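The doc comment above is the contract for the new quiesce API. A minimal sketch of how a caller might use it, assuming only what the comment states (run_quiesced() and do_work() are hypothetical names, and a block job touching the BDS would still need block_job_pause()):

#include "block/block.h"

/* Sketch only: run do_work() while external request sources are quiesced. */
static void run_quiesced(BlockDriverState *bs, void (*do_work)(BlockDriverState *))
{
    bdrv_drained_begin(bs);  /* disable external clients such as NBD and device models */
    do_work(bs);             /* exclusive access to bs; timers/coroutines are not blocked */
    bdrv_drained_end(bs);    /* end the quiescent section */
}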
@@ -26,6 +26,7 @@
 
 #include "block/accounting.h"
 #include "block/block.h"
+#include "block/throttle-groups.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
 #include "qemu/coroutine.h"
@@ -212,7 +213,7 @@ struct BlockDriver {
                              const char *backing_file, const char *backing_fmt);
 
     /* removable device specific */
-    int (*bdrv_is_inserted)(BlockDriverState *bs);
+    bool (*bdrv_is_inserted)(BlockDriverState *bs);
     int (*bdrv_media_changed)(BlockDriverState *bs);
     void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
     void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
@@ -399,8 +400,8 @@ struct BlockDriverState {
     unsigned pending_reqs[2];
     QLIST_ENTRY(BlockDriverState) round_robin;
 
-    /* I/O stats (display with "info blockstats"). */
-    BlockAcctStats stats;
+    /* Offset after the highest byte written to */
+    uint64_t wr_highest_offset;
 
     /* I/O Limits */
     BlockLimits bl;
@@ -411,18 +412,9 @@ struct BlockDriverState {
     /* Alignment requirement for offset/length of I/O requests */
     unsigned int request_alignment;
 
-    /* the block size for which the guest device expects atomicity */
-    int guest_block_size;
-
     /* do we need to tell the quest if we have a volatile write cache? */
    int enable_write_cache;
 
-    /* NOTE: the following infos are only hints for real hardware
-       drivers. They are not used by the block driver */
-    BlockdevOnError on_read_error, on_write_error;
-    bool iostatus_enabled;
-    BlockDeviceIoStatus iostatus;
-
     /* the following member gives a name to every node on the bs graph. */
     char node_name[32];
     /* element of the list of named nodes building the graph */
@@ -456,6 +448,17 @@ struct BlockDriverState {
     /* threshold limit for writes, in bytes. "High water mark". */
     uint64_t write_threshold_offset;
     NotifierWithReturn write_threshold_notifier;
+
+    int quiesce_counter;
+};
+
+struct BlockBackendRootState {
+    int open_flags;
+    bool read_only;
+    BlockdevDetectZeroesOptions detect_zeroes;
+
+    char *throttle_group;
+    ThrottleState *throttle_state;
 };
 
 static inline BlockDriverState *backing_bs(BlockDriverState *bs)
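Since bdrv_is_inserted() and the driver callback now return bool, a removable-media driver changes only its signature; a hedged sketch for a hypothetical driver (FooState and its fields are invented purely for illustration):

/* Hypothetical driver state; not part of this series. */
typedef struct FooState {
    bool media_present;
    bool tray_open;
} FooState;

/* Matches the new 'bool (*bdrv_is_inserted)(BlockDriverState *bs)' callback. */
static bool foo_is_inserted(BlockDriverState *bs)
{
    FooState *s = bs->opaque;

    return s->media_present && !s->tray_open;
}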
@@ -30,6 +30,9 @@
 
 const char *throttle_group_get_name(BlockDriverState *bs);
 
+ThrottleState *throttle_group_incref(const char *name);
+void throttle_group_unref(ThrottleState *ts);
+
 void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg);
 void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg);
 
@@ -40,7 +43,4 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
                                                         unsigned int bytes,
                                                         bool is_write);
 
-void throttle_group_lock(BlockDriverState *bs);
-void throttle_group_unlock(BlockDriverState *bs);
-
 #endif
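With throttle_group_lock()/unlock() gone, a throttling user is expected to take a reference on a group by name and drop it when done. A small sketch under that assumption (enable_io_throttling()/disable_io_throttling() are invented names, and the "created on first use" behaviour is an assumption about these helpers):

/* Sketch: hold a ThrottleState reference for as long as throttling is active. */
static ThrottleState *enable_io_throttling(const char *group_name)
{
    /* Presumably creates the group on first use and bumps its refcount. */
    return throttle_group_incref(group_name);
}

static void disable_io_throttling(ThrottleState *ts)
{
    /* Drops the reference; the group can go away once unused. */
    throttle_group_unref(ts);
}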
@@ -11,6 +11,7 @@ typedef struct AddressSpace AddressSpace;
 typedef struct AioContext AioContext;
 typedef struct AudioState AudioState;
 typedef struct BlockBackend BlockBackend;
+typedef struct BlockBackendRootState BlockBackendRootState;
 typedef struct BlockDriverState BlockDriverState;
 typedef struct BusClass BusClass;
 typedef struct BusState BusState;
@@ -72,10 +72,16 @@ BlockBackend *blk_by_name(const char *name);
 BlockBackend *blk_next(BlockBackend *blk);
 
 BlockDriverState *blk_bs(BlockBackend *blk);
+void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs);
 
 void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk);
 
 void blk_iostatus_enable(BlockBackend *blk);
+bool blk_iostatus_is_enabled(const BlockBackend *blk);
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
+void blk_iostatus_disable(BlockBackend *blk);
+void blk_iostatus_reset(BlockBackend *blk);
+void blk_iostatus_set_err(BlockBackend *blk, int error);
 int blk_attach_dev(BlockBackend *blk, void *dev);
 void blk_attach_dev_nofail(BlockBackend *blk, void *dev);
 void blk_detach_dev(BlockBackend *blk, void *dev);
@@ -120,6 +126,8 @@ int blk_flush(BlockBackend *blk);
 int blk_flush_all(void);
 void blk_drain(BlockBackend *blk);
 void blk_drain_all(void);
+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+                      BlockdevOnError on_write_error);
 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                       int error);
@@ -130,7 +138,8 @@ int blk_is_sg(BlockBackend *blk);
 int blk_enable_write_cache(BlockBackend *blk);
 void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
 void blk_invalidate_cache(BlockBackend *blk, Error **errp);
-int blk_is_inserted(BlockBackend *blk);
+bool blk_is_inserted(BlockBackend *blk);
+bool blk_is_available(BlockBackend *blk);
 void blk_lock_medium(BlockBackend *blk, bool locked);
 void blk_eject(BlockBackend *blk, bool eject_flag);
 int blk_get_flags(BlockBackend *blk);
@@ -155,6 +164,8 @@ void blk_add_close_notifier(BlockBackend *blk, Notifier *notify);
 void blk_io_plug(BlockBackend *blk);
 void blk_io_unplug(BlockBackend *blk);
 BlockAcctStats *blk_get_stats(BlockBackend *blk);
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
+void blk_update_root_state(BlockBackend *blk);
 
 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                   BlockCompletionFunc *cb, void *opaque);
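Several of these additions exist so that a BlockBackend can precede or outlive its BlockDriverState. A hedged sketch of the intended usage pattern, with attach_and_check() as an invented helper name:

#include <errno.h>

/* Sketch: give an empty BlockBackend a root node, then verify a usable medium
 * before issuing I/O. */
static int attach_and_check(BlockBackend *blk, BlockDriverState *bs)
{
    if (!blk_bs(blk)) {
        blk_insert_bs(blk, bs);       /* attach a BDS tree to the empty backend */
    }
    if (!blk_is_available(blk)) {     /* no medium inserted (or tray open) */
        return -ENOMEDIUM;
    }
    return 0;
}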
@@ -55,7 +55,8 @@ void qemu_set_fd_handler(int fd,
                          void *opaque)
 {
     iohandler_init();
-    aio_set_fd_handler(iohandler_ctx, fd, fd_read, fd_write, opaque);
+    aio_set_fd_handler(iohandler_ctx, fd, false,
+                       fd_read, fd_write, opaque);
 }
 
 /* reaping of zombies. right now we're not passing the status to
@@ -808,6 +808,11 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                 return -EINVAL;
             }
             bs = blk_bs(blk);
+            if (!bs) {
+                fprintf(stderr, "Block device %s has no medium\n",
+                        device_name);
+                return -EINVAL;
+            }
 
             if (bs != bs_prev) {
                 bs_prev = bs;
@@ -4145,6 +4145,10 @@ int monitor_read_block_device_key(Monitor *mon, const char *device,
         monitor_printf(mon, "Device not found %s\n", device);
         return -1;
     }
+    if (!blk_bs(blk)) {
+        monitor_printf(mon, "Device '%s' has no medium\n", device);
+        return -1;
+    }
 
     bdrv_add_key(blk_bs(blk), NULL, &err);
     if (err) {
nbd.c
@@ -1446,6 +1446,7 @@ static void nbd_set_handlers(NBDClient *client)
 {
     if (client->exp && client->exp->ctx) {
         aio_set_fd_handler(client->exp->ctx, client->sock,
+                           true,
                            client->can_read ? nbd_read : NULL,
                            client->send_coroutine ? nbd_restart_write : NULL,
                            client);
@@ -1455,7 +1456,8 @@ static void nbd_set_handlers(NBDClient *client)
 static void nbd_unset_handlers(NBDClient *client)
 {
     if (client->exp && client->exp->ctx) {
-        aio_set_fd_handler(client->exp->ctx, client->sock, NULL, NULL, NULL);
+        aio_set_fd_handler(client->exp->ctx, client->sock,
+                           true, NULL, NULL, NULL);
     }
 }
 
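Both nbd.c hunks pass the new is_external argument as true, marking the client socket as an external request source that aio_disable_external() can pause. A minimal sketch of the register/unregister pairing with the new signature (the fd, handler and opaque pointer are placeholders):

#include "block/aio.h"

/* Register an fd as an external client of the AioContext. */
static void watch_client_fd(AioContext *ctx, int fd, IOHandler *on_read, void *opaque)
{
    aio_set_fd_handler(ctx, fd, true /* is_external */, on_read, NULL, opaque);
}

/* Unregister: NULL handlers remove the fd, keeping the same is_external flag. */
static void unwatch_client_fd(AioContext *ctx, int fd)
{
    aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL);
}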
@@ -215,10 +215,11 @@
 # @drv: the name of the block format used to open the backing device. As of
 #       0.14.0 this can be: 'blkdebug', 'bochs', 'cloop', 'cow', 'dmg',
 #       'file', 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device',
-#       'host_floppy', 'http', 'https', 'nbd', 'parallels', 'qcow',
+#       'http', 'https', 'nbd', 'parallels', 'qcow',
 #       'qcow2', 'raw', 'tftp', 'vdi', 'vmdk', 'vpc', 'vvfat'
 #       2.2: 'archipelago' added, 'cow' dropped
 #       2.3: 'host_floppy' deprecated
+#       2.5: 'host_floppy' dropped
 #
 # @backing_file: #optional the name of the backing file (for copy-on-write)
 #
@@ -1373,15 +1374,14 @@
 #
 # Drivers that are supported in block device operations.
 #
-# @host_device, @host_cdrom, @host_floppy: Since 2.1
-# @host_floppy: deprecated since 2.3
+# @host_device, @host_cdrom: Since 2.1
 #
 # Since: 2.0
 ##
 { 'enum': 'BlockdevDriver',
   'data': [ 'archipelago', 'blkdebug', 'blkverify', 'bochs', 'cloop',
             'dmg', 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device',
-            'host_floppy', 'http', 'https', 'null-aio', 'null-co', 'parallels',
+            'http', 'https', 'null-aio', 'null-co', 'parallels',
             'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'tftp', 'vdi', 'vhdx',
             'vmdk', 'vpc', 'vvfat' ] }
 
@@ -1393,9 +1393,12 @@
 #
 # @driver: block driver name
 # @id: #optional id by which the new block device can be referred to.
-#       This is a required option on the top level of blockdev-add, and
-#       currently not allowed on any other level.
-# @node-name: #optional the name of a block driver state node (Since 2.0)
+#       This option is only allowed on the top level of blockdev-add.
+#       A BlockBackend will be created by blockdev-add if and only if
+#       this option is given.
+# @node-name: #optional the name of a block driver state node (Since 2.0).
+#             This option is required on the top level of blockdev-add if
+#             the @id option is not given there.
 # @discard: #optional discard-related options (default: ignore)
 # @cache: #optional cache-related options
 # @aio: #optional AIO backend (default: threads)
@@ -1816,7 +1819,6 @@
 # TODO gluster: Wait for structured options
       'host_cdrom':  'BlockdevOptionsFile',
       'host_device': 'BlockdevOptionsFile',
-      'host_floppy': 'BlockdevOptionsFile',
       'http':        'BlockdevOptionsFile',
       'https':       'BlockdevOptionsFile',
 # TODO iscsi: Wait for structured options
@@ -1860,7 +1862,9 @@
 ##
 # @blockdev-add:
 #
-# Creates a new block device.
+# Creates a new block device. If the @id option is given at the top level, a
+# BlockBackend will be created; otherwise, @node-name is mandatory at the top
+# level and no BlockBackend will be created.
 #
 # This command is still a work in progress. It doesn't support all
 # block drivers, it lacks a matching blockdev-del, and more. Stay
@@ -2520,8 +2520,8 @@ Each json-object contain the following:
 - "wr_total_time_ns": total time spend on writes in nano-seconds (json-int)
 - "rd_total_time_ns": total time spend on reads in nano-seconds (json-int)
 - "flush_total_time_ns": total time spend on cache flushes in nano-seconds (json-int)
-- "wr_highest_offset": Highest offset of a sector written since the
-                       BlockDriverState has been opened (json-int)
+- "wr_highest_offset": The offset after the greatest byte written to the
+                       BlockDriverState since it has been opened (json-int)
 - "rd_merged": number of read requests that have been merged into
                another request (json-int)
 - "wr_merged": number of write requests that have been merged into
qmp.c
@@ -24,6 +24,7 @@
 #include "sysemu/arch_init.h"
 #include "hw/qdev.h"
 #include "sysemu/blockdev.h"
+#include "sysemu/block-backend.h"
 #include "qom/qom-qobject.h"
 #include "qapi/qmp/qerror.h"
 #include "qapi/qmp/qobject.h"
@@ -170,6 +171,7 @@ SpiceInfo *qmp_query_spice(Error **errp)
 void qmp_cont(Error **errp)
 {
     Error *local_err = NULL;
+    BlockBackend *blk;
     BlockDriverState *bs;
 
     if (runstate_needs_reset()) {
@@ -179,8 +181,8 @@ void qmp_cont(Error **errp)
         return;
     }
 
-    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
-        bdrv_iostatus_reset(bs);
+    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
+        blk_iostatus_reset(blk);
     }
     for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
         bdrv_add_key(bs, NULL, &local_err);
@@ -304,9 +304,7 @@ static void test_media_insert(void)
     qmp_discard_response("{'execute':'change', 'arguments':{"
                          " 'device':'floppy0', 'target': %s, 'arg': 'raw' }}",
                          test_image);
-    qmp_discard_response(""); /* ignore event
-                                 (FIXME open -> open transition?!) */
-    qmp_discard_response(""); /* ignore event */
+    qmp_discard_response(""); /* ignore event (open -> close) */
 
     dir = inb(FLOPPY_BASE + reg_dir);
     assert_bit_set(dir, DSKCHG);
@@ -104,8 +104,17 @@ echo
 echo "=== Testing blkdebug on existing block device ==="
 echo
 
-run_qemu -drive "file=$TEST_IMG,format=raw,if=none,id=drive0" <<EOF
+run_qemu <<EOF
 { "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+  "arguments": {
+      "options": {
+          "node-name": "drive0",
+          "driver": "file",
+          "filename": "$TEST_IMG"
+      }
+  }
+}
 { "execute": "blockdev-add",
   "arguments": {
       "options": {
@@ -133,8 +142,20 @@ echo
 echo "=== Testing blkverify on existing block device ==="
 echo
 
-run_qemu -drive "file=$TEST_IMG,format=$IMGFMT,if=none,id=drive0" <<EOF
+run_qemu <<EOF
 { "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+  "arguments": {
+      "options": {
+          "node-name": "drive0",
+          "driver": "$IMGFMT",
+          "file": {
+              "driver": "file",
+              "filename": "$TEST_IMG"
+          }
+      }
+  }
+}
 { "execute": "blockdev-add",
   "arguments": {
       "options": {
@@ -142,11 +163,8 @@ run_qemu -drive "file=$TEST_IMG,format=$IMGFMT,if=none,id=drive0" <<EOF
           "id": "drive0-verify",
          "test": "drive0",
           "raw": {
-              "driver": "raw",
-              "file": {
-                  "driver": "file",
-                  "filename": "$TEST_IMG.base"
-              }
+              "driver": "file",
+              "filename": "$TEST_IMG.base"
           }
       }
   }
@@ -163,8 +181,17 @@ echo
 echo "=== Testing blkverify on existing raw block device ==="
 echo
 
-run_qemu -drive "file=$TEST_IMG.base,format=raw,if=none,id=drive0" <<EOF
+run_qemu <<EOF
 { "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+  "arguments": {
+      "options": {
+          "node-name": "drive0",
+          "driver": "file",
+          "filename": "$TEST_IMG.base"
+      }
+  }
+}
 { "execute": "blockdev-add",
   "arguments": {
       "options": {
@@ -193,8 +220,17 @@ echo
 echo "=== Testing blkdebug's set-state through QMP ==="
 echo
 
-run_qemu -drive "file=$TEST_IMG,format=raw,if=none,id=drive0" <<EOF
+run_qemu <<EOF
 { "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+  "arguments": {
+      "options": {
+          "node-name": "drive0",
+          "driver": "file",
+          "filename": "$TEST_IMG"
+      }
+  }
+}
 { "execute": "blockdev-add",
   "arguments": {
       "options": {
@@ -42,10 +42,11 @@ read failed: Input/output error
 
 === Testing blkdebug on existing block device ===
 
-Testing: -drive file=TEST_DIR/t.IMGFMT,format=raw,if=none,id=drive0
+Testing:
 QMP_VERSION
 {"return": {}}
 {"return": {}}
+{"return": {}}
 read failed: Input/output error
 {"return": ""}
 {"return": {}}
@@ -56,28 +57,31 @@ QEMU_PROG: Failed to flush the refcount block cache: Input/output error
 
 === Testing blkverify on existing block device ===
 
-Testing: -drive file=TEST_DIR/t.IMGFMT,format=IMGFMT,if=none,id=drive0
+Testing:
 QMP_VERSION
 {"return": {}}
 {"return": {}}
+{"return": {}}
 blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
 
 
 === Testing blkverify on existing raw block device ===
 
-Testing: -drive file=TEST_DIR/t.IMGFMT.base,format=raw,if=none,id=drive0
+Testing:
 QMP_VERSION
 {"return": {}}
 {"return": {}}
+{"return": {}}
 blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
 
 
 === Testing blkdebug's set-state through QMP ===
 
-Testing: -drive file=TEST_DIR/t.IMGFMT,format=raw,if=none,id=drive0
+Testing:
 QMP_VERSION
 {"return": {}}
 {"return": {}}
+{"return": {}}
 read 512/512 bytes at offset 0
 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": ""}
@@ -102,8 +102,20 @@ $QEMU_IO -c "open -o $quorum" -c "read -P 0x32 0 $size" | _filter_qemu_io
 echo
 echo "== checking mixed reference/option specification =="
 
-run_qemu -drive "file=$TEST_DIR/2.raw,format=$IMGFMT,if=none,id=drive2" <<EOF
+run_qemu <<EOF
 { "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+  "arguments": {
+      "options": {
+          "node-name": "drive2",
+          "driver": "$IMGFMT",
+          "file": {
+              "driver": "file",
+              "filename": "$TEST_DIR/2.raw"
+          }
+      }
+  }
+}
 { "execute": "blockdev-add",
   "arguments": {
       "options": {
@@ -112,7 +124,7 @@ run_qemu -drive "file=$TEST_DIR/2.raw,format=$IMGFMT,if=none,id=drive2" <<EOF
           "vote-threshold": 2,
           "children": [
               {
-                  "driver": "raw",
+                  "driver": "$IMGFMT",
                   "file": {
                       "driver": "file",
                       "filename": "$TEST_DIR/1.raw"
@@ -120,7 +132,7 @@ run_qemu -drive "file=$TEST_DIR/2.raw,format=$IMGFMT,if=none,id=drive2" <<EOF
               },
              "drive2",
               {
-                  "driver": "raw",
+                  "driver": "$IMGFMT",
                   "file": {
                       "driver": "file",
                       "filename": "$TEST_DIR/3.raw"
@@ -26,11 +26,12 @@ read 10485760/10485760 bytes at offset 0
 10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 
 == checking mixed reference/option specification ==
-Testing: -drive file=TEST_DIR/2.IMGFMT,format=IMGFMT,if=none,id=drive2
+Testing:
 QMP_VERSION
 {"return": {}}
 {"return": {}}
-{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "QUORUM_REPORT_BAD", "data": {"node-name": "NODE_NAME", "sectors-count": 20480, "sector-num": 0}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "QUORUM_REPORT_BAD", "data": {"node-name": "drive2", "sectors-count": 20480, "sector-num": 0}}
 read 10485760/10485760 bytes at offset 0
 10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": ""}
@@ -54,7 +54,7 @@ size=128M
 _make_test_img $size
 
 echo
-echo === Missing ID ===
+echo === Missing ID and node-name ===
 echo
 
 run_qemu <<EOF
@@ -1,12 +1,12 @@
 QA output created by 087
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
 
-=== Missing ID ===
+=== Missing ID and node-name ===
 
 Testing:
 QMP_VERSION
 {"return": {}}
-{"error": {"class": "GenericError", "desc": "Block device needs an ID"}}
+{"error": {"class": "GenericError", "desc": "'id' and/or 'node-name' need to be specified for the root node"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
 
@@ -118,6 +118,12 @@ static void *test_acquire_thread(void *opaque)
     return NULL;
 }
 
+static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
+                               EventNotifierHandler *handler)
+{
+    aio_set_event_notifier(ctx, notifier, false, handler);
+}
+
 static void dummy_notifier_read(EventNotifier *unused)
 {
     g_assert(false); /* should never be invoked */
@@ -131,7 +137,7 @@ static void test_acquire(void)
 
     /* Dummy event notifier ensures aio_poll() will block */
     event_notifier_init(&notifier, false);
-    aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
+    set_event_notifier(ctx, &notifier, dummy_notifier_read);
     g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
 
     qemu_mutex_init(&data.start_lock);
@@ -149,7 +155,7 @@ static void test_acquire(void)
     aio_context_release(ctx);
 
     qemu_thread_join(&thread);
-    aio_set_event_notifier(ctx, &notifier, NULL);
+    set_event_notifier(ctx, &notifier, NULL);
     event_notifier_cleanup(&notifier);
 
     g_assert(data.thread_acquired);
@@ -308,11 +314,11 @@ static void test_set_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 0 };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     event_notifier_cleanup(&data.e);
@@ -322,7 +328,7 @@ static void test_wait_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
@@ -336,7 +342,7 @@ static void test_wait_event_notifier(void)
     g_assert_cmpint(data.n, ==, 1);
     g_assert_cmpint(data.active, ==, 0);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 1);
 
@@ -347,7 +353,7 @@ static void test_flush_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
@@ -363,18 +369,41 @@ static void test_flush_event_notifier(void)
     g_assert_cmpint(data.active, ==, 0);
     g_assert(!aio_poll(ctx, false));
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     g_assert(!aio_poll(ctx, false));
     event_notifier_cleanup(&data.e);
 }
 
+static void test_aio_external_client(void)
+{
+    int i, j;
+
+    for (i = 1; i < 3; i++) {
+        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
+        event_notifier_init(&data.e, false);
+        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb);
+        event_notifier_set(&data.e);
+        for (j = 0; j < i; j++) {
+            aio_disable_external(ctx);
+        }
+        for (j = 0; j < i; j++) {
+            assert(!aio_poll(ctx, false));
+            assert(event_notifier_test_and_clear(&data.e));
+            event_notifier_set(&data.e);
+            aio_enable_external(ctx);
+        }
+        assert(aio_poll(ctx, false));
+        event_notifier_cleanup(&data.e);
+    }
+}
+
 static void test_wait_event_notifier_noflush(void)
 {
     EventNotifierTestData data = { .n = 0 };
     EventNotifierTestData dummy = { .n = 0, .active = 1 };
 
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
 
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
@@ -387,7 +416,7 @@ static void test_wait_event_notifier_noflush(void)
 
     /* An active event notifier forces aio_poll to look at EventNotifiers. */
     event_notifier_init(&dummy.e, false);
-    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb);
+    set_event_notifier(ctx, &dummy.e, event_ready_cb);
 
     event_notifier_set(&data.e);
     g_assert(aio_poll(ctx, false));
@@ -407,10 +436,10 @@ static void test_wait_event_notifier_noflush(void)
     g_assert_cmpint(dummy.n, ==, 1);
     g_assert_cmpint(dummy.active, ==, 0);
 
-    aio_set_event_notifier(ctx, &dummy.e, NULL);
+    set_event_notifier(ctx, &dummy.e, NULL);
     event_notifier_cleanup(&dummy.e);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 2);
 
@@ -428,7 +457,7 @@ static void test_timer_schedule(void)
      * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
      */
     event_notifier_init(&e, false);
-    aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
+    set_event_notifier(ctx, &e, dummy_io_handler_read);
     aio_poll(ctx, false);
 
     aio_timer_init(ctx, &data.timer, data.clock_type,
@@ -467,7 +496,7 @@ static void test_timer_schedule(void)
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 2);
 
-    aio_set_event_notifier(ctx, &e, NULL);
+    set_event_notifier(ctx, &e, NULL);
     event_notifier_cleanup(&e);
 
     timer_del(&data.timer);
@@ -638,11 +667,11 @@ static void test_source_set_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 0 };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     event_notifier_cleanup(&data.e);
@@ -652,7 +681,7 @@ static void test_source_wait_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
@@ -666,7 +695,7 @@ static void test_source_wait_event_notifier(void)
     g_assert_cmpint(data.n, ==, 1);
     g_assert_cmpint(data.active, ==, 0);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 1);
 
@@ -677,7 +706,7 @@ static void test_source_flush_event_notifier(void)
 {
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
@@ -693,7 +722,7 @@ static void test_source_flush_event_notifier(void)
     g_assert_cmpint(data.active, ==, 0);
     g_assert(!g_main_context_iteration(NULL, false));
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     while (g_main_context_iteration(NULL, false));
     event_notifier_cleanup(&data.e);
 }
@@ -704,7 +733,7 @@ static void test_source_wait_event_notifier_noflush(void)
     EventNotifierTestData dummy = { .n = 0, .active = 1 };
 
     event_notifier_init(&data.e, false);
-    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
+    set_event_notifier(ctx, &data.e, event_ready_cb);
 
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
@@ -717,7 +746,7 @@ static void test_source_wait_event_notifier_noflush(void)
 
     /* An active event notifier forces aio_poll to look at EventNotifiers. */
     event_notifier_init(&dummy.e, false);
-    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb);
+    set_event_notifier(ctx, &dummy.e, event_ready_cb);
 
     event_notifier_set(&data.e);
     g_assert(g_main_context_iteration(NULL, false));
@@ -737,10 +766,10 @@ static void test_source_wait_event_notifier_noflush(void)
     g_assert_cmpint(dummy.n, ==, 1);
     g_assert_cmpint(dummy.active, ==, 0);
 
-    aio_set_event_notifier(ctx, &dummy.e, NULL);
+    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);
 
-    aio_set_event_notifier(ctx, &data.e, NULL);
+    set_event_notifier(ctx, &data.e, NULL);
     while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 2);
 
@@ -759,7 +788,7 @@ static void test_source_timer_schedule(void)
      * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
      */
     event_notifier_init(&e, false);
-    aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
+    set_event_notifier(ctx, &e, dummy_io_handler_read);
     do {} while (g_main_context_iteration(NULL, false));
 
     aio_timer_init(ctx, &data.timer, data.clock_type,
@@ -784,7 +813,7 @@ static void test_source_timer_schedule(void)
     g_assert_cmpint(data.n, ==, 2);
     g_assert(qemu_clock_get_ns(data.clock_type) > expiry);
 
-    aio_set_event_notifier(ctx, &e, NULL);
+    set_event_notifier(ctx, &e, NULL);
     event_notifier_cleanup(&e);
 
     timer_del(&data.timer);
@@ -826,6 +855,7 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/wait", test_wait_event_notifier);
     g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
     g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+    g_test_add_func("/aio/external-client", test_aio_external_client);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
 
     g_test_add_func("/aio-gsource/flush", test_source_flush);
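test_aio_external_client above exercises the nesting behaviour: aio_disable_external() calls stack, and external handlers fire again only after the same number of aio_enable_external() calls. A hedged sketch of the pairing a caller would use around a critical section (run_without_external_clients() is an invented name):

/* Sketch: keep external clients paused while fn() runs.  Because the calls
 * nest, an outer caller doing the same thing keeps them paused until both
 * sections have ended. */
static void run_without_external_clients(AioContext *ctx, void (*fn)(void *), void *opaque)
{
    aio_disable_external(ctx);
    fn(opaque);
    aio_enable_external(ctx);
}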