Block layer patches:
- iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711) - AioContext fixes in QMP commands for backup and bitmaps - iotests fixes -----BEGIN PGP SIGNATURE----- iQIcBAABAgAGBQJeLyLGAAoJEH8JsnLIjy/WK3sP/jc+rZwTLQ/1RbF/vQBlnR+B 6Ff25xwHqF6FL2vR2ldsfUtzqxuxKGl2KJMv07YbvnKljiefOR8r4sCVgGUGjB4R rpMAIu/7qjhE7/ybyibYUm8WxblP+v+ZAyuyK2KVC9GFizWkDXV+ArBeEEDTPX29 owN79UsZBcs+38TpQnr2fzW6LE9KhRlC3A+LIb9kd+KyrUosB+xCQBHxVu1eDiub jahM+i3CN/NubpKmJXsZX8u+wn7pI1+1kEF2upBMcjxTIX1VTXxUDZs09sdYYU9p 5CMkPL9VC4qaI5fjp5KnFUlR5vppQudoV94GkNMboScuylEavhQ/izJuc3FLP113 EWAZB0aRv8zlcBffhDrFzj642sZV4Rm0tSFzHdBnPLAvWAC9OvrztsTNv2E7oNCV lV6AfTiuNf9BtI9NsxwRyTuhIz+BfllrRFmVzualAQkwL9oxi8RnJbduw1uVzaYf WmxIDvnhgKrHAdR/BtFQ1bml5HkQnflvxuIHNvJk4qENyo0/2PhrUi7eQJ//1I9A bURXp3zrOcNm9kyoorIrSwktbxKG002NPu9+5QUWWdsRLzmftiy0IQnEBx/lDSPA FH/CWwOukoV+z3qZgW8JnxnS5FXHHUDkdiAtV5mdN4YO9wN3IAojYfkeXQMnGjT/ 5u47vAA+5Kkv9oMIbsQ/ =tsNA -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging Block layer patches: - iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711) - AioContext fixes in QMP commands for backup and bitmaps - iotests fixes # gpg: Signature made Mon 27 Jan 2020 17:49:58 GMT # gpg: using RSA key 7F09B272C88F2FD6 # gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full] # Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6 * remotes/kevin/tags/for-upstream: iscsi: Don't access non-existent scsi_lba_status_descriptor iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711) block/backup: fix memory leak in bdrv_backup_top_append() iotests: Test handling of AioContexts with some blockdev actions blockdev: Return bs to the proper context on snapshot abort blockdev: Acquire AioContext on dirty bitmap functions block/backup-top: Don't acquire context while dropping top blockdev: honor bdrv_try_set_aio_context() context requirements blockdev: unify qmp_blockdev_backup and blockdev-backup transaction paths blockdev: 
unify qmp_drive_backup and drive-backup transaction paths blockdev: fix coding style issues in drive_backup_prepare iotests: Add more "skip_if_unsupported" statements to the python tests iotests.py: Let wait_migration wait even more Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
750fe5989f
@ -196,7 +196,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
|
|||||||
}
|
}
|
||||||
|
|
||||||
top->total_sectors = source->total_sectors;
|
top->total_sectors = source->total_sectors;
|
||||||
top->opaque = state = g_new0(BDRVBackupTopState, 1);
|
state = top->opaque;
|
||||||
|
|
||||||
bdrv_ref(target);
|
bdrv_ref(target);
|
||||||
state->target = bdrv_attach_child(top, target, "target", &child_file, errp);
|
state->target = bdrv_attach_child(top, target, "target", &child_file, errp);
|
||||||
@ -255,9 +255,6 @@ append_failed:
|
|||||||
void bdrv_backup_top_drop(BlockDriverState *bs)
|
void bdrv_backup_top_drop(BlockDriverState *bs)
|
||||||
{
|
{
|
||||||
BDRVBackupTopState *s = bs->opaque;
|
BDRVBackupTopState *s = bs->opaque;
|
||||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
|
||||||
|
|
||||||
aio_context_acquire(aio_context);
|
|
||||||
|
|
||||||
bdrv_drained_begin(bs);
|
bdrv_drained_begin(bs);
|
||||||
|
|
||||||
@ -271,6 +268,4 @@ void bdrv_backup_top_drop(BlockDriverState *bs)
|
|||||||
bdrv_drained_end(bs);
|
bdrv_drained_end(bs);
|
||||||
|
|
||||||
bdrv_unref(bs);
|
bdrv_unref(bs);
|
||||||
|
|
||||||
aio_context_release(aio_context);
|
|
||||||
}
|
}
|
||||||
|
@ -135,8 +135,11 @@ static void backup_abort(Job *job)
|
|||||||
static void backup_clean(Job *job)
|
static void backup_clean(Job *job)
|
||||||
{
|
{
|
||||||
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
|
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
|
||||||
|
AioContext *aio_context = bdrv_get_aio_context(s->backup_top);
|
||||||
|
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
bdrv_backup_top_drop(s->backup_top);
|
bdrv_backup_top_drop(s->backup_top);
|
||||||
|
aio_context_release(aio_context);
|
||||||
}
|
}
|
||||||
|
|
||||||
void backup_do_checkpoint(BlockJob *job, Error **errp)
|
void backup_do_checkpoint(BlockJob *job, Error **errp)
|
||||||
|
@ -701,7 +701,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs,
|
|||||||
struct scsi_get_lba_status *lbas = NULL;
|
struct scsi_get_lba_status *lbas = NULL;
|
||||||
struct scsi_lba_status_descriptor *lbasd = NULL;
|
struct scsi_lba_status_descriptor *lbasd = NULL;
|
||||||
struct IscsiTask iTask;
|
struct IscsiTask iTask;
|
||||||
uint64_t lba;
|
uint64_t lba, max_bytes;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
iscsi_co_init_iscsitask(iscsilun, &iTask);
|
iscsi_co_init_iscsitask(iscsilun, &iTask);
|
||||||
@ -721,6 +721,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs,
|
|||||||
}
|
}
|
||||||
|
|
||||||
lba = offset / iscsilun->block_size;
|
lba = offset / iscsilun->block_size;
|
||||||
|
max_bytes = (iscsilun->num_blocks - lba) * iscsilun->block_size;
|
||||||
|
|
||||||
qemu_mutex_lock(&iscsilun->mutex);
|
qemu_mutex_lock(&iscsilun->mutex);
|
||||||
retry:
|
retry:
|
||||||
@ -752,7 +753,7 @@ retry:
|
|||||||
}
|
}
|
||||||
|
|
||||||
lbas = scsi_datain_unmarshall(iTask.task);
|
lbas = scsi_datain_unmarshall(iTask.task);
|
||||||
if (lbas == NULL) {
|
if (lbas == NULL || lbas->num_descriptors == 0) {
|
||||||
ret = -EIO;
|
ret = -EIO;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
@ -764,7 +765,7 @@ retry:
|
|||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
*pnum = (int64_t) lbasd->num_blocks * iscsilun->block_size;
|
*pnum = MIN((int64_t) lbasd->num_blocks * iscsilun->block_size, max_bytes);
|
||||||
|
|
||||||
if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
|
if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
|
||||||
lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
|
lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
|
||||||
|
393
blockdev.c
393
blockdev.c
@ -1535,6 +1535,7 @@ static void external_snapshot_prepare(BlkActionState *common,
|
|||||||
DO_UPCAST(ExternalSnapshotState, common, common);
|
DO_UPCAST(ExternalSnapshotState, common, common);
|
||||||
TransactionAction *action = common->action;
|
TransactionAction *action = common->action;
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
|
AioContext *old_context;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
|
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
|
||||||
@ -1675,7 +1676,16 @@ static void external_snapshot_prepare(BlkActionState *common,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||||
|
old_context = bdrv_get_aio_context(state->new_bs);
|
||||||
|
aio_context_release(aio_context);
|
||||||
|
aio_context_acquire(old_context);
|
||||||
|
|
||||||
ret = bdrv_try_set_aio_context(state->new_bs, aio_context, errp);
|
ret = bdrv_try_set_aio_context(state->new_bs, aio_context, errp);
|
||||||
|
|
||||||
|
aio_context_release(old_context);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -1721,6 +1731,8 @@ static void external_snapshot_abort(BlkActionState *common)
|
|||||||
if (state->new_bs) {
|
if (state->new_bs) {
|
||||||
if (state->overlay_appended) {
|
if (state->overlay_appended) {
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
|
AioContext *tmp_context;
|
||||||
|
int ret;
|
||||||
|
|
||||||
aio_context = bdrv_get_aio_context(state->old_bs);
|
aio_context = bdrv_get_aio_context(state->old_bs);
|
||||||
aio_context_acquire(aio_context);
|
aio_context_acquire(aio_context);
|
||||||
@ -1728,6 +1740,25 @@ static void external_snapshot_abort(BlkActionState *common)
|
|||||||
bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd()
|
bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd()
|
||||||
close state->old_bs; we need it */
|
close state->old_bs; we need it */
|
||||||
bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
|
bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The call to bdrv_set_backing_hd() above returns state->old_bs to
|
||||||
|
* the main AioContext. As we're still going to be using it, return
|
||||||
|
* it to the AioContext it was before.
|
||||||
|
*/
|
||||||
|
tmp_context = bdrv_get_aio_context(state->old_bs);
|
||||||
|
if (aio_context != tmp_context) {
|
||||||
|
aio_context_release(aio_context);
|
||||||
|
aio_context_acquire(tmp_context);
|
||||||
|
|
||||||
|
ret = bdrv_try_set_aio_context(state->old_bs,
|
||||||
|
aio_context, NULL);
|
||||||
|
assert(ret == 0);
|
||||||
|
|
||||||
|
aio_context_release(tmp_context);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
}
|
||||||
|
|
||||||
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
|
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
|
||||||
bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
|
bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
|
||||||
|
|
||||||
@ -1761,39 +1792,145 @@ typedef struct DriveBackupState {
|
|||||||
BlockJob *job;
|
BlockJob *job;
|
||||||
} DriveBackupState;
|
} DriveBackupState;
|
||||||
|
|
||||||
static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
|
static BlockJob *do_backup_common(BackupCommon *backup,
|
||||||
Error **errp);
|
BlockDriverState *bs,
|
||||||
|
BlockDriverState *target_bs,
|
||||||
|
AioContext *aio_context,
|
||||||
|
JobTxn *txn, Error **errp);
|
||||||
|
|
||||||
static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||||
{
|
{
|
||||||
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
|
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
|
||||||
BlockDriverState *bs;
|
|
||||||
DriveBackup *backup;
|
DriveBackup *backup;
|
||||||
|
BlockDriverState *bs;
|
||||||
|
BlockDriverState *target_bs;
|
||||||
|
BlockDriverState *source = NULL;
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
|
AioContext *old_context;
|
||||||
|
QDict *options;
|
||||||
Error *local_err = NULL;
|
Error *local_err = NULL;
|
||||||
|
int flags;
|
||||||
|
int64_t size;
|
||||||
|
bool set_backing_hd = false;
|
||||||
|
int ret;
|
||||||
|
|
||||||
assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
|
assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
|
||||||
backup = common->action->u.drive_backup.data;
|
backup = common->action->u.drive_backup.data;
|
||||||
|
|
||||||
|
if (!backup->has_mode) {
|
||||||
|
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
|
||||||
|
}
|
||||||
|
|
||||||
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
|
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
|
||||||
if (!bs) {
|
if (!bs) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!bs->drv) {
|
||||||
|
error_setg(errp, "Device has no medium");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
aio_context = bdrv_get_aio_context(bs);
|
aio_context = bdrv_get_aio_context(bs);
|
||||||
aio_context_acquire(aio_context);
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
/* Paired with .clean() */
|
/* Paired with .clean() */
|
||||||
bdrv_drained_begin(bs);
|
bdrv_drained_begin(bs);
|
||||||
|
|
||||||
state->bs = bs;
|
if (!backup->has_format) {
|
||||||
|
backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ?
|
||||||
|
NULL : (char *) bs->drv->format_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Early check to avoid creating target */
|
||||||
|
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
flags = bs->open_flags | BDRV_O_RDWR;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* See if we have a backing HD we can use to create our new image
|
||||||
|
* on top of.
|
||||||
|
*/
|
||||||
|
if (backup->sync == MIRROR_SYNC_MODE_TOP) {
|
||||||
|
source = backing_bs(bs);
|
||||||
|
if (!source) {
|
||||||
|
backup->sync = MIRROR_SYNC_MODE_FULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
|
||||||
|
source = bs;
|
||||||
|
flags |= BDRV_O_NO_BACKING;
|
||||||
|
set_backing_hd = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
size = bdrv_getlength(bs);
|
||||||
|
if (size < 0) {
|
||||||
|
error_setg_errno(errp, -size, "bdrv_getlength failed");
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
|
||||||
|
assert(backup->format);
|
||||||
|
if (source) {
|
||||||
|
bdrv_refresh_filename(source);
|
||||||
|
bdrv_img_create(backup->target, backup->format, source->filename,
|
||||||
|
source->drv->format_name, NULL,
|
||||||
|
size, flags, false, &local_err);
|
||||||
|
} else {
|
||||||
|
bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL,
|
||||||
|
size, flags, false, &local_err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
state->job = do_drive_backup(backup, common->block_job_txn, &local_err);
|
|
||||||
if (local_err) {
|
if (local_err) {
|
||||||
error_propagate(errp, local_err);
|
error_propagate(errp, local_err);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
options = qdict_new();
|
||||||
|
qdict_put_str(options, "discard", "unmap");
|
||||||
|
qdict_put_str(options, "detect-zeroes", "unmap");
|
||||||
|
if (backup->format) {
|
||||||
|
qdict_put_str(options, "driver", backup->format);
|
||||||
|
}
|
||||||
|
|
||||||
|
target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
|
||||||
|
if (!target_bs) {
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||||
|
old_context = bdrv_get_aio_context(target_bs);
|
||||||
|
aio_context_release(aio_context);
|
||||||
|
aio_context_acquire(old_context);
|
||||||
|
|
||||||
|
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||||
|
if (ret < 0) {
|
||||||
|
bdrv_unref(target_bs);
|
||||||
|
aio_context_release(old_context);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
aio_context_release(old_context);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
|
if (set_backing_hd) {
|
||||||
|
bdrv_set_backing_hd(target_bs, source, &local_err);
|
||||||
|
if (local_err) {
|
||||||
|
goto unref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
state->bs = bs;
|
||||||
|
|
||||||
|
state->job = do_backup_common(qapi_DriveBackup_base(backup),
|
||||||
|
bs, target_bs, aio_context,
|
||||||
|
common->block_job_txn, errp);
|
||||||
|
|
||||||
|
unref:
|
||||||
|
bdrv_unref(target_bs);
|
||||||
out:
|
out:
|
||||||
aio_context_release(aio_context);
|
aio_context_release(aio_context);
|
||||||
}
|
}
|
||||||
@ -1851,16 +1988,15 @@ typedef struct BlockdevBackupState {
|
|||||||
BlockJob *job;
|
BlockJob *job;
|
||||||
} BlockdevBackupState;
|
} BlockdevBackupState;
|
||||||
|
|
||||||
static BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
|
|
||||||
Error **errp);
|
|
||||||
|
|
||||||
static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
|
static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
|
||||||
{
|
{
|
||||||
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
|
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
|
||||||
BlockdevBackup *backup;
|
BlockdevBackup *backup;
|
||||||
BlockDriverState *bs, *target;
|
BlockDriverState *bs;
|
||||||
|
BlockDriverState *target_bs;
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
Error *local_err = NULL;
|
AioContext *old_context;
|
||||||
|
int ret;
|
||||||
|
|
||||||
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
|
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
|
||||||
backup = common->action->u.blockdev_backup.data;
|
backup = common->action->u.blockdev_backup.data;
|
||||||
@ -1870,25 +2006,33 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
target = bdrv_lookup_bs(backup->target, backup->target, errp);
|
target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
|
||||||
if (!target) {
|
if (!target_bs) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||||
aio_context = bdrv_get_aio_context(bs);
|
aio_context = bdrv_get_aio_context(bs);
|
||||||
|
old_context = bdrv_get_aio_context(target_bs);
|
||||||
|
aio_context_acquire(old_context);
|
||||||
|
|
||||||
|
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||||
|
if (ret < 0) {
|
||||||
|
aio_context_release(old_context);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
aio_context_release(old_context);
|
||||||
aio_context_acquire(aio_context);
|
aio_context_acquire(aio_context);
|
||||||
state->bs = bs;
|
state->bs = bs;
|
||||||
|
|
||||||
/* Paired with .clean() */
|
/* Paired with .clean() */
|
||||||
bdrv_drained_begin(state->bs);
|
bdrv_drained_begin(state->bs);
|
||||||
|
|
||||||
state->job = do_blockdev_backup(backup, common->block_job_txn, &local_err);
|
state->job = do_backup_common(qapi_BlockdevBackup_base(backup),
|
||||||
if (local_err) {
|
bs, target_bs, aio_context,
|
||||||
error_propagate(errp, local_err);
|
common->block_job_txn, errp);
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
out:
|
|
||||||
aio_context_release(aio_context);
|
aio_context_release(aio_context);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2861,6 +3005,7 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
|
|||||||
{
|
{
|
||||||
BlockDriverState *bs;
|
BlockDriverState *bs;
|
||||||
BdrvDirtyBitmap *bitmap;
|
BdrvDirtyBitmap *bitmap;
|
||||||
|
AioContext *aio_context;
|
||||||
|
|
||||||
if (!name || name[0] == '\0') {
|
if (!name || name[0] == '\0') {
|
||||||
error_setg(errp, "Bitmap name cannot be empty");
|
error_setg(errp, "Bitmap name cannot be empty");
|
||||||
@ -2872,11 +3017,14 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aio_context = bdrv_get_aio_context(bs);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
if (has_granularity) {
|
if (has_granularity) {
|
||||||
if (granularity < 512 || !is_power_of_2(granularity)) {
|
if (granularity < 512 || !is_power_of_2(granularity)) {
|
||||||
error_setg(errp, "Granularity must be power of 2 "
|
error_setg(errp, "Granularity must be power of 2 "
|
||||||
"and at least 512");
|
"and at least 512");
|
||||||
return;
|
goto out;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
/* Default to cluster size, if available: */
|
/* Default to cluster size, if available: */
|
||||||
@ -2894,12 +3042,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
|
|||||||
if (persistent &&
|
if (persistent &&
|
||||||
!bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp))
|
!bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp))
|
||||||
{
|
{
|
||||||
return;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp);
|
bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp);
|
||||||
if (bitmap == NULL) {
|
if (bitmap == NULL) {
|
||||||
return;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (disabled) {
|
if (disabled) {
|
||||||
@ -2907,6 +3055,9 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
|
|||||||
}
|
}
|
||||||
|
|
||||||
bdrv_dirty_bitmap_set_persistence(bitmap, persistent);
|
bdrv_dirty_bitmap_set_persistence(bitmap, persistent);
|
||||||
|
|
||||||
|
out:
|
||||||
|
aio_context_release(aio_context);
|
||||||
}
|
}
|
||||||
|
|
||||||
static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
|
static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
|
||||||
@ -2915,21 +3066,27 @@ static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
|
|||||||
{
|
{
|
||||||
BlockDriverState *bs;
|
BlockDriverState *bs;
|
||||||
BdrvDirtyBitmap *bitmap;
|
BdrvDirtyBitmap *bitmap;
|
||||||
|
AioContext *aio_context;
|
||||||
|
|
||||||
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
|
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
|
||||||
if (!bitmap || !bs) {
|
if (!bitmap || !bs) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aio_context = bdrv_get_aio_context(bs);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
|
if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
|
||||||
errp)) {
|
errp)) {
|
||||||
|
aio_context_release(aio_context);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (bdrv_dirty_bitmap_get_persistence(bitmap) &&
|
if (bdrv_dirty_bitmap_get_persistence(bitmap) &&
|
||||||
bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0)
|
bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0)
|
||||||
{
|
{
|
||||||
return NULL;
|
aio_context_release(aio_context);
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (release) {
|
if (release) {
|
||||||
@ -2940,6 +3097,7 @@ static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
|
|||||||
*bitmap_bs = bs;
|
*bitmap_bs = bs;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aio_context_release(aio_context);
|
||||||
return release ? NULL : bitmap;
|
return release ? NULL : bitmap;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3479,7 +3637,6 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
|||||||
BlockJob *job = NULL;
|
BlockJob *job = NULL;
|
||||||
BdrvDirtyBitmap *bmap = NULL;
|
BdrvDirtyBitmap *bmap = NULL;
|
||||||
int job_flags = JOB_DEFAULT;
|
int job_flags = JOB_DEFAULT;
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (!backup->has_speed) {
|
if (!backup->has_speed) {
|
||||||
backup->speed = 0;
|
backup->speed = 0;
|
||||||
@ -3503,11 +3660,6 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
|||||||
backup->compress = false;
|
backup->compress = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
|
||||||
if (ret < 0) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
|
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
|
||||||
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
|
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
|
||||||
/* done before desugaring 'incremental' to print the right message */
|
/* done before desugaring 'incremental' to print the right message */
|
||||||
@ -3587,124 +3739,13 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
|||||||
return job;
|
return job;
|
||||||
}
|
}
|
||||||
|
|
||||||
static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
|
void qmp_drive_backup(DriveBackup *backup, Error **errp)
|
||||||
Error **errp)
|
|
||||||
{
|
{
|
||||||
BlockDriverState *bs;
|
TransactionAction action = {
|
||||||
BlockDriverState *target_bs;
|
.type = TRANSACTION_ACTION_KIND_DRIVE_BACKUP,
|
||||||
BlockDriverState *source = NULL;
|
.u.drive_backup.data = backup,
|
||||||
BlockJob *job = NULL;
|
};
|
||||||
AioContext *aio_context;
|
blockdev_do_action(&action, errp);
|
||||||
QDict *options;
|
|
||||||
Error *local_err = NULL;
|
|
||||||
int flags;
|
|
||||||
int64_t size;
|
|
||||||
bool set_backing_hd = false;
|
|
||||||
|
|
||||||
if (!backup->has_mode) {
|
|
||||||
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
|
|
||||||
}
|
|
||||||
|
|
||||||
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
|
|
||||||
if (!bs) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!bs->drv) {
|
|
||||||
error_setg(errp, "Device has no medium");
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
aio_context = bdrv_get_aio_context(bs);
|
|
||||||
aio_context_acquire(aio_context);
|
|
||||||
|
|
||||||
if (!backup->has_format) {
|
|
||||||
backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ?
|
|
||||||
NULL : (char*) bs->drv->format_name;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Early check to avoid creating target */
|
|
||||||
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
flags = bs->open_flags | BDRV_O_RDWR;
|
|
||||||
|
|
||||||
/* See if we have a backing HD we can use to create our new image
|
|
||||||
* on top of. */
|
|
||||||
if (backup->sync == MIRROR_SYNC_MODE_TOP) {
|
|
||||||
source = backing_bs(bs);
|
|
||||||
if (!source) {
|
|
||||||
backup->sync = MIRROR_SYNC_MODE_FULL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
|
|
||||||
source = bs;
|
|
||||||
flags |= BDRV_O_NO_BACKING;
|
|
||||||
set_backing_hd = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
size = bdrv_getlength(bs);
|
|
||||||
if (size < 0) {
|
|
||||||
error_setg_errno(errp, -size, "bdrv_getlength failed");
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
|
|
||||||
assert(backup->format);
|
|
||||||
if (source) {
|
|
||||||
bdrv_refresh_filename(source);
|
|
||||||
bdrv_img_create(backup->target, backup->format, source->filename,
|
|
||||||
source->drv->format_name, NULL,
|
|
||||||
size, flags, false, &local_err);
|
|
||||||
} else {
|
|
||||||
bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL,
|
|
||||||
size, flags, false, &local_err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (local_err) {
|
|
||||||
error_propagate(errp, local_err);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
options = qdict_new();
|
|
||||||
qdict_put_str(options, "discard", "unmap");
|
|
||||||
qdict_put_str(options, "detect-zeroes", "unmap");
|
|
||||||
if (backup->format) {
|
|
||||||
qdict_put_str(options, "driver", backup->format);
|
|
||||||
}
|
|
||||||
|
|
||||||
target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
|
|
||||||
if (!target_bs) {
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (set_backing_hd) {
|
|
||||||
bdrv_set_backing_hd(target_bs, source, &local_err);
|
|
||||||
if (local_err) {
|
|
||||||
goto unref;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
job = do_backup_common(qapi_DriveBackup_base(backup),
|
|
||||||
bs, target_bs, aio_context, txn, errp);
|
|
||||||
|
|
||||||
unref:
|
|
||||||
bdrv_unref(target_bs);
|
|
||||||
out:
|
|
||||||
aio_context_release(aio_context);
|
|
||||||
return job;
|
|
||||||
}
|
|
||||||
|
|
||||||
void qmp_drive_backup(DriveBackup *arg, Error **errp)
|
|
||||||
{
|
|
||||||
|
|
||||||
BlockJob *job;
|
|
||||||
job = do_drive_backup(arg, NULL, errp);
|
|
||||||
if (job) {
|
|
||||||
job_start(&job->job);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
|
BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
|
||||||
@ -3717,41 +3758,13 @@ XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
|
|||||||
return bdrv_get_xdbg_block_graph(errp);
|
return bdrv_get_xdbg_block_graph(errp);
|
||||||
}
|
}
|
||||||
|
|
||||||
BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
|
void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp)
|
||||||
Error **errp)
|
|
||||||
{
|
{
|
||||||
BlockDriverState *bs;
|
TransactionAction action = {
|
||||||
BlockDriverState *target_bs;
|
.type = TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP,
|
||||||
AioContext *aio_context;
|
.u.blockdev_backup.data = backup,
|
||||||
BlockJob *job;
|
};
|
||||||
|
blockdev_do_action(&action, errp);
|
||||||
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
|
|
||||||
if (!bs) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
|
|
||||||
if (!target_bs) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
aio_context = bdrv_get_aio_context(bs);
|
|
||||||
aio_context_acquire(aio_context);
|
|
||||||
|
|
||||||
job = do_backup_common(qapi_BlockdevBackup_base(backup),
|
|
||||||
bs, target_bs, aio_context, txn, errp);
|
|
||||||
|
|
||||||
aio_context_release(aio_context);
|
|
||||||
return job;
|
|
||||||
}
|
|
||||||
|
|
||||||
void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp)
|
|
||||||
{
|
|
||||||
BlockJob *job;
|
|
||||||
job = do_blockdev_backup(arg, NULL, errp);
|
|
||||||
if (job) {
|
|
||||||
job_start(&job->job);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Parameter check and block job starting for drive mirroring.
|
/* Parameter check and block job starting for drive mirroring.
|
||||||
@ -3881,6 +3894,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
|||||||
BlockDriverState *bs;
|
BlockDriverState *bs;
|
||||||
BlockDriverState *source, *target_bs;
|
BlockDriverState *source, *target_bs;
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
|
AioContext *old_context;
|
||||||
BlockMirrorBackingMode backing_mode;
|
BlockMirrorBackingMode backing_mode;
|
||||||
Error *local_err = NULL;
|
Error *local_err = NULL;
|
||||||
QDict *options = NULL;
|
QDict *options = NULL;
|
||||||
@ -3993,12 +4007,22 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
|||||||
(arg->mode == NEW_IMAGE_MODE_EXISTING ||
|
(arg->mode == NEW_IMAGE_MODE_EXISTING ||
|
||||||
!bdrv_has_zero_init(target_bs)));
|
!bdrv_has_zero_init(target_bs)));
|
||||||
|
|
||||||
|
|
||||||
|
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||||
|
old_context = bdrv_get_aio_context(target_bs);
|
||||||
|
aio_context_release(aio_context);
|
||||||
|
aio_context_acquire(old_context);
|
||||||
|
|
||||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
bdrv_unref(target_bs);
|
bdrv_unref(target_bs);
|
||||||
goto out;
|
aio_context_release(old_context);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aio_context_release(old_context);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs,
|
blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs,
|
||||||
arg->has_replaces, arg->replaces, arg->sync,
|
arg->has_replaces, arg->replaces, arg->sync,
|
||||||
backing_mode, zero_target,
|
backing_mode, zero_target,
|
||||||
@ -4040,6 +4064,7 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
|||||||
BlockDriverState *bs;
|
BlockDriverState *bs;
|
||||||
BlockDriverState *target_bs;
|
BlockDriverState *target_bs;
|
||||||
AioContext *aio_context;
|
AioContext *aio_context;
|
||||||
|
AioContext *old_context;
|
||||||
BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
|
BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
|
||||||
Error *local_err = NULL;
|
Error *local_err = NULL;
|
||||||
bool zero_target;
|
bool zero_target;
|
||||||
@ -4057,10 +4082,16 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
|||||||
|
|
||||||
zero_target = (sync == MIRROR_SYNC_MODE_FULL);
|
zero_target = (sync == MIRROR_SYNC_MODE_FULL);
|
||||||
|
|
||||||
|
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||||
|
old_context = bdrv_get_aio_context(target_bs);
|
||||||
aio_context = bdrv_get_aio_context(bs);
|
aio_context = bdrv_get_aio_context(bs);
|
||||||
aio_context_acquire(aio_context);
|
aio_context_acquire(old_context);
|
||||||
|
|
||||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||||
|
|
||||||
|
aio_context_release(old_context);
|
||||||
|
aio_context_acquire(aio_context);
|
||||||
|
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
@ -530,6 +530,7 @@ class TestQuorum(iotests.QMPTestCase):
|
|||||||
children = []
|
children = []
|
||||||
backing = []
|
backing = []
|
||||||
|
|
||||||
|
@iotests.skip_if_unsupported(['quorum'])
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
opts = ['driver=quorum', 'vote-threshold=2']
|
opts = ['driver=quorum', 'vote-threshold=2']
|
||||||
|
|
||||||
@ -560,9 +561,6 @@ class TestQuorum(iotests.QMPTestCase):
|
|||||||
os.remove(img)
|
os.remove(img)
|
||||||
|
|
||||||
def test_stream_quorum(self):
|
def test_stream_quorum(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]),
|
self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]),
|
||||||
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]),
|
qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]),
|
||||||
'image file map matches backing file before streaming')
|
'image file map matches backing file before streaming')
|
||||||
|
@ -106,6 +106,7 @@ class TestSingleDrive(ImageCommitTestCase):
|
|||||||
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
|
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
|
||||||
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
|
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
|
||||||
|
|
||||||
|
@iotests.skip_if_unsupported(['throttle'])
|
||||||
def test_commit_with_filter_and_quit(self):
|
def test_commit_with_filter_and_quit(self):
|
||||||
result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg')
|
result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg')
|
||||||
self.assert_qmp(result, 'return', {})
|
self.assert_qmp(result, 'return', {})
|
||||||
@ -125,6 +126,7 @@ class TestSingleDrive(ImageCommitTestCase):
|
|||||||
self.has_quit = True
|
self.has_quit = True
|
||||||
|
|
||||||
# Same as above, but this time we add the filter after starting the job
|
# Same as above, but this time we add the filter after starting the job
|
||||||
|
@iotests.skip_if_unsupported(['throttle'])
|
||||||
def test_commit_plus_filter_and_quit(self):
|
def test_commit_plus_filter_and_quit(self):
|
||||||
result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg')
|
result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg')
|
||||||
self.assert_qmp(result, 'return', {})
|
self.assert_qmp(result, 'return', {})
|
||||||
|
@ -871,6 +871,7 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
image_len = 1 * 1024 * 1024 # MB
|
image_len = 1 * 1024 * 1024 # MB
|
||||||
IMAGES = [ quorum_img1, quorum_img2, quorum_img3 ]
|
IMAGES = [ quorum_img1, quorum_img2, quorum_img3 ]
|
||||||
|
|
||||||
|
@iotests.skip_if_unsupported(['quorum'])
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.vm = iotests.VM()
|
self.vm = iotests.VM()
|
||||||
|
|
||||||
@ -891,9 +892,8 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
#assemble the quorum block device from the individual files
|
#assemble the quorum block device from the individual files
|
||||||
args = { "driver": "quorum", "node-name": "quorum0",
|
args = { "driver": "quorum", "node-name": "quorum0",
|
||||||
"vote-threshold": 2, "children": [ "img0", "img1", "img2" ] }
|
"vote-threshold": 2, "children": [ "img0", "img1", "img2" ] }
|
||||||
if iotests.supports_quorum():
|
result = self.vm.qmp("blockdev-add", **args)
|
||||||
result = self.vm.qmp("blockdev-add", **args)
|
self.assert_qmp(result, 'return', {})
|
||||||
self.assert_qmp(result, 'return', {})
|
|
||||||
|
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
@ -906,9 +906,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
def test_complete(self):
|
def test_complete(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
self.assert_no_active_block_jobs()
|
self.assert_no_active_block_jobs()
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
@ -925,9 +922,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
'target image does not match source after mirroring')
|
'target image does not match source after mirroring')
|
||||||
|
|
||||||
def test_cancel(self):
|
def test_cancel(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
self.assert_no_active_block_jobs()
|
self.assert_no_active_block_jobs()
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
@ -942,9 +936,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
self.vm.shutdown()
|
self.vm.shutdown()
|
||||||
|
|
||||||
def test_cancel_after_ready(self):
|
def test_cancel_after_ready(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
self.assert_no_active_block_jobs()
|
self.assert_no_active_block_jobs()
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
@ -961,9 +952,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
'target image does not match source after mirroring')
|
'target image does not match source after mirroring')
|
||||||
|
|
||||||
def test_pause(self):
|
def test_pause(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
self.assert_no_active_block_jobs()
|
self.assert_no_active_block_jobs()
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
@ -989,9 +977,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
'target image does not match source after mirroring')
|
'target image does not match source after mirroring')
|
||||||
|
|
||||||
def test_medium_not_found(self):
|
def test_medium_not_found(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
if iotests.qemu_default_machine != 'pc':
|
if iotests.qemu_default_machine != 'pc':
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -1003,9 +988,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_image_not_found(self):
|
def test_image_not_found(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
sync='full', node_name='repair0', replaces='img1',
|
sync='full', node_name='repair0', replaces='img1',
|
||||||
mode='existing', target=quorum_repair_img,
|
mode='existing', target=quorum_repair_img,
|
||||||
@ -1013,9 +995,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_device_not_found(self):
|
def test_device_not_found(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0',
|
result = self.vm.qmp('drive-mirror', job_id='job0',
|
||||||
device='nonexistent', sync='full',
|
device='nonexistent', sync='full',
|
||||||
node_name='repair0',
|
node_name='repair0',
|
||||||
@ -1024,9 +1003,6 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_wrong_sync_mode(self):
|
def test_wrong_sync_mode(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', device='quorum0', job_id='job0',
|
result = self.vm.qmp('drive-mirror', device='quorum0', job_id='job0',
|
||||||
node_name='repair0',
|
node_name='repair0',
|
||||||
replaces='img1',
|
replaces='img1',
|
||||||
@ -1034,27 +1010,18 @@ class TestRepairQuorum(iotests.QMPTestCase):
|
|||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_no_node_name(self):
|
def test_no_node_name(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
sync='full', replaces='img1',
|
sync='full', replaces='img1',
|
||||||
target=quorum_repair_img, format=iotests.imgfmt)
|
target=quorum_repair_img, format=iotests.imgfmt)
|
||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_nonexistent_replaces(self):
|
def test_nonexistent_replaces(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0',
|
||||||
sync='full', node_name='repair0', replaces='img77',
|
sync='full', node_name='repair0', replaces='img77',
|
||||||
target=quorum_repair_img, format=iotests.imgfmt)
|
target=quorum_repair_img, format=iotests.imgfmt)
|
||||||
self.assert_qmp(result, 'error/class', 'GenericError')
|
self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
|
||||||
def test_after_a_quorum_snapshot(self):
|
def test_after_a_quorum_snapshot(self):
|
||||||
if not iotests.supports_quorum():
|
|
||||||
return
|
|
||||||
|
|
||||||
result = self.vm.qmp('blockdev-snapshot-sync', node_name='img1',
|
result = self.vm.qmp('blockdev-snapshot-sync', node_name='img1',
|
||||||
snapshot_file=quorum_snapshot_file,
|
snapshot_file=quorum_snapshot_file,
|
||||||
snapshot_node_name="snap1");
|
snapshot_node_name="snap1");
|
||||||
|
@ -13,6 +13,8 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
|
|||||||
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
|
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
|
||||||
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
|
||||||
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
|
||||||
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "job0"}}
|
||||||
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
|
||||||
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
|
{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
|
||||||
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
|
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
|
||||||
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
|
{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
|
||||||
|
@ -65,6 +65,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l
|
|||||||
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
|
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
|
||||||
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
|
||||||
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
|
||||||
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "disk"}}
|
||||||
|
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
|
||||||
{"return": {}}
|
{"return": {}}
|
||||||
{ 'execute': 'quit' }
|
{ 'execute': 'quit' }
|
||||||
{"return": {}}
|
{"return": {}}
|
||||||
|
@ -63,7 +63,7 @@ def test_pause_resume(vm):
|
|||||||
# logged immediately
|
# logged immediately
|
||||||
iotests.log(vm.qmp('query-jobs'))
|
iotests.log(vm.qmp('query-jobs'))
|
||||||
|
|
||||||
def test_job_lifecycle(vm, job, job_args, has_ready=False):
|
def test_job_lifecycle(vm, job, job_args, has_ready=False, is_mirror=False):
|
||||||
global img_size
|
global img_size
|
||||||
|
|
||||||
iotests.log('')
|
iotests.log('')
|
||||||
@ -135,6 +135,9 @@ def test_job_lifecycle(vm, job, job_args, has_ready=False):
|
|||||||
iotests.log('Waiting for PENDING state...')
|
iotests.log('Waiting for PENDING state...')
|
||||||
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
||||||
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
||||||
|
if is_mirror:
|
||||||
|
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
||||||
|
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
|
||||||
|
|
||||||
if not job_args.get('auto-finalize', True):
|
if not job_args.get('auto-finalize', True):
|
||||||
# PENDING state:
|
# PENDING state:
|
||||||
@ -218,7 +221,7 @@ with iotests.FilePath('disk.img') as disk_path, \
|
|||||||
|
|
||||||
for auto_finalize in [True, False]:
|
for auto_finalize in [True, False]:
|
||||||
for auto_dismiss in [True, False]:
|
for auto_dismiss in [True, False]:
|
||||||
test_job_lifecycle(vm, 'drive-backup', job_args={
|
test_job_lifecycle(vm, 'drive-backup', is_mirror=True, job_args={
|
||||||
'device': 'drive0-node',
|
'device': 'drive0-node',
|
||||||
'target': copy_path,
|
'target': copy_path,
|
||||||
'sync': 'full',
|
'sync': 'full',
|
||||||
|
@ -135,6 +135,8 @@ Pause/resume in RUNNING
|
|||||||
{"return": {}}
|
{"return": {}}
|
||||||
|
|
||||||
Waiting for PENDING state...
|
Waiting for PENDING state...
|
||||||
|
{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
|
{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
@ -186,6 +188,8 @@ Pause/resume in RUNNING
|
|||||||
{"return": {}}
|
{"return": {}}
|
||||||
|
|
||||||
Waiting for PENDING state...
|
Waiting for PENDING state...
|
||||||
|
{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
|
{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
@ -245,6 +249,8 @@ Pause/resume in RUNNING
|
|||||||
{"return": {}}
|
{"return": {}}
|
||||||
|
|
||||||
Waiting for PENDING state...
|
Waiting for PENDING state...
|
||||||
|
{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
|
{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]}
|
{"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]}
|
||||||
@ -304,6 +310,8 @@ Pause/resume in RUNNING
|
|||||||
{"return": {}}
|
{"return": {}}
|
||||||
|
|
||||||
Waiting for PENDING state...
|
Waiting for PENDING state...
|
||||||
|
{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
|
{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
{"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
|
||||||
{"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]}
|
{"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]}
|
||||||
|
@ -69,9 +69,9 @@ with iotests.FilePath('img') as img_path, \
|
|||||||
iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a)))
|
iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a)))
|
||||||
with iotests.Timeout(3, 'Migration does not complete'):
|
with iotests.Timeout(3, 'Migration does not complete'):
|
||||||
# Wait for the source first (which includes setup=setup)
|
# Wait for the source first (which includes setup=setup)
|
||||||
vm_a.wait_migration()
|
vm_a.wait_migration('postmigrate')
|
||||||
# Wait for the destination second (which does not)
|
# Wait for the destination second (which does not)
|
||||||
vm_b.wait_migration()
|
vm_b.wait_migration('running')
|
||||||
|
|
||||||
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
||||||
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
||||||
@ -98,9 +98,9 @@ with iotests.FilePath('img') as img_path, \
|
|||||||
iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b)))
|
iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b)))
|
||||||
with iotests.Timeout(3, 'Migration does not complete'):
|
with iotests.Timeout(3, 'Migration does not complete'):
|
||||||
# Wait for the source first (which includes setup=setup)
|
# Wait for the source first (which includes setup=setup)
|
||||||
vm_b.wait_migration()
|
vm_b.wait_migration('postmigrate')
|
||||||
# Wait for the destination second (which does not)
|
# Wait for the destination second (which does not)
|
||||||
vm_a.wait_migration()
|
vm_a.wait_migration('running')
|
||||||
|
|
||||||
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
||||||
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
||||||
|
@ -478,6 +478,7 @@ class TestBlockdevReopen(iotests.QMPTestCase):
|
|||||||
# This test verifies that we can't change the children of a block
|
# This test verifies that we can't change the children of a block
|
||||||
# device during a reopen operation in a way that would create
|
# device during a reopen operation in a way that would create
|
||||||
# cycles in the node graph
|
# cycles in the node graph
|
||||||
|
@iotests.skip_if_unsupported(['blkverify'])
|
||||||
def test_graph_cycles(self):
|
def test_graph_cycles(self):
|
||||||
opts = []
|
opts = []
|
||||||
|
|
||||||
@ -534,6 +535,7 @@ class TestBlockdevReopen(iotests.QMPTestCase):
|
|||||||
self.assert_qmp(result, 'return', {})
|
self.assert_qmp(result, 'return', {})
|
||||||
|
|
||||||
# Misc reopen tests with different block drivers
|
# Misc reopen tests with different block drivers
|
||||||
|
@iotests.skip_if_unsupported(['quorum', 'throttle'])
|
||||||
def test_misc_drivers(self):
|
def test_misc_drivers(self):
|
||||||
####################
|
####################
|
||||||
###### quorum ######
|
###### quorum ######
|
||||||
|
@ -71,9 +71,9 @@ with iotests.FilePath('img') as img_path, \
|
|||||||
iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo)))
|
iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo)))
|
||||||
with iotests.Timeout(3, 'Migration does not complete'):
|
with iotests.Timeout(3, 'Migration does not complete'):
|
||||||
# Wait for the source first (which includes setup=setup)
|
# Wait for the source first (which includes setup=setup)
|
||||||
vm_a.wait_migration()
|
vm_a.wait_migration('postmigrate')
|
||||||
# Wait for the destination second (which does not)
|
# Wait for the destination second (which does not)
|
||||||
vm_b.wait_migration()
|
vm_b.wait_migration('running')
|
||||||
|
|
||||||
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_a.qmp('query-migrate')['return']['status'])
|
||||||
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
iotests.log(vm_b.qmp('query-migrate')['return']['status'])
|
||||||
|
@ -45,7 +45,7 @@ with iotests.FilePath('base') as base_path , \
|
|||||||
vm.qmp_log('migrate', uri='exec:cat > /dev/null')
|
vm.qmp_log('migrate', uri='exec:cat > /dev/null')
|
||||||
|
|
||||||
with iotests.Timeout(3, 'Migration does not complete'):
|
with iotests.Timeout(3, 'Migration does not complete'):
|
||||||
vm.wait_migration()
|
vm.wait_migration('postmigrate')
|
||||||
|
|
||||||
iotests.log('\nVM is now stopped:')
|
iotests.log('\nVM is now stopped:')
|
||||||
iotests.log(vm.qmp('query-migrate')['return']['status'])
|
iotests.log(vm.qmp('query-migrate')['return']['status'])
|
||||||
|
247
tests/qemu-iotests/281
Executable file
247
tests/qemu-iotests/281
Executable file
@ -0,0 +1,247 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
#
|
||||||
|
# Test cases for blockdev + IOThread interactions
|
||||||
|
#
|
||||||
|
# Copyright (C) 2019 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# This program is free software; you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation; either version 2 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import iotests
|
||||||
|
from iotests import qemu_img
|
||||||
|
|
||||||
|
image_len = 64 * 1024 * 1024
|
||||||
|
|
||||||
|
# Test for RHBZ#1782175
|
||||||
|
class TestDirtyBitmapIOThread(iotests.QMPTestCase):
|
||||||
|
drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
|
||||||
|
images = { 'drive0': drive0_img }
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
for name in self.images:
|
||||||
|
qemu_img('create', '-f', iotests.imgfmt,
|
||||||
|
self.images[name], str(image_len))
|
||||||
|
|
||||||
|
self.vm = iotests.VM()
|
||||||
|
self.vm.add_object('iothread,id=iothread0')
|
||||||
|
|
||||||
|
for name in self.images:
|
||||||
|
self.vm.add_blockdev('driver=file,filename=%s,node-name=file_%s'
|
||||||
|
% (self.images[name], name))
|
||||||
|
self.vm.add_blockdev('driver=qcow2,file=file_%s,node-name=%s'
|
||||||
|
% (name, name))
|
||||||
|
|
||||||
|
self.vm.launch()
|
||||||
|
self.vm.qmp('x-blockdev-set-iothread',
|
||||||
|
node_name='drive0', iothread='iothread0',
|
||||||
|
force=True)
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
self.vm.shutdown()
|
||||||
|
for name in self.images:
|
||||||
|
os.remove(self.images[name])
|
||||||
|
|
||||||
|
def test_add_dirty_bitmap(self):
|
||||||
|
result = self.vm.qmp(
|
||||||
|
'block-dirty-bitmap-add',
|
||||||
|
node='drive0',
|
||||||
|
name='bitmap1',
|
||||||
|
persistent=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assert_qmp(result, 'return', {})
|
||||||
|
|
||||||
|
|
||||||
|
# Test for RHBZ#1746217 & RHBZ#1773517
|
||||||
|
class TestNBDMirrorIOThread(iotests.QMPTestCase):
|
||||||
|
nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock')
|
||||||
|
drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
|
||||||
|
mirror_img = os.path.join(iotests.test_dir, 'mirror.img')
|
||||||
|
images = { 'drive0': drive0_img, 'mirror': mirror_img }
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
for name in self.images:
|
||||||
|
qemu_img('create', '-f', iotests.imgfmt,
|
||||||
|
self.images[name], str(image_len))
|
||||||
|
|
||||||
|
self.vm_src = iotests.VM(path_suffix='src')
|
||||||
|
self.vm_src.add_object('iothread,id=iothread0')
|
||||||
|
self.vm_src.add_blockdev('driver=file,filename=%s,node-name=file0'
|
||||||
|
% (self.drive0_img))
|
||||||
|
self.vm_src.add_blockdev('driver=qcow2,file=file0,node-name=drive0')
|
||||||
|
self.vm_src.launch()
|
||||||
|
self.vm_src.qmp('x-blockdev-set-iothread',
|
||||||
|
node_name='drive0', iothread='iothread0',
|
||||||
|
force=True)
|
||||||
|
|
||||||
|
self.vm_tgt = iotests.VM(path_suffix='tgt')
|
||||||
|
self.vm_tgt.add_object('iothread,id=iothread0')
|
||||||
|
self.vm_tgt.add_blockdev('driver=file,filename=%s,node-name=file0'
|
||||||
|
% (self.mirror_img))
|
||||||
|
self.vm_tgt.add_blockdev('driver=qcow2,file=file0,node-name=drive0')
|
||||||
|
self.vm_tgt.launch()
|
||||||
|
self.vm_tgt.qmp('x-blockdev-set-iothread',
|
||||||
|
node_name='drive0', iothread='iothread0',
|
||||||
|
force=True)
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
self.vm_src.shutdown()
|
||||||
|
self.vm_tgt.shutdown()
|
||||||
|
for name in self.images:
|
||||||
|
os.remove(self.images[name])
|
||||||
|
|
||||||
|
def test_nbd_mirror(self):
|
||||||
|
result = self.vm_tgt.qmp(
|
||||||
|
'nbd-server-start',
|
||||||
|
addr={
|
||||||
|
'type': 'unix',
|
||||||
|
'data': { 'path': self.nbd_sock }
|
||||||
|
}
|
||||||
|
)
|
||||||
|
self.assert_qmp(result, 'return', {})
|
||||||
|
|
||||||
|
result = self.vm_tgt.qmp(
|
||||||
|
'nbd-server-add',
|
||||||
|
device='drive0',
|
||||||
|
writable=True
|
||||||
|
)
|
||||||
|
self.assert_qmp(result, 'return', {})
|
||||||
|
|
||||||
|
result = self.vm_src.qmp(
|
||||||
|
'drive-mirror',
|
||||||
|
device='drive0',
|
||||||
|
target='nbd+unix:///drive0?socket=' + self.nbd_sock,
|
||||||
|
sync='full',
|
||||||
|
mode='existing',
|
||||||
|
speed=64*1024*1024,
|
||||||
|
job_id='j1'
|
||||||
|
)
|
||||||
|
self.assert_qmp(result, 'return', {})
|
||||||
|
|
||||||
|
self.vm_src.event_wait(name="BLOCK_JOB_READY")
|
||||||
|
|
||||||
|
|
||||||
|
# Regression test for RHBZ#1779036: aborting a transaction containing an
# external snapshot of a node that lives in an iothread's AioContext.
class TestExternalSnapshotAbort(iotests.QMPTestCase):
    drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
    snapshot_img = os.path.join(iotests.test_dir, 'snapshot.img')
    images = { 'drive0': drive0_img, 'snapshot': snapshot_img }

    def setUp(self):
        # Create every image the test needs.
        for path in self.images.values():
            qemu_img('create', '-f', iotests.imgfmt, path, str(image_len))

        self.vm = iotests.VM()
        self.vm.add_object('iothread,id=iothread0')
        self.vm.add_blockdev('driver=file,filename=%s,node-name=file0'
                             % (self.drive0_img))
        self.vm.add_blockdev('driver=qcow2,file=file0,node-name=drive0')
        self.vm.launch()
        # Move drive0 into the iothread's AioContext before running the test.
        self.vm.qmp('x-blockdev-set-iothread',
                    node_name='drive0', iothread='iothread0',
                    force=True)

    def tearDown(self):
        self.vm.shutdown()
        for path in self.images.values():
            os.remove(path)

    def test_external_snapshot_abort(self):
        # Two-action transaction; the second one carries bogus values so
        # the whole transaction is aborted after the first action ran.
        good_snap = {
            'type': 'blockdev-snapshot-sync',
            'data': { 'node-name': 'drive0',
                      'snapshot-file': self.snapshot_img,
                      'snapshot-node-name': 'snap1',
                      'mode': 'absolute-paths',
                      'format': 'qcow2' }
        }
        bad_snap = {
            'type': 'blockdev-snapshot-sync',
            'data': { 'node-name': 'drive0',
                      'snapshot-file': '/fakesnapshot',
                      'snapshot-node-name': 'snap2',
                      'mode': 'absolute-paths',
                      'format': 'qcow2' }
        }
        result = self.vm.qmp('transaction', actions=[good_snap, bad_snap])

        # A buggy QEMU crashes on the abort path; we expect this error.
        self.assert_qmp(result, 'error/class', 'GenericError')
|
||||||
|
# Regression test for RHBZ#1782111: aborting a transaction containing a
# blockdev-backup of drives attached through an iothread-backed controller.
class TestBlockdevBackupAbort(iotests.QMPTestCase):
    drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
    drive1_img = os.path.join(iotests.test_dir, 'drive1.img')
    snap0_img = os.path.join(iotests.test_dir, 'snap0.img')
    snap1_img = os.path.join(iotests.test_dir, 'snap1.img')
    images = { 'drive0': drive0_img,
               'drive1': drive1_img,
               'snap0': snap0_img,
               'snap1': snap1_img }

    def setUp(self):
        # Create every image the test needs.
        for path in self.images.values():
            qemu_img('create', '-f', iotests.imgfmt, path, str(image_len))

        self.vm = iotests.VM()
        self.vm.add_object('iothread,id=iothread0')
        self.vm.add_device('virtio-scsi,iothread=iothread0')

        # One file node plus one qcow2 node per image.
        for name, path in self.images.items():
            self.vm.add_blockdev('driver=file,filename=%s,node-name=file_%s'
                                 % (path, name))
            self.vm.add_blockdev('driver=qcow2,file=file_%s,node-name=%s'
                                 % (name, name))

        # Attach the source drives to the iothread-backed SCSI controller.
        self.vm.add_device('scsi-hd,drive=drive0')
        self.vm.add_device('scsi-hd,drive=drive1')
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for path in self.images.values():
            os.remove(path)

    def test_blockdev_backup_abort(self):
        # Two-action transaction; the second one carries bogus values so
        # the whole transaction is aborted after the first action ran.
        good_backup = {
            'type': 'blockdev-backup',
            'data': { 'device': 'drive0',
                      'target': 'snap0',
                      'sync': 'full',
                      'job-id': 'j1' }
        }
        bad_backup = {
            'type': 'blockdev-backup',
            'data': { 'device': 'drive1',
                      'target': 'snap1',
                      'sync': 'full' }
        }
        result = self.vm.qmp('transaction', actions=[good_backup, bad_backup])

        # A buggy QEMU hangs on the abort path; we expect this error.
        self.assert_qmp(result, 'error/class', 'GenericError')
|
if __name__ == '__main__':
    # These cases only exercise qcow2 images over the file protocol.
    iotests.main(supported_fmts=['qcow2'], supported_protocols=['file'])
|
5
tests/qemu-iotests/281.out
Normal file
5
tests/qemu-iotests/281.out
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
....
|
||||||
|
----------------------------------------------------------------------
|
||||||
|
Ran 4 tests
|
||||||
|
|
||||||
|
OK
|
@ -288,3 +288,4 @@
|
|||||||
277 rw quick
|
277 rw quick
|
||||||
279 rw backing quick
|
279 rw backing quick
|
||||||
280 rw migration quick
|
280 rw migration quick
|
||||||
|
281 rw quick
|
||||||
|
@ -668,12 +668,16 @@ class VM(qtest.QEMUQtestMachine):
|
|||||||
}
|
}
|
||||||
]))
|
]))
|
||||||
|
|
||||||
def wait_migration(self):
|
def wait_migration(self, expect_runstate):
|
||||||
while True:
|
while True:
|
||||||
event = self.event_wait('MIGRATION')
|
event = self.event_wait('MIGRATION')
|
||||||
log(event, filters=[filter_qmp_event])
|
log(event, filters=[filter_qmp_event])
|
||||||
if event['data']['status'] == 'completed':
|
if event['data']['status'] == 'completed':
|
||||||
break
|
break
|
||||||
|
# The event may occur in finish-migrate, so wait for the expected
|
||||||
|
# post-migration runstate
|
||||||
|
while self.qmp('query-status')['return']['status'] != expect_runstate:
|
||||||
|
pass
|
||||||
|
|
||||||
def node_info(self, node_name):
|
def node_info(self, node_name):
|
||||||
nodes = self.qmp('query-named-block-nodes')
|
nodes = self.qmp('query-named-block-nodes')
|
||||||
|
Loading…
Reference in New Issue
Block a user