From 9e37271f50ec2e95f299dc297ac08f9be0096b48 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 23 Nov 2018 15:11:14 +0100
Subject: [PATCH 1/3] block: Don't inactivate children before parents

bdrv_child_cb_inactivate() asserts that parents are already inactive
when children get inactivated. This precondition is necessary because
parents could still issue requests in their inactivation code.

When block nodes are created individually with -blockdev, all of them
are monitor owned and will be returned by bdrv_next() in an undefined
order (in practice, in the order of their creation, which is usually
children before parents), which obviously fails the assertion:

qemu: block.c:899: bdrv_child_cb_inactivate: Assertion `bs->open_flags & BDRV_O_INACTIVE' failed.

This patch fixes the ordering by skipping nodes with still active
parents in bdrv_inactivate_recurse() because we know that they will be
covered by recursion when the last active parent becomes inactive.

With the correct parents-before-children ordering, we also got rid of
the reason why commit aad0b7a0bfb introduced two passes, so we can go
back to a single-pass recursion. This is necessary so we can rely on
the BDRV_O_INACTIVE flag to skip nodes with active parents (the flag
used to be set only in pass 2, so we would always skip non-root nodes
in pass 1 because all parents would still be considered active;
setting the flag in pass 1 would mean that we never skip anything in
pass 2 because all parents are already considered inactive).

Because of the change to single pass, this patch is best reviewed with
whitespace changes ignored.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
 block.c | 86 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 54 insertions(+), 32 deletions(-)

diff --git a/block.c b/block.c
index 5ba3435f8f..811239ca23 100644
--- a/block.c
+++ b/block.c
@@ -4612,45 +4612,68 @@ void bdrv_invalidate_cache_all(Error **errp)
     }
 }
 
-static int bdrv_inactivate_recurse(BlockDriverState *bs,
-                                   bool setting_flag)
+static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
+{
+    BdrvChild *parent;
+
+    QLIST_FOREACH(parent, &bs->parents, next_parent) {
+        if (parent->role->parent_is_bds) {
+            BlockDriverState *parent_bs = parent->opaque;
+            if (!only_active || !(parent_bs->open_flags & BDRV_O_INACTIVE)) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+static int bdrv_inactivate_recurse(BlockDriverState *bs)
 {
     BdrvChild *child, *parent;
+    uint64_t perm, shared_perm;
     int ret;
 
     if (!bs->drv) {
         return -ENOMEDIUM;
     }
 
-    if (!setting_flag && bs->drv->bdrv_inactivate) {
+    /* Make sure that we don't inactivate a child before its parent.
+     * It will be covered by recursion from the yet active parent. */
+    if (bdrv_has_bds_parent(bs, true)) {
+        return 0;
+    }
+
+    assert(!(bs->open_flags & BDRV_O_INACTIVE));
+
+    /* Inactivate this node */
+    if (bs->drv->bdrv_inactivate) {
         ret = bs->drv->bdrv_inactivate(bs);
         if (ret < 0) {
             return ret;
         }
     }
 
-    if (setting_flag && !(bs->open_flags & BDRV_O_INACTIVE)) {
-        uint64_t perm, shared_perm;
-
-        QLIST_FOREACH(parent, &bs->parents, next_parent) {
-            if (parent->role->inactivate) {
-                ret = parent->role->inactivate(parent);
-                if (ret < 0) {
-                    return ret;
-                }
+    QLIST_FOREACH(parent, &bs->parents, next_parent) {
+        if (parent->role->inactivate) {
+            ret = parent->role->inactivate(parent);
+            if (ret < 0) {
+                return ret;
             }
         }
-
-        bs->open_flags |= BDRV_O_INACTIVE;
-
-        /* Update permissions, they may differ for inactive nodes */
-        bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
-        bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, &error_abort);
-        bdrv_set_perm(bs, perm, shared_perm);
     }
 
+    bs->open_flags |= BDRV_O_INACTIVE;
+
+    /* Update permissions, they may differ for inactive nodes */
+    bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
+    bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, &error_abort);
+    bdrv_set_perm(bs, perm, shared_perm);
+
+
+    /* Recursively inactivate children */
     QLIST_FOREACH(child, &bs->children, next) {
-        ret = bdrv_inactivate_recurse(child->bs, setting_flag);
+        ret = bdrv_inactivate_recurse(child->bs);
         if (ret < 0) {
             return ret;
         }
@@ -4664,7 +4687,6 @@ int bdrv_inactivate_all(void)
     BlockDriverState *bs = NULL;
     BdrvNextIterator it;
     int ret = 0;
-    int pass;
     GSList *aio_ctxs = NULL, *ctx;
 
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
@@ -4676,17 +4698,17 @@ int bdrv_inactivate_all(void)
         }
     }
 
-    /* We do two passes of inactivation. The first pass calls to drivers'
-     * .bdrv_inactivate callbacks recursively so all cache is flushed to disk;
-     * the second pass sets the BDRV_O_INACTIVE flag so that no further write
-     * is allowed. */
-    for (pass = 0; pass < 2; pass++) {
-        for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
-            ret = bdrv_inactivate_recurse(bs, pass);
-            if (ret < 0) {
-                bdrv_next_cleanup(&it);
-                goto out;
-            }
+    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+        /* Nodes with BDS parents are covered by recursion from the last
+         * parent that gets inactivated. Don't inactivate them a second
+         * time if that has already happened. */
+        if (bdrv_has_bds_parent(bs, false)) {
+            continue;
+        }
+        ret = bdrv_inactivate_recurse(bs);
+        if (ret < 0) {
+            bdrv_next_cleanup(&it);
+            goto out;
        }
     }
 

From 330ca111ea0979d8c6fc9b3958f72d6dce164d5a Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Mon, 26 Nov 2018 12:21:07 +0100
Subject: [PATCH 2/3] iotests: Test migration with -blockdev

Check that block node activation and inactivation works with a block
graph that is built with individually created nodes.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
 tests/qemu-iotests/234     | 121 +++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/234.out |  30 +++++++++
 tests/qemu-iotests/group   |   1 +
 3 files changed, 152 insertions(+)
 create mode 100755 tests/qemu-iotests/234
 create mode 100644 tests/qemu-iotests/234.out

diff --git a/tests/qemu-iotests/234 b/tests/qemu-iotests/234
new file mode 100755
index 0000000000..a8185b4360
--- /dev/null
+++ b/tests/qemu-iotests/234
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+#
+# Check that block node activation and inactivation works with a block graph
+# that is built with individually created nodes
+
+import iotests
+import os
+
+iotests.verify_image_format(supported_fmts=['qcow2'])
+iotests.verify_platform(['linux'])
+
+with iotests.FilePath('img') as img_path, \
+     iotests.FilePath('backing') as backing_path, \
+     iotests.FilePath('mig_fifo_a') as fifo_a, \
+     iotests.FilePath('mig_fifo_b') as fifo_b, \
+     iotests.VM(path_suffix='a') as vm_a, \
+     iotests.VM(path_suffix='b') as vm_b:
+
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, backing_path, '64M')
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, img_path, '64M')
+
+    os.mkfifo(fifo_a)
+    os.mkfifo(fifo_b)
+
+    iotests.log('Launching source VM...')
+    (vm_a.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path))
+         .add_blockdev('%s,file=drive0-file,node-name=drive0' % (iotests.imgfmt))
+         .add_blockdev('file,filename=%s,node-name=drive0-backing-file' % (backing_path))
+         .add_blockdev('%s,file=drive0-backing-file,node-name=drive0-backing' % (iotests.imgfmt))
+         .launch())
+
+    iotests.log('Launching destination VM...')
+    (vm_b.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path))
+         .add_blockdev('%s,file=drive0-file,node-name=drive0' % (iotests.imgfmt))
+         .add_blockdev('file,filename=%s,node-name=drive0-backing-file' % (backing_path))
+         .add_blockdev('%s,file=drive0-backing-file,node-name=drive0-backing' % (iotests.imgfmt))
+         .add_incoming("exec: cat '%s'" % (fifo_a))
+         .launch())
+
+    # Add a child node that was created after the parent node. The reverse case
+    # is covered by the -blockdev options above.
+    iotests.log(vm_a.qmp('blockdev-snapshot', node='drive0-backing',
+                         overlay='drive0'))
+    iotests.log(vm_b.qmp('blockdev-snapshot', node='drive0-backing',
+                         overlay='drive0'))
+
+    iotests.log('Enabling migration QMP events on A...')
+    iotests.log(vm_a.qmp('migrate-set-capabilities', capabilities=[
+        {
+            'capability': 'events',
+            'state': True
+        }
+    ]))
+
+    iotests.log('Starting migration to B...')
+    iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a)))
+    with iotests.Timeout(3, 'Migration does not complete'):
+        while True:
+            event = vm_a.event_wait('MIGRATION')
+            iotests.log(event, filters=[iotests.filter_qmp_event])
+            if event['data']['status'] == 'completed':
+                break
+
+    iotests.log(vm_a.qmp('query-migrate')['return']['status'])
+    iotests.log(vm_b.qmp('query-migrate')['return']['status'])
+
+    iotests.log(vm_a.qmp('query-status'))
+    iotests.log(vm_b.qmp('query-status'))
+
+    iotests.log('Add a second parent to drive0-file...')
+    iotests.log(vm_b.qmp('blockdev-add', driver='raw', file='drive0-file',
+                         node_name='drive0-raw'))
+
+    iotests.log('Restart A with -incoming and second parent...')
+    vm_a.shutdown()
+    (vm_a.add_blockdev('raw,file=drive0-file,node-name=drive0-raw')
+         .add_incoming("exec: cat '%s'" % (fifo_b))
+         .launch())
+
+    iotests.log(vm_a.qmp('blockdev-snapshot', node='drive0-backing',
+                         overlay='drive0'))
+
+    iotests.log('Enabling migration QMP events on B...')
+    iotests.log(vm_b.qmp('migrate-set-capabilities', capabilities=[
+        {
+            'capability': 'events',
+            'state': True
+        }
+    ]))
+
+    iotests.log('Starting migration back to A...')
+    iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b)))
+    with iotests.Timeout(3, 'Migration does not complete'):
+        while True:
+            event = vm_b.event_wait('MIGRATION')
+            iotests.log(event, filters=[iotests.filter_qmp_event])
+            if event['data']['status'] == 'completed':
+                break
+
+    iotests.log(vm_a.qmp('query-migrate')['return']['status'])
+    iotests.log(vm_b.qmp('query-migrate')['return']['status'])
+
+    iotests.log(vm_a.qmp('query-status'))
+    iotests.log(vm_b.qmp('query-status'))
diff --git a/tests/qemu-iotests/234.out b/tests/qemu-iotests/234.out
new file mode 100644
index 0000000000..b9ed910b1a
--- /dev/null
+++ b/tests/qemu-iotests/234.out
@@ -0,0 +1,30 @@
+Launching source VM...
+Launching destination VM...
+{"return": {}}
+{"return": {}}
+Enabling migration QMP events on A...
+{"return": {}}
+Starting migration to B...
+{"return": {}}
+{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+completed
+completed
+{"return": {"running": false, "singlestep": false, "status": "postmigrate"}}
+{"return": {"running": true, "singlestep": false, "status": "running"}}
+Add a second parent to drive0-file...
+{"return": {}}
+Restart A with -incoming and second parent...
+{"return": {}}
+Enabling migration QMP events on B...
+{"return": {}}
+Starting migration back to A...
+{"return": {}}
+{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+completed
+completed
+{"return": {"running": true, "singlestep": false, "status": "running"}}
+{"return": {"running": false, "singlestep": false, "status": "postmigrate"}}
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index ddf1a5b549..8c56a0ad11 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -231,3 +231,4 @@
 231 auto quick
 232 auto quick
 233 auto quick
+234 auto quick migration

From 6da021815e752b3ca3a547eed53f3e92a8a35452 Mon Sep 17 00:00:00 2001
From: Keith Busch <keith.busch@intel.com>
Date: Mon, 26 Nov 2018 10:17:45 -0700
Subject: [PATCH 3/3] nvme: Fix spurious interrupts

The code had asserted an interrupt every time it was requested to check
for new completion queue entries. This can result in spurious interrupts
seen by the guest OS.

Fix this by asserting an interrupt only if there are un-acknowledged
completion queue entries available.

Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/block/nvme.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 9fbe5673cb..7c8c63e8f5 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -272,7 +272,9 @@ static void nvme_post_cqes(void *opaque)
             sizeof(req->cqe));
         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
     }
-    nvme_irq_assert(n, cq);
+    if (cq->tail != cq->head) {
+        nvme_irq_assert(n, cq);
+    }
 }
 
 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
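A note for readers following the interrupt condition in the last patch: cq->tail is the slot where the device publishes new completion entries, while cq->head advances as the guest acknowledges them through its doorbell, so un-acknowledged entries exist exactly while the two indices differ. Below is a minimal standalone sketch of that ring invariant; ToyCQ and its helper functions are illustrative stand-ins, not QEMU's actual NvmeCQueue definition or API.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for a completion queue ring; the field names
     * mirror the patch, but this is not the real NvmeCQueue struct. */
    typedef struct {
        uint32_t head;  /* next entry the guest will consume */
        uint32_t tail;  /* next slot the device will fill */
        uint32_t size;  /* number of slots in the ring */
    } ToyCQ;

    /* The condition the patch now checks before nvme_irq_assert():
     * true while the guest still has entries to acknowledge. */
    static bool cq_has_pending(const ToyCQ *cq)
    {
        return cq->tail != cq->head;
    }

    /* Device posts one completion entry. */
    static void cq_post(ToyCQ *cq)
    {
        cq->tail = (cq->tail + 1) % cq->size;
    }

    /* Guest acknowledges consumed entries via its head doorbell. */
    static void cq_ack(ToyCQ *cq, uint32_t new_head)
    {
        cq->head = new_head % cq->size;
    }

    int main(void)
    {
        ToyCQ cq = { .head = 0, .tail = 0, .size = 8 };

        assert(!cq_has_pending(&cq));  /* empty: an interrupt would be spurious */
        cq_post(&cq);
        assert(cq_has_pending(&cq));   /* entry pending: interrupt justified */
        cq_ack(&cq, cq.tail);
        assert(!cq_has_pending(&cq));  /* all acknowledged: stay silent again */
        return 0;
    }

Under this invariant, raising the interrupt only while the check holds means an empty queue never signals the guest, which is exactly what the cq->tail != cq->head guard in nvme_post_cqes() enforces.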