/*
 * Block node graph modifications tests
 *
 * Copyright (c) 2019-2021 Virtuozzo International GmbH. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
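
/*
 * A trivial pass-through filter: it relies on the generic bdrv_default_perms()
 * callback, so it effectively forwards its parents' permission requirements
 * down to its filtered child.
 */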
static BlockDriver bdrv_pass_through = {
    .format_name = "pass-through",
    .bdrv_child_perm = bdrv_default_perms,
};

static void no_perm_default_perms(BlockDriverState *bs, BdrvChild *c,
                                  BdrvChildRole role,
                                  BlockReopenQueue *reopen_queue,
                                  uint64_t perm, uint64_t shared,
                                  uint64_t *nperm, uint64_t *nshared)
{
    *nperm = 0;
    *nshared = BLK_PERM_ALL;
}

static BlockDriver bdrv_no_perm = {
    .format_name = "no-perm",
    .bdrv_child_perm = no_perm_default_perms,
};

static void exclusive_write_perms(BlockDriverState *bs, BdrvChild *c,
                                  BdrvChildRole role,
                                  BlockReopenQueue *reopen_queue,
                                  uint64_t perm, uint64_t shared,
                                  uint64_t *nperm, uint64_t *nshared)
{
    *nperm = BLK_PERM_WRITE;
    *nshared = BLK_PERM_ALL & ~BLK_PERM_WRITE;
}

static BlockDriver bdrv_exclusive_writer = {
    .format_name = "exclusive-writer",
    .bdrv_child_perm = exclusive_write_perms,
};

static BlockDriverState *no_perm_node(const char *name)
{
    return bdrv_new_open_driver(&bdrv_no_perm, name, BDRV_O_RDWR, &error_abort);
}

static BlockDriverState *pass_through_node(const char *name)
{
    return bdrv_new_open_driver(&bdrv_pass_through, name,
                                BDRV_O_RDWR, &error_abort);
}

static BlockDriverState *exclusive_writer_node(const char *name)
{
    return bdrv_new_open_driver(&bdrv_exclusive_writer, name,
                                BDRV_O_RDWR, &error_abort);
}

/*
 * test_update_perm_tree
 *
 * When checking a node for the possibility of updating permissions, its
 * subtree must be checked correctly too. New permissions for each node should
 * be calculated and checked in the context of the permissions of other nodes.
 * If we check new permissions of a node only in the context of the old
 * permissions of its neighbors, we can end up with a wrong permission graph.
 *
 * This test first creates the following graph:
 *                                 +--------+
 *                                 |  root  |
 *                                 +--------+
 *                                     |
 *                                     | perm: write, read
 *                                     | shared: except write
 *                                     v
 *  +--------------------+           +----------------+
 *  | passthrough filter |---------->|  null-co node  |
 *  +--------------------+           +----------------+
 *
 *
 * and then tries to append the filter under the node. Expected behavior: fail.
 * Otherwise we would end up with the following picture, where two BdrvChildren
 * have write permission on one node without actually sharing it:
 *
 *                      +--------+
 *                      |  root  |
 *                      +--------+
 *                          |
 *                          | perm: write, read
 *                          | shared: except write
 *                          v
 *               +--------------------+
 *               | passthrough filter |
 *               +--------------------+
 *                        |   |
 *      perm: write, read |   | perm: write, read
 *   shared: except write |   | shared: except write
 *                        v   v
 *                 +----------------+
 *                 |  null-co node  |
 *                 +----------------+
 */
static void test_update_perm_tree(void)
{
    int ret;

    BlockBackend *root = blk_new(qemu_get_aio_context(),
                                 BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ,
                                 BLK_PERM_ALL & ~BLK_PERM_WRITE);
    BlockDriverState *bs = no_perm_node("node");
    BlockDriverState *filter = pass_through_node("filter");

    blk_insert_bs(root, bs, &error_abort);

    bdrv_attach_child(filter, bs, "child", &child_of_bds,
                      BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort);
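
    /*
     * As described above, appending the filter must fail: it would leave two
     * BdrvChildren (root and the filter) holding an unshared WRITE permission
     * on the same node.
     */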
    ret = bdrv_append(filter, bs, NULL);
    g_assert_cmpint(ret, <, 0);

    bdrv_unref(filter);
    blk_unref(root);
}

/*
 * test_should_update_child
 *
 * Test that bdrv_replace_node(), and concretely should_update_child(),
 * do the right thing, i.e. do not create loops in the graph.
 *
 * The test does the following:
 * 1. initial graph:
 *
 *    +------+          +--------+
 *    | root |          | filter |
 *    +------+          +--------+
 *       |                  |
 *   root|            target|
 *       v                  v
 *    +------+          +--------+
 *    | node |<---------| target |
 *    +------+  backing +--------+
 *
 * 2. Append @filter above @node. If should_update_child() works correctly,
 * it understands that the backing child of @target must not be updated, as
 * that would create a loop in the node graph. The resulting picture should
 * be the left one, not the right:
 *
 *    +------+                            +------+
 *    | root |                            | root |
 *    +------+                            +------+
 *        |                                   |
 *    root|                               root|
 *        v                                   v
 *   +--------+  target                  +--------+  target
 *   | filter |------------+             | filter |-------------+
 *   +--------+            |             +--------+             |
 *        |                |                 |   ^              v
 * backing|                |          backing|   |          +--------+
 *        v                v                 |   +----------| target |
 *    +------+        +--------+             v      backing +--------+
 *    | node |<-------| target |          +------+
 *    +------+ backing+--------+          | node |
 *                                        +------+
 *
 *       (good picture)                      (bad picture)
 *
 */
static void test_should_update_child(void)
{
    BlockBackend *root = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    BlockDriverState *bs = no_perm_node("node");
    BlockDriverState *filter = no_perm_node("filter");
    BlockDriverState *target = no_perm_node("target");

    blk_insert_bs(root, bs, &error_abort);

    bdrv_set_backing_hd(target, bs, &error_abort);

    g_assert(target->backing->bs == bs);
    bdrv_attach_child(filter, target, "target", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
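    /*
     * Appending must insert @filter between root and @bs, but must leave
     * @target's backing child pointing at @bs: updating it would create the
     * filter->target->filter loop shown in the "bad picture" above.
     */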
    bdrv_append(filter, bs, &error_abort);
    g_assert(target->backing->bs == bs);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(root);
}

/*
 * test_parallel_exclusive_write
 *
 * Check that when we replace a node, the old permissions of the node being
 * removed do not break the replacement.
 */
static void test_parallel_exclusive_write(void)
{
    BlockDriverState *top = exclusive_writer_node("top");
    BlockDriverState *base = no_perm_node("base");
    BlockDriverState *fl1 = pass_through_node("fl1");
    BlockDriverState *fl2 = pass_through_node("fl2");

    /*
     * bdrv_attach_child() eats the child bs reference, so we need two @base
     * references for the two filters:
     */
    bdrv_ref(base);

    bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_DATA,
                      &error_abort);
    bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED,
                      &error_abort);
    bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED,
                      &error_abort);
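
    /*
     * Replacing fl1 with fl2 moves top's exclusive-write child from fl1 to
     * fl2. The permission check must not be confused by fl1's old, not yet
     * dropped, write permission on @base.
     */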
    bdrv_replace_node(fl1, fl2, &error_abort);

    bdrv_unref(fl2);
    bdrv_unref(top);
}
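
/*
 * The "tricky-perm" driver requires exclusive write access on its bs->file
 * child and no permissions on any other child. Which child is "active" is
 * selected in the test below simply by assigning bs->file.
 */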
static void write_to_file_perms(BlockDriverState *bs, BdrvChild *c,
                                BdrvChildRole role,
                                BlockReopenQueue *reopen_queue,
                                uint64_t perm, uint64_t shared,
                                uint64_t *nperm, uint64_t *nshared)
{
    if (bs->file && c == bs->file) {
        *nperm = BLK_PERM_WRITE;
        *nshared = BLK_PERM_ALL & ~BLK_PERM_WRITE;
    } else {
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
    }
}

static BlockDriver bdrv_write_to_file = {
    .format_name = "tricky-perm",
    .bdrv_child_perm = write_to_file_perms,
};

/*
 * The following test shows that topological-sort order is required for the
 * permission update; simple DFS is not enough.
 *
 * Consider a block driver that has two filter children: one active with
 * exclusive write access and one inactive with no specific permissions.
 *
 * These two children have a common base child, like this:
 *
 *   ┌─────┐     ┌──────┐
 *   │ fl2 │ ◀── │ top  │
 *   └─────┘     └──────┘
 *      │           │
 *      │           │ w
 *      │           ▼
 *      │        ┌──────┐
 *      │        │ fl1  │
 *      │        └──────┘
 *      │           │
 *      │           │ w
 *      │           ▼
 *      │        ┌──────┐
 *      └──────▶ │ base │
 *               └──────┘
 *
 * So, the exclusive write is propagated.
 *
 * Assume we want to make fl2 active instead of fl1.
 * So we set some option on the top driver and do a permission update.
 *
 * With simple DFS, if the permission update goes through the top->fl1->base
 * branch first, it will succeed: it first drops the exclusive write
 * permission and then applies it to the other BdrvChild.
 * But if the permission update goes through the top->fl2->base branch first,
 * it will fail: when we try to update the fl2->base child, the old, not yet
 * updated fl1->base child is still in conflict.
 *
 * With topological-sort order we always update parents before children, so fl1
 * and fl2 are both already updated when we update base, and there is no
 * conflict.
 */
static void test_parallel_perm_update(void)
{
    BlockDriverState *top = no_perm_node("top");
    BlockDriverState *tricky =
        bdrv_new_open_driver(&bdrv_write_to_file, "tricky", BDRV_O_RDWR,
                             &error_abort);
    BlockDriverState *base = no_perm_node("base");
    BlockDriverState *fl1 = pass_through_node("fl1");
    BlockDriverState *fl2 = pass_through_node("fl2");
    BdrvChild *c_fl1, *c_fl2;

    /*
     * bdrv_attach_child() eats the child bs reference, so we need two @base
     * references for the two filters:
     */
    bdrv_ref(base);

    bdrv_attach_child(top, tricky, "file", &child_of_bds, BDRV_CHILD_DATA,
                      &error_abort);
    c_fl1 = bdrv_attach_child(tricky, fl1, "first", &child_of_bds,
                              BDRV_CHILD_FILTERED, &error_abort);
    c_fl2 = bdrv_attach_child(tricky, fl2, "second", &child_of_bds,
                              BDRV_CHILD_FILTERED, &error_abort);
    bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED,
                      &error_abort);
    bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED,
                      &error_abort);
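
    /*
     * @tricky is @top's only child, so top->children.lh_first below is the
     * top->tricky BdrvChild; refreshing its permissions propagates the update
     * through @tricky to both filters and @base.
     */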
    /* Select fl1 as the first active child */
    tricky->file = c_fl1;
    bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort);

    assert(c_fl1->perm & BLK_PERM_WRITE);
    assert(!(c_fl2->perm & BLK_PERM_WRITE));

    /* Now, try to switch the active child and update permissions */
    tricky->file = c_fl2;
    bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort);

    assert(c_fl2->perm & BLK_PERM_WRITE);
    assert(!(c_fl1->perm & BLK_PERM_WRITE));

    /* Switch once more, to not depend on the real child order in the list */
    tricky->file = c_fl1;
    bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort);

    assert(c_fl1->perm & BLK_PERM_WRITE);
    assert(!(c_fl2->perm & BLK_PERM_WRITE));

    bdrv_unref(top);
}

/*
 * It is possible that a filter's required permissions allow inserting it into
 * a backing chain, like:
 *
 * 1.  [top] -> [filter] -> [base]
 *
 * but do not allow adding it as a branch:
 *
 * 2.  [filter] --\
 *                 v
 *      [top] -> [base]
 *
 * So, inserting such a filter should do all graph modifications first and
 * only then update permissions. If we tried to go through the intermediate
 * state [2] and update permissions on it, we would fail.
 *
 * Let's check that bdrv_append() can append such a filter.
 */
static void test_append_greedy_filter(void)
{
    BlockDriverState *top = exclusive_writer_node("top");
    BlockDriverState *base = no_perm_node("base");
    BlockDriverState *fl = exclusive_writer_node("fl1");

    bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_COW,
                      &error_abort);
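
    /*
     * bdrv_append() must complete all graph modifications before updating
     * permissions: in the intermediate state [2] above, @top and @fl would
     * both demand unshared write access to @base and the check would fail.
     */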
    bdrv_append(fl, base, &error_abort);
    bdrv_unref(fl);
    bdrv_unref(top);
}

int main(int argc, char *argv[])
{
    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    g_test_add_func("/bdrv-graph-mod/update-perm-tree", test_update_perm_tree);
    g_test_add_func("/bdrv-graph-mod/should-update-child",
                    test_should_update_child);
g_test_add_func("/bdrv-graph-mod/parallel-perm-update",
|
|
|
|
test_parallel_perm_update);
|
2021-04-28 18:17:45 +03:00
|
|
|
g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write",
|
|
|
|
test_parallel_exclusive_write);
|
2021-04-28 18:17:49 +03:00
|
|
|
g_test_add_func("/bdrv-graph-mod/append-greedy-filter",
|
|
|
|
test_append_greedy_filter);
|
2021-04-28 18:17:29 +03:00
|
|
|
|
2019-02-23 22:20:41 +03:00
|
|
|
return g_test_run();
|
|
|
|
}
|