Block layer patches

- Re-enable the graph lock
- More fixes to coroutine_fn marking

Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging

Block layer patches

- Re-enable the graph lock
- More fixes to coroutine_fn marking

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmScQCQRHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9bNSA//WIzPT45rFhl2U9QgyOJu26ho6ahsgwgI
# Z3QM5kCDB1dAN9USRPxhGboLGo8CyY7eeSwSrR7RtwBGYrWrAoJfGp5gK/7d9s5Q
# o0AGgRPnJGhFkBhRRMytsDsewM6Kk4IRmk4HMK3cOH3rsSM8RHs6KmDSBKesllu0
# QVGf3qW4u8LHyZyGM5OlPVUbtuDuK6/52FGhpXBp+x4oyNegOhjwO4mGOvTG+xIk
# Q5zwWZaPfjxaEDkvW8iahB6/D7Tpt64BmMf1Ydhxcd5eKEp932CiBI36aAlNKoRD
# Al5wztRx1GEh12ekN39jIi7Ypp3JX26keJcieKU0q656pT551UFRYjU0Rk08/Cca
# qv2oiQDu6bHgQ9zCQ1nMfa9+K2MyBwx0b5qfYkvs2RzgCTl8ImgBQANHfw8tz6Bq
# HUo1zsFBXCaK0boUB5iFwdf3rlx3t9UTEuDej/RaHqZjZD5xeG/smCcOlSfHaKUa
# wXfYxvm8ZfefJn1D6io1A+7M956uvIQNtmh13cU44clgFX9Y/bBNMg/5lMRsJKo8
# xxjvqCAyxo/pPfUsVWx4pc8AXbfVa85gyoSiaLEYZnqP54sJ2lFccqykCsTy58Lo
# VDcoPnoSc+LNqBOvtzxXgQbEWFCXU6fe0+TZgVYUvExWFIAOImeDWg2GD1JVrwsX
# e9QrPhL3DXg=
# =ZQcP
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 28 Jun 2023 04:13:56 PM CEST
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (23 commits)
  block: use bdrv_co_debug_event in coroutine context
  block: use bdrv_co_getlength in coroutine context
  qcow2: mark more functions as coroutine_fns and GRAPH_RDLOCK
  vhdx: mark more functions as coroutine_fns and GRAPH_RDLOCK
  vmdk: mark more functions as coroutine_fns and GRAPH_RDLOCK
  dmg: mark more functions as coroutine_fns and GRAPH_RDLOCK
  cloop: mark more functions as coroutine_fns and GRAPH_RDLOCK
  block: mark another function as coroutine_fns and GRAPH_UNLOCKED
  bochs: mark more functions as coroutine_fns and GRAPH_RDLOCK
  vpc: mark more functions as coroutine_fns and GRAPH_RDLOCK
  qed: mark more functions as coroutine_fns and GRAPH_RDLOCK
  file-posix: remove incorrect coroutine_fn calls
  Revert "graph-lock: Disable locking for now"
  graph-lock: Unlock the AioContext while polling
  blockjob: Fix AioContext locking in block_job_add_bdrv()
  block: Fix AioContext locking in bdrv_open_backing_file()
  block: Fix AioContext locking in bdrv_open_inherit()
  block: Fix AioContext locking in bdrv_reopen_parse_file_or_backing()
  block: Fix AioContext locking in bdrv_attach_child_common()
  block: Fix AioContext locking in bdrv_open_child()
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2023-06-28 17:29:53 +02:00
commit 0eb8f90ede
30 changed files with 574 additions and 340 deletions
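
The recurring change in these patches is to take a helper that only runs in coroutine context, mark it coroutine_fn and GRAPH_RDLOCK, and switch it to the coroutine variants of the block-layer calls it makes. A minimal sketch of that shape, using a hypothetical driver helper (example_read_header is not part of this series):

#include "qemu/osdep.h"
#include "block/block_int.h"

/* Hypothetical helper: only reachable from a .bdrv_co_preadv implementation,
 * so it can be marked coroutine_fn; GRAPH_RDLOCK covers the bs->file access. */
static int coroutine_fn GRAPH_RDLOCK
example_read_header(BlockDriverState *bs, void *buf, int64_t bytes)
{
    BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);        /* coroutine variant of BLKDBG_EVENT() */
    return bdrv_co_pread(bs->file, 0, bytes, buf, 0);  /* instead of bdrv_pread() */
}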

block.c

@ -555,8 +555,9 @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename,
* On success, return @blk's actual length.
* Otherwise, return -errno.
*/
static int64_t create_file_fallback_truncate(BlockBackend *blk,
int64_t minimum_size, Error **errp)
static int64_t coroutine_fn GRAPH_UNLOCKED
create_file_fallback_truncate(BlockBackend *blk, int64_t minimum_size,
Error **errp)
{
Error *local_err = NULL;
int64_t size;
@ -564,14 +565,14 @@ static int64_t create_file_fallback_truncate(BlockBackend *blk,
GLOBAL_STATE_CODE();
ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
&local_err);
ret = blk_co_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
&local_err);
if (ret < 0 && ret != -ENOTSUP) {
error_propagate(errp, local_err);
return ret;
}
size = blk_getlength(blk);
size = blk_co_getlength(blk);
if (size < 0) {
error_free(local_err);
error_setg_errno(errp, -size,
@ -2854,7 +2855,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
* Replaces the node that a BdrvChild points to without updating permissions.
*
* If @new_bs is non-NULL, the parent of @child must already be drained through
* @child.
* @child and the caller must hold the AioContext lock for @new_bs.
*/
static void bdrv_replace_child_noperm(BdrvChild *child,
BlockDriverState *new_bs)
@ -2893,7 +2894,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
}
/* TODO Pull this up into the callers to avoid polling here */
bdrv_graph_wrlock();
bdrv_graph_wrlock(new_bs);
if (old_bs) {
if (child->klass->detach) {
child->klass->detach(child);
@ -2989,6 +2990,10 @@ static TransactionActionDrv bdrv_attach_child_common_drv = {
* Function doesn't update permissions, caller is responsible for this.
*
* Returns new created child.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
const char *child_name,
@ -2999,7 +3004,7 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
Transaction *tran, Error **errp)
{
BdrvChild *new_child;
AioContext *parent_ctx;
AioContext *parent_ctx, *new_child_ctx;
AioContext *child_ctx = bdrv_get_aio_context(child_bs);
assert(child_class->get_parent_desc);
@ -3050,6 +3055,12 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
}
}
new_child_ctx = bdrv_get_aio_context(child_bs);
if (new_child_ctx != child_ctx) {
aio_context_release(child_ctx);
aio_context_acquire(new_child_ctx);
}
bdrv_ref(child_bs);
/*
* Let every new BdrvChild start with a drained parent. Inserting the child
@ -3079,11 +3090,20 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
};
tran_add(tran, &bdrv_attach_child_common_drv, s);
if (new_child_ctx != child_ctx) {
aio_context_release(new_child_ctx);
aio_context_acquire(child_ctx);
}
return new_child;
}
/*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@ -3347,6 +3367,10 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs)
* callers which don't need their own reference any more must call bdrv_unref().
*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@ -3435,6 +3459,11 @@ out:
return 0;
}
/*
* The caller must hold the AioContext lock for @backing_hd. Both @bs and
* @backing_hd can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_backing_noperm(BlockDriverState *bs,
BlockDriverState *backing_hd,
Transaction *tran, Error **errp)
@ -3498,6 +3527,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
int ret = 0;
bool implicit_backing = false;
BlockDriverState *backing_hd;
AioContext *backing_hd_ctx;
QDict *options;
QDict *tmp_parent_options = NULL;
Error *local_err = NULL;
@ -3582,8 +3612,12 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
/* Hook up the backing file link; drop our reference, bs owns the
* backing_hd reference now */
backing_hd_ctx = bdrv_get_aio_context(backing_hd);
aio_context_acquire(backing_hd_ctx);
ret = bdrv_set_backing_hd(bs, backing_hd, errp);
bdrv_unref(backing_hd);
aio_context_release(backing_hd_ctx);
if (ret < 0) {
goto free_exit;
}
@ -3654,6 +3688,7 @@ done:
*
* The BlockdevRef will be removed from the options QDict.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
@ -3665,6 +3700,8 @@ BdrvChild *bdrv_open_child(const char *filename,
bool allow_none, Error **errp)
{
BlockDriverState *bs;
BdrvChild *child;
AioContext *ctx;
GLOBAL_STATE_CODE();
@ -3674,13 +3711,19 @@ BdrvChild *bdrv_open_child(const char *filename,
return NULL;
}
return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
aio_context_release(ctx);
return child;
}
/*
* Wrapper on bdrv_open_child() for most popular case: open primary child of bs.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
@ -3757,6 +3800,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
int64_t total_size;
QemuOpts *opts = NULL;
BlockDriverState *bs_snapshot = NULL;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret;
GLOBAL_STATE_CODE();
@ -3765,7 +3809,10 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
instead of opening 'filename' directly */
/* Get the required size from the image */
aio_context_acquire(ctx);
total_size = bdrv_getlength(bs);
aio_context_release(ctx);
if (total_size < 0) {
error_setg_errno(errp, -total_size, "Could not get image size");
goto out;
@ -3799,7 +3846,10 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
goto out;
}
aio_context_acquire(ctx);
ret = bdrv_append(bs_snapshot, bs, errp);
aio_context_release(ctx);
if (ret < 0) {
bs_snapshot = NULL;
goto out;
@ -3843,6 +3893,7 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
Error *local_err = NULL;
QDict *snapshot_options = NULL;
int snapshot_flags = 0;
AioContext *ctx = qemu_get_aio_context();
assert(!child_class || !flags);
assert(!child_class == !parent);
@ -3980,9 +4031,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
/* Not requesting BLK_PERM_CONSISTENT_READ because we're only
* looking at the header to guess the image format. This works even
* in cases where a guest would not see a consistent state. */
file = blk_new(bdrv_get_aio_context(file_bs), 0, BLK_PERM_ALL);
ctx = bdrv_get_aio_context(file_bs);
aio_context_acquire(ctx);
file = blk_new(ctx, 0, BLK_PERM_ALL);
blk_insert_bs(file, file_bs, &local_err);
bdrv_unref(file_bs);
aio_context_release(ctx);
if (local_err) {
goto fail;
}
@ -4028,8 +4083,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
goto fail;
}
/* The AioContext could have changed during bdrv_open_common() */
ctx = bdrv_get_aio_context(bs);
if (file) {
aio_context_acquire(ctx);
blk_unref(file);
aio_context_release(ctx);
file = NULL;
}
@ -4087,13 +4147,16 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
* (snapshot_bs); thus, we have to drop the strong reference to bs
* (which we obtained by calling bdrv_new()). bs will not be deleted,
* though, because the overlay still has a reference to it. */
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
bs = snapshot_bs;
}
return bs;
fail:
aio_context_acquire(ctx);
blk_unref(file);
qobject_unref(snapshot_options);
qobject_unref(bs->explicit_options);
@ -4102,11 +4165,14 @@ fail:
bs->options = NULL;
bs->explicit_options = NULL;
bdrv_unref(bs);
aio_context_release(ctx);
error_propagate(errp, local_err);
return NULL;
close_and_fail:
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
qobject_unref(snapshot_options);
qobject_unref(options);
error_propagate(errp, local_err);
@ -4578,6 +4644,11 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* backing BlockDriverState (or NULL).
*
* Return 0 on success, otherwise return < 0 and set @errp.
*
* The caller must hold the AioContext lock of @reopen_state->bs.
* @reopen_state->bs can move to a different AioContext in this function.
* Callers must make sure that their AioContext locking is still correct after
* this.
*/
static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
@ -4590,6 +4661,8 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
const char *child_name = is_backing ? "backing" : "file";
QObject *value;
const char *str;
AioContext *ctx, *old_ctx;
int ret;
GLOBAL_STATE_CODE();
@ -4654,8 +4727,22 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
reopen_state->old_file_bs = old_child_bs;
}
return bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
tran, errp);
old_ctx = bdrv_get_aio_context(bs);
ctx = bdrv_get_aio_context(new_child_bs);
if (old_ctx != ctx) {
aio_context_release(old_ctx);
aio_context_acquire(ctx);
}
ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
tran, errp);
if (old_ctx != ctx) {
aio_context_release(ctx);
aio_context_acquire(old_ctx);
}
return ret;
}
/*
@ -4674,6 +4761,7 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
* It is the responsibility of the caller to then call the abort() or
* commit() for any other BDS that have been left in a prepare() state
*
* The caller must hold the AioContext lock of @reopen_state->bs.
*/
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue,
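
Several hunks above share one caller-side pattern for the new AioContext requirements: look up the child node's context, acquire it just around the call that needs it, and release it again (bdrv_open_child() above does exactly this before bdrv_attach_child()). A sketch of that pattern with placeholder nodes (parent_bs and child_bs are hypothetical):

    AioContext *ctx = bdrv_get_aio_context(child_bs);
    BdrvChild *child;

    /* Take the child's AioContext lock only for the graph-changing call */
    aio_context_acquire(ctx);
    child = bdrv_attach_child(parent_bs, child_bs, "file", &child_of_bds,
                              BDRV_CHILD_IMAGE, errp);
    aio_context_release(ctx);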


@ -203,7 +203,8 @@ static void bochs_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
static int64_t coroutine_fn GRAPH_RDLOCK
seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
uint64_t offset = sector_num * 512;
@ -224,8 +225,8 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
(s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), 1,
&bitmap_entry, 0);
ret = bdrv_co_pread(bs->file, bitmap_offset + (extent_offset / 8), 1,
&bitmap_entry, 0);
if (ret < 0) {
return ret;
}


@ -212,7 +212,8 @@ static void cloop_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline int cloop_read_block(BlockDriverState *bs, int block_num)
static int coroutine_fn GRAPH_RDLOCK
cloop_read_block(BlockDriverState *bs, int block_num)
{
BDRVCloopState *s = bs->opaque;
@ -220,8 +221,8 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
int ret;
uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
ret = bdrv_pread(bs->file, s->offsets[block_num], bytes,
s->compressed_block, 0);
ret = bdrv_co_pread(bs->file, s->offsets[block_num], bytes,
s->compressed_block, 0);
if (ret < 0) {
return -1;
}
@ -244,7 +245,7 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
return 0;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{


@ -616,7 +616,8 @@ err:
return s->n_chunks; /* error */
}
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
BDRVDMGState *s = bs->opaque;
@ -633,8 +634,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
case UDZO: { /* zlib compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
}
@ -659,8 +660,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
}
@ -680,8 +681,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
}
@ -696,8 +697,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
break;
case UDRW: /* copy */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->uncompressed_chunk, 0);
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->uncompressed_chunk, 0);
if (ret < 0) {
return -1;
}
@ -713,7 +714,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return 0;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{


@ -193,7 +193,7 @@ static int fd_open(BlockDriverState *bs)
return -EIO;
}
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs);
static int64_t raw_getlength(BlockDriverState *bs);
typedef struct RawPosixAIOData {
BlockDriverState *bs;
@ -1974,7 +1974,7 @@ static int handle_aiocb_write_zeroes(void *opaque)
#ifdef CONFIG_FALLOCATE
/* Last resort: we are trying to extend the file with zeroed data. This
* can be done via fallocate(fd, 0) */
len = raw_co_getlength(aiocb->bs);
len = raw_getlength(aiocb->bs);
if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
if (ret == 0 || ret != -ENOTSUP) {
@ -2666,7 +2666,7 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
}
if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
int64_t cur_length = raw_co_getlength(bs);
int64_t cur_length = raw_getlength(bs);
if (offset != cur_length && exact) {
error_setg(errp, "Cannot resize device files");
@ -2684,7 +2684,7 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
}
#ifdef __OpenBSD__
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2703,7 +2703,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return st.st_size;
}
#elif defined(__NetBSD__)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2728,7 +2728,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return st.st_size;
}
#elif defined(__sun__)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
struct dk_minfo minfo;
@ -2759,7 +2759,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return size;
}
#elif defined(CONFIG_BSD)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2831,7 +2831,7 @@ again:
return size;
}
#else
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int ret;
@ -2850,6 +2850,11 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
}
#endif
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
{
return raw_getlength(bs);
}
static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs)
{
struct stat st;
@ -3215,7 +3220,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
* round up if necessary.
*/
if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
int64_t file_length = raw_co_getlength(bs);
int64_t file_length = raw_getlength(bs);
if (file_length > 0) {
/* Ignore errors, this is just a safeguard */
assert(hole == file_length);
@ -3237,7 +3242,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
#if defined(__linux__)
/* Verify that the file is not in the page cache */
static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp)
static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
const size_t window_size = 128 * 1024 * 1024;
BDRVRawState *s = bs->opaque;
@ -3252,7 +3257,7 @@ static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp)
page_size = sysconf(_SC_PAGESIZE);
vec = g_malloc(DIV_ROUND_UP(window_size, page_size));
end = raw_co_getlength(bs);
end = raw_getlength(bs);
for (offset = 0; offset < end; offset += window_size) {
void *new_window;
@ -4468,7 +4473,7 @@ static int cdrom_reopen(BlockDriverState *bs)
static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
return raw_co_getlength(bs) > 0;
return raw_getlength(bs) > 0;
}
static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)


@ -30,10 +30,8 @@ BdrvGraphLock graph_lock;
/* Protects the list of aiocontext and orphaned_reader_count */
static QemuMutex aio_context_list_lock;
#if 0
/* Written and read with atomic operations. */
static int has_writer;
#endif
/*
* A reader coroutine could move from an AioContext to another.
@ -90,7 +88,6 @@ void unregister_aiocontext(AioContext *ctx)
g_free(ctx->bdrv_graph);
}
#if 0
static uint32_t reader_count(void)
{
BdrvGraphRWlock *brdv_graph;
@ -108,19 +105,27 @@ static uint32_t reader_count(void)
assert((int32_t)rd >= 0);
return rd;
}
#endif
void bdrv_graph_wrlock(void)
void bdrv_graph_wrlock(BlockDriverState *bs)
{
AioContext *ctx = NULL;
GLOBAL_STATE_CODE();
/*
* TODO Some callers hold an AioContext lock when this is called, which
* causes deadlocks. Reenable once the AioContext locking is cleaned up (or
* AioContext locks are gone).
*/
#if 0
assert(!qatomic_read(&has_writer));
/*
* Release only non-mainloop AioContext. The mainloop often relies on the
* BQL and doesn't lock the main AioContext before doing things.
*/
if (bs) {
ctx = bdrv_get_aio_context(bs);
if (ctx != qemu_get_aio_context()) {
aio_context_release(ctx);
} else {
ctx = NULL;
}
}
/* Make sure that constantly arriving new I/O doesn't cause starvation */
bdrv_drain_all_begin_nopoll();
@ -149,13 +154,15 @@ void bdrv_graph_wrlock(void)
} while (reader_count() >= 1);
bdrv_drain_all_end();
#endif
if (ctx) {
aio_context_acquire(bdrv_get_aio_context(bs));
}
}
void bdrv_graph_wrunlock(void)
{
GLOBAL_STATE_CODE();
#if 0
QEMU_LOCK_GUARD(&aio_context_list_lock);
assert(qatomic_read(&has_writer));
@ -167,13 +174,10 @@ void bdrv_graph_wrunlock(void)
/* Wake up all coroutine that are waiting to read the graph */
qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
#endif
}
void coroutine_fn bdrv_graph_co_rdlock(void)
{
/* TODO Reenable when wrlock is reenabled */
#if 0
BdrvGraphRWlock *bdrv_graph;
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
@ -233,12 +237,10 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
}
}
#endif
}
void coroutine_fn bdrv_graph_co_rdunlock(void)
{
#if 0
BdrvGraphRWlock *bdrv_graph;
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
@ -256,7 +258,6 @@ void coroutine_fn bdrv_graph_co_rdunlock(void)
if (qatomic_read(&has_writer)) {
aio_wait_kick();
}
#endif
}
void bdrv_graph_rdlock_main_loop(void)
@ -274,19 +275,13 @@ void bdrv_graph_rdunlock_main_loop(void)
void assert_bdrv_graph_readable(void)
{
/* reader_count() is slow due to aio_context_list_lock lock contention */
/* TODO Reenable when wrlock is reenabled */
#if 0
#ifdef CONFIG_DEBUG_GRAPH_LOCK
assert(qemu_in_main_thread() || reader_count());
#endif
#endif
}
void assert_bdrv_graph_writable(void)
{
assert(qemu_in_main_thread());
/* TODO Reenable when wrlock is reenabled */
#if 0
assert(qatomic_read(&has_writer));
#endif
}
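
With the change above, bdrv_graph_wrlock() takes the BlockDriverState whose AioContext lock the caller holds; it releases that lock (unless it is the main context) while draining and polling out the remaining readers, then re-acquires it, which avoids the deadlock that had forced the lock to be disabled. A minimal sketch of a write-locked graph update (the actual modification is elided):

    /* Caller holds the AioContext lock of bs, or only the main context. */
    bdrv_graph_wrlock(bs);   /* may temporarily drop bs's AioContext lock */
    /* ... modify BdrvChild/parent pointers ... */
    bdrv_graph_wrunlock();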


@ -1379,7 +1379,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
}
/* Forward the request to the BlockDriver, possibly fragmenting it */
total_bytes = bdrv_getlength(bs);
total_bytes = bdrv_co_getlength(bs);
if (total_bytes < 0) {
ret = total_bytes;
goto out;
@ -2388,7 +2388,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
assert(pnum);
assert_bdrv_graph_readable();
*pnum = 0;
total_size = bdrv_getlength(bs);
total_size = bdrv_co_getlength(bs);
if (total_size < 0) {
ret = total_size;
goto early_out;
@ -2408,7 +2408,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
bytes = n;
}
/* Must be non-NULL or bdrv_getlength() would have failed */
/* Must be non-NULL or bdrv_co_getlength() would have failed */
assert(bs->drv);
has_filtered_child = bdrv_filter_child(bs);
if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
@ -2546,7 +2546,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
if (!cow_bs) {
ret |= BDRV_BLOCK_ZERO;
} else if (want_zero) {
int64_t size2 = bdrv_getlength(cow_bs);
int64_t size2 = bdrv_co_getlength(cow_bs);
if (size2 >= 0 && offset >= size2) {
ret |= BDRV_BLOCK_ZERO;
@ -3011,7 +3011,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
}
/* Write back cached data to the OS even with cache=unsafe */
BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
if (bs->drv->bdrv_co_flush_to_os) {
ret = bs->drv->bdrv_co_flush_to_os(bs);
if (ret < 0) {
@ -3029,7 +3029,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
goto flush_children;
}
BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
if (!bs->drv) {
/* bs->drv->bdrv_co_flush() might have ejected the BDS
* (even in case of apparent success) */
@ -3592,7 +3592,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
return ret;
}
old_size = bdrv_getlength(bs);
old_size = bdrv_co_getlength(bs);
if (old_size < 0) {
error_setg_errno(errp, -old_size, "Failed to get old image size");
return old_size;
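
The io.c hunks apply the same rule inside functions that already run in coroutine context: sizes come from bdrv_co_getlength() and blkdebug events use BLKDBG_CO_EVENT(). A small sketch of the error-handling shape seen above, with a hypothetical helper name (QEMU's internal headers assumed, as in the earlier sketch):

static int coroutine_fn GRAPH_RDLOCK
example_co_check_request(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int64_t total_bytes = bdrv_co_getlength(bs);  /* not bdrv_getlength() */

    if (total_bytes < 0) {
        return total_bytes;                       /* negative errno */
    }
    return offset + bytes <= total_bytes ? 0 : -EINVAL;
}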


@ -200,7 +200,7 @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num,
assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);
space = to_allocate * s->tracks;
len = bdrv_getlength(bs->file->bs);
len = bdrv_co_getlength(bs->file->bs);
if (len < 0) {
return len;
}
@ -448,7 +448,7 @@ parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res,
uint32_t i;
int64_t off, high_off, size;
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
res->check_errors++;
return size;


@ -370,7 +370,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
if (!allocate)
return 0;
/* allocate a new l2 entry */
l2_offset = bdrv_getlength(bs->file->bs);
l2_offset = bdrv_co_getlength(bs->file->bs);
if (l2_offset < 0) {
return l2_offset;
}
@ -379,7 +379,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
/* update the L1 entry */
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_UPDATE);
ret = bdrv_co_pwrite_sync(bs->file,
s->l1_table_offset + l1_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
@ -410,7 +410,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
}
}
l2_table = s->l2_cache + (min_index << s->l2_bits);
BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_LOAD);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
ret = bdrv_co_pwrite_sync(bs->file, l2_offset,
@ -434,7 +434,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
if (!allocate)
return 0;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
assert(QEMU_IS_ALIGNED(n_start | n_end, BDRV_SECTOR_SIZE));
/* allocate a new cluster */
if ((cluster_offset & QCOW_OFLAG_COMPRESSED) &&
@ -445,20 +445,20 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
if (decompress_cluster(bs, cluster_offset) < 0) {
return -EIO;
}
cluster_offset = bdrv_getlength(bs->file->bs);
cluster_offset = bdrv_co_getlength(bs->file->bs);
if ((int64_t) cluster_offset < 0) {
return cluster_offset;
}
cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
/* write the cluster content */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset, s->cluster_size,
s->cluster_cache, 0);
if (ret < 0) {
return ret;
}
} else {
cluster_offset = bdrv_getlength(bs->file->bs);
cluster_offset = bdrv_co_getlength(bs->file->bs);
if ((int64_t) cluster_offset < 0) {
return cluster_offset;
}
@ -491,7 +491,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
NULL) < 0) {
return -EIO;
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset + i,
BDRV_SECTOR_SIZE,
s->cluster_data, 0);
@ -510,9 +510,9 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
tmp = cpu_to_be64(cluster_offset);
l2_table[l2_index] = tmp;
if (allocate == 2) {
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
} else {
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE);
}
ret = bdrv_co_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
@ -595,7 +595,7 @@ decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
if (s->cluster_cache_offset != coffset) {
csize = cluster_offset >> (63 - s->cluster_bits);
csize &= (s->cluster_size - 1);
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(bs->file, coffset, csize, s->cluster_data, 0);
if (ret < 0)
return -1;
@ -657,7 +657,7 @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
/* read from the base image */
qemu_co_mutex_unlock(&s->lock);
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_pread(bs->backing, offset, n, buf, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
@ -680,7 +680,7 @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
break;
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(bs->file, cluster_offset + offset_in_cluster,
n, buf, 0);
qemu_co_mutex_lock(&s->lock);
@ -765,7 +765,7 @@ qcow_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset + offset_in_cluster,
n, buf, 0);
qemu_co_mutex_lock(&s->lock);
@ -1114,7 +1114,7 @@ qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
cluster_offset &= s->cluster_offset_mask;
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_co_pwrite(bs->file, cluster_offset, out_len, out_buf, 0);
if (ret < 0) {
goto fail;


@ -283,10 +283,9 @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
/* load_bitmap_data
* @bitmap_table entries must satisfy specification constraints.
* @bitmap must be cleared */
static int load_bitmap_data(BlockDriverState *bs,
const uint64_t *bitmap_table,
uint32_t bitmap_table_size,
BdrvDirtyBitmap *bitmap)
static int coroutine_fn GRAPH_RDLOCK
load_bitmap_data(BlockDriverState *bs, const uint64_t *bitmap_table,
uint32_t bitmap_table_size, BdrvDirtyBitmap *bitmap)
{
int ret = 0;
BDRVQcow2State *s = bs->opaque;
@ -319,7 +318,7 @@ static int load_bitmap_data(BlockDriverState *bs,
* already cleared */
}
} else {
ret = bdrv_pread(bs->file, data_offset, s->cluster_size, buf, 0);
ret = bdrv_co_pread(bs->file, data_offset, s->cluster_size, buf, 0);
if (ret < 0) {
goto finish;
}
@ -337,8 +336,9 @@ finish:
return ret;
}
static BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs,
Qcow2Bitmap *bm, Error **errp)
static coroutine_fn GRAPH_RDLOCK
BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs,
Qcow2Bitmap *bm, Error **errp)
{
int ret;
uint64_t *bitmap_table = NULL;
@ -649,9 +649,10 @@ fail:
return NULL;
}
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size)
int coroutine_fn
qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size)
{
int ret;
BDRVQcow2State *s = bs->opaque;
@ -957,8 +958,9 @@ static void set_readonly_helper(gpointer bitmap, gpointer value)
* If header_updated is not NULL then it is set appropriately regardless of
* the return value.
*/
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp)
bool coroutine_fn GRAPH_RDLOCK
qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
Qcow2BitmapList *bm_list;


@ -48,7 +48,7 @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs,
fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
ret = bdrv_co_pwrite_zeroes(bs->file,
s->l1_table_offset + new_l1_size * L1E_SIZE,
(s->l1_size - new_l1_size) * L1E_SIZE, 0);
@ -61,7 +61,7 @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs,
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
continue;
@ -501,7 +501,7 @@ do_perform_cow_read(BlockDriverState *bs, uint64_t src_cluster_offset,
return 0;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_READ);
if (!bs->drv) {
return -ENOMEDIUM;
@ -551,7 +551,7 @@ do_perform_cow_write(BlockDriverState *bs, uint64_t cluster_offset,
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
qiov->size, qiov, 0);
if (ret < 0) {
@ -823,10 +823,9 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
*
* Return 0 on success and -errno in error cases
*/
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset)
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
int compressed_size, uint64_t *host_offset)
{
BDRVQcow2State *s = bs->opaque;
int l2_index, ret;
@ -872,7 +871,7 @@ int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
/* compressed clusters never have the copied flag */
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
set_l2_entry(s, l2_slice, l2_index, cluster_offset);
if (has_subclusters(s)) {
@ -992,7 +991,7 @@ perform_cow(BlockDriverState *bs, QCowL2Meta *m)
/* NOTE: we have a write_aio blkdebug event here followed by
* a cow_write one in do_perform_cow_write(), but there's only
* one single I/O operation */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
} else {
/* If there's no guest data then write both COW regions separately */
@ -2038,8 +2037,9 @@ fail:
* all clusters in the same L2 slice) and returns the number of zeroed
* clusters.
*/
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t nb_clusters, int flags)
static int coroutine_fn
zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t nb_clusters, int flags)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l2_slice;


@ -118,7 +118,7 @@ int coroutine_fn qcow2_refcount_init(BlockDriverState *bs)
ret = -ENOMEM;
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_co_pread(bs->file, s->refcount_table_offset,
refcount_table_size2, s->refcount_table, 0);
if (ret < 0) {
@ -1069,14 +1069,14 @@ int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offs
/* only used to allocate compressed sectors. We try to allocate
contiguous sectors. size must be <= cluster_size */
int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size)
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset;
size_t free_in_cluster;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
assert(size > 0 && size <= s->cluster_size);
assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));
@ -1524,10 +1524,11 @@ static int realloc_refcount_array(BDRVQcow2State *s, void **array,
*
* Modifies the number of errors in res.
*/
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size)
int coroutine_fn GRAPH_RDLOCK
qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, last, cluster_offset, k, refcount;
@ -1538,7 +1539,7 @@ int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
return 0;
}
file_len = bdrv_getlength(bs->file->bs);
file_len = bdrv_co_getlength(bs->file->bs);
if (file_len < 0) {
return file_len;
}
@ -1600,10 +1601,11 @@ enum {
*
* On failure in-memory @l2_table may be modified.
*/
static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
uint64_t l2_offset,
uint64_t *l2_table, int l2_index, bool active,
bool *metadata_overlap)
static int coroutine_fn GRAPH_RDLOCK
fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
uint64_t l2_offset, uint64_t *l2_table,
int l2_index, bool active,
bool *metadata_overlap)
{
BDRVQcow2State *s = bs->opaque;
int ret;
@ -1634,8 +1636,8 @@ static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
goto fail;
}
ret = bdrv_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s),
&l2_table[idx], 0);
ret = bdrv_co_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s),
&l2_table[idx], 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Failed to overwrite L2 "
"table entry: %s\n", strerror(-ret));
@ -1659,10 +1661,11 @@ fail:
* Returns the number of errors found by the checks or -errno if an internal
* error occurred.
*/
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size, int64_t l2_offset,
int flags, BdrvCheckMode fix, bool active)
static int coroutine_fn GRAPH_RDLOCK
check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size, int64_t l2_offset,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
uint64_t l2_entry, l2_bitmap;
@ -1673,7 +1676,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
bool metadata_overlap;
/* Read L2 table from disk */
ret = bdrv_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0);
ret = bdrv_co_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
@ -1858,12 +1861,11 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
* Returns the number of errors found by the checks or -errno if an internal
* error occurred.
*/
static int check_refcounts_l1(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t l1_table_offset, int l1_size,
int flags, BdrvCheckMode fix, bool active)
static int coroutine_fn GRAPH_RDLOCK
check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table, int64_t *refcount_table_size,
int64_t l1_table_offset, int l1_size,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
size_t l1_size_bytes = l1_size * L1E_SIZE;
@ -1889,7 +1891,7 @@ static int check_refcounts_l1(BlockDriverState *bs,
}
/* Read L1 table entries from disk */
ret = bdrv_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0);
ret = bdrv_co_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
@ -1949,8 +1951,8 @@ static int check_refcounts_l1(BlockDriverState *bs,
* have been already detected and sufficiently signaled by the calling function
* (qcow2_check_refcounts) by the time this function is called).
*/
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
static int coroutine_fn GRAPH_RDLOCK
check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
@ -2005,8 +2007,8 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
}
}
ret = bdrv_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s),
l2_table, 0);
ret = bdrv_co_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s),
l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
strerror(-ret));
@ -2059,8 +2061,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
goto fail;
}
ret = bdrv_pwrite(bs->file, l2_offset, s->cluster_size, l2_table,
0);
ret = bdrv_co_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
strerror(-ret));
@ -2083,9 +2084,10 @@ fail:
* Checks consistency of refblocks and accounts for each refblock in
* *refcount_table.
*/
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
static int coroutine_fn GRAPH_RDLOCK
check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
{
BDRVQcow2State *s = bs->opaque;
int64_t i, size;
@ -2127,13 +2129,13 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
goto resize_fail;
}
ret = bdrv_truncate(bs->file, offset + s->cluster_size, false,
PREALLOC_MODE_OFF, 0, &local_err);
ret = bdrv_co_truncate(bs->file, offset + s->cluster_size, false,
PREALLOC_MODE_OFF, 0, &local_err);
if (ret < 0) {
error_report_err(local_err);
goto resize_fail;
}
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
ret = size;
goto resize_fail;
@ -2197,9 +2199,10 @@ resize_fail:
/*
* Calculates an in-memory refcount table.
*/
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
static int coroutine_fn GRAPH_RDLOCK
calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
{
BDRVQcow2State *s = bs->opaque;
int64_t i;
@ -2299,10 +2302,11 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
* Compares the actual reference count for each cluster in the image against the
* refcount as reported by the refcount structures on-disk.
*/
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
int64_t *highest_cluster,
void *refcount_table, int64_t nb_clusters)
static void coroutine_fn
compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
int64_t *highest_cluster,
void *refcount_table, int64_t nb_clusters)
{
BDRVQcow2State *s = bs->opaque;
int64_t i;
@ -2463,7 +2467,8 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs,
* Return whether the on-disk reftable array was resized (true/false),
* or -errno on error.
*/
static int rebuild_refcounts_write_refblocks(
static int coroutine_fn GRAPH_RDLOCK
rebuild_refcounts_write_refblocks(
BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters,
int64_t first_cluster, int64_t end_cluster,
uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr,
@ -2578,8 +2583,8 @@ static int rebuild_refcounts_write_refblocks(
on_disk_refblock = (void *)((char *) *refcount_table +
refblock_index * s->cluster_size);
ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size,
on_disk_refblock, 0);
ret = bdrv_co_pwrite(bs->file, refblock_offset, s->cluster_size,
on_disk_refblock, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR writing refblock");
return ret;
@ -2601,11 +2606,10 @@ static int rebuild_refcounts_write_refblocks(
* On success, the old refcount structure is leaked (it will be covered by the
* new refcount structure).
*/
static int rebuild_refcount_structure(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *nb_clusters,
Error **errp)
static int coroutine_fn GRAPH_RDLOCK
rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table, int64_t *nb_clusters,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int64_t reftable_offset = -1;
@ -2734,8 +2738,8 @@ static int rebuild_refcount_structure(BlockDriverState *bs,
}
assert(reftable_length < INT_MAX);
ret = bdrv_pwrite(bs->file, reftable_offset, reftable_length,
on_disk_reftable, 0);
ret = bdrv_co_pwrite(bs->file, reftable_offset, reftable_length,
on_disk_reftable, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR writing reftable");
goto fail;
@ -2745,10 +2749,10 @@ static int rebuild_refcount_structure(BlockDriverState *bs,
reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
reftable_offset_and_clusters.reftable_clusters =
cpu_to_be32(reftable_clusters);
ret = bdrv_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
sizeof(reftable_offset_and_clusters),
&reftable_offset_and_clusters, 0);
ret = bdrv_co_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
sizeof(reftable_offset_and_clusters),
&reftable_offset_and_clusters, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR setting reftable");
goto fail;
@ -2777,8 +2781,8 @@ fail:
* Returns 0 if no errors are found, the number of errors in case the image is
* detected as corrupted, and -errno when an internal error occurred.
*/
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
int coroutine_fn GRAPH_RDLOCK
qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
BDRVQcow2State *s = bs->opaque;
BdrvCheckResult pre_compare_res;
@ -2787,7 +2791,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
bool rebuild = false;
int ret;
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
res->check_errors++;
return size;
@ -3541,7 +3545,8 @@ done:
return ret;
}
static int64_t get_refblock_offset(BlockDriverState *bs, uint64_t offset)
static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs,
uint64_t offset)
{
BDRVQcow2State *s = bs->opaque;
uint32_t index = offset_to_reftable_index(s, offset);
@ -3707,7 +3712,8 @@ int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
return -EIO;
}
int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs)
int coroutine_fn GRAPH_RDLOCK
qcow2_detect_metadata_preallocation(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
int64_t i, end_cluster, cluster_count = 0, threshold;


@ -570,7 +570,7 @@ int qcow2_mark_corrupt(BlockDriverState *bs)
* Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
* before if necessary.
*/
int qcow2_mark_consistent(BlockDriverState *bs)
static int coroutine_fn qcow2_mark_consistent(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
@ -2225,7 +2225,7 @@ qcow2_co_preadv_encrypted(BlockDriverState *bs,
return -ENOMEM;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
if (ret < 0) {
goto fail;
@ -2315,7 +2315,7 @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
return bdrv_co_preadv_part(bs->backing, offset, bytes,
qiov, qiov_offset, 0);
@ -2329,7 +2329,7 @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
offset, bytes, qiov, qiov_offset);
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_preadv_part(s->data_file, host_offset,
bytes, qiov, qiov_offset, 0);
@ -2539,7 +2539,7 @@ handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
BDRV_REQ_NO_FALLBACK);
if (ret < 0) {
@ -2604,7 +2604,7 @@ int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset,
* guest data now.
*/
if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
ret = bdrv_co_pwritev_part(s->data_file, host_offset,
bytes, qiov, qiov_offset, 0);
@ -4678,7 +4678,7 @@ qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
goto fail;
}
BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
if (ret < 0) {
goto fail;
@ -4797,7 +4797,7 @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
out_buf = qemu_blockalign(bs, s->cluster_size);
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0);
if (ret < 0) {
goto fail;
@ -5344,7 +5344,7 @@ qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
}
@ -5356,7 +5356,7 @@ qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
}


@ -836,7 +836,6 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
int qcow2_mark_dirty(BlockDriverState *bs);
int qcow2_mark_corrupt(BlockDriverState *bs);
int qcow2_mark_consistent(BlockDriverState *bs);
int qcow2_update_header(BlockDriverState *bs);
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
@ -867,7 +866,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t nb_clusters);
int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size);
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size);
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
@ -879,8 +878,8 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int qcow2_flush_caches(BlockDriverState *bs);
int qcow2_write_caches(BlockDriverState *bs);
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix);
int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix);
void qcow2_process_discards(BlockDriverState *bs, int ret);
@ -888,10 +887,10 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
int64_t size);
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
int64_t size, bool data_file);
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size);
int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size);
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
BlockDriverAmendStatusCB *status_cb,
@ -919,10 +918,9 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes,
uint64_t *host_offset, QCowL2Meta **m);
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
int compressed_size, uint64_t *host_offset);
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize);
@ -992,11 +990,12 @@ void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
void qcow2_cache_discard(Qcow2Cache *c, void *table);
/* qcow2-bitmap.c functions */
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size);
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp);
int coroutine_fn
qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size);
bool coroutine_fn GRAPH_RDLOCK
qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, Error **errp);
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
Qcow2BitmapInfoList **info_list, Error **errp);
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);


@ -200,7 +200,8 @@ static void qed_check_for_leaks(QEDCheck *check)
/**
* Mark an image clean once it passes check or has been repaired
*/
static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
static void coroutine_fn GRAPH_RDLOCK
qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
{
/* Skip if there were unfixable corruptions or I/O errors */
if (result->corruptions > 0 || result->check_errors > 0) {
@ -213,7 +214,7 @@ static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
}
/* Ensure fixes reach storage before clearing check bit */
bdrv_flush(s->bs);
bdrv_co_flush(s->bs);
s->header.features &= ~QED_F_NEED_CHECK;
qed_write_header_sync(s);


@@ -122,7 +122,7 @@ int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s)
int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index,
unsigned int n)
{
BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
return qed_write_table(s, s->header.l1_table_offset,
s->l1_table, index, n, false);
}
@@ -150,7 +150,7 @@ int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request,
request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
request->l2_table->table = qed_alloc_table(s);
BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_LOAD);
ret = qed_read_table(s, offset, request->l2_table->table);
if (ret) {
@@ -183,7 +183,7 @@ int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
unsigned int index, unsigned int n,
bool flush)
{
BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
return qed_write_table(s, request->l2_table->offset,
request->l2_table->table, index, n, flush);
}


@@ -195,14 +195,15 @@ static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
*
* The string is NUL-terminated.
*/
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
char *buf, size_t buflen)
static int coroutine_fn GRAPH_RDLOCK
qed_read_string(BdrvChild *file, uint64_t offset,
size_t n, char *buf, size_t buflen)
{
int ret;
if (n >= buflen) {
return -EINVAL;
}
ret = bdrv_pread(file, offset, n, buf, 0);
ret = bdrv_co_pread(file, offset, n, buf, 0);
if (ret < 0) {
return ret;
}
@@ -882,7 +883,7 @@ static int coroutine_fn GRAPH_RDLOCK
qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov)
{
if (s->bs->backing) {
BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
}
qemu_iovec_memset(qiov, 0, 0, qiov->size);
@@ -917,7 +918,7 @@ qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos, uint64_t len,
goto out;
}
BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
if (ret < 0) {
goto out;
@@ -1069,7 +1070,7 @@ static int coroutine_fn GRAPH_RDLOCK qed_aio_write_main(QEDAIOCB *acb)
trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);
BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
&acb->cur_qiov, 0);
}
@@ -1323,7 +1324,7 @@ qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
} else if (ret != QED_CLUSTER_FOUND) {
r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
} else {
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
&acb->cur_qiov, 0);
}


@@ -214,7 +214,7 @@ raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
@@ -268,7 +268,7 @@ raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
fail:


@@ -169,9 +169,10 @@ exit:
* It is assumed that 'buffer' is at least 4096*num_sectors large.
*
* 0 is returned on success, -errno otherwise */
static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
uint32_t *sectors_written, void *buffer,
uint32_t num_sectors)
static int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
uint32_t *sectors_written, void *buffer,
uint32_t num_sectors)
{
int ret = 0;
uint64_t offset;
@@ -195,8 +196,7 @@ static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
/* full */
break;
}
ret = bdrv_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp,
0);
ret = bdrv_co_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp, 0);
if (ret < 0) {
goto exit;
}
@@ -853,8 +853,9 @@ static void vhdx_log_raw_to_le_sector(VHDXLogDescriptor *desc,
}
static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
static int coroutine_fn GRAPH_RDLOCK
vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
{
int ret = 0;
void *buffer = NULL;
@@ -924,7 +925,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sectors += partial_sectors;
file_length = bdrv_getlength(bs->file->bs);
file_length = bdrv_co_getlength(bs->file->bs);
if (file_length < 0) {
ret = file_length;
goto exit;
@@ -971,8 +972,8 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
if (i == 0 && leading_length) {
/* partial sector at the front of the buffer */
ret = bdrv_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE,
merged_sector, 0);
ret = bdrv_co_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE,
merged_sector, 0);
if (ret < 0) {
goto exit;
}
@@ -981,9 +982,9 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sector_write = merged_sector;
} else if (i == sectors - 1 && trailing_length) {
/* partial sector at the end of the buffer */
ret = bdrv_pread(bs->file, file_offset + trailing_length,
VHDX_LOG_SECTOR_SIZE - trailing_length,
merged_sector + trailing_length, 0);
ret = bdrv_co_pread(bs->file, file_offset + trailing_length,
VHDX_LOG_SECTOR_SIZE - trailing_length,
merged_sector + trailing_length, 0);
if (ret < 0) {
goto exit;
}
@@ -1036,8 +1037,9 @@ exit:
}
/* Perform a log write, and then immediately flush the entire log */
int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
int coroutine_fn
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
{
int ret = 0;
VHDXLogSequence logs = { .valid = true,
@@ -1047,7 +1049,7 @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
/* Make sure data written (new and/or changed blocks) is stable
* on disk, before creating log entry */
ret = bdrv_flush(bs);
ret = bdrv_co_flush(bs);
if (ret < 0) {
goto exit;
}
@@ -1059,7 +1061,7 @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
logs.log = s->log;
/* Make sure log is stable on disk */
ret = bdrv_flush(bs);
ret = bdrv_co_flush(bs);
if (ret < 0) {
goto exit;
}


@@ -1250,12 +1250,13 @@ exit:
*
* Returns the file offset start of the new payload block
*/
static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
uint64_t *new_offset, bool *need_zero)
static int coroutine_fn GRAPH_RDLOCK
vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
uint64_t *new_offset, bool *need_zero)
{
int64_t current_len;
current_len = bdrv_getlength(bs->file->bs);
current_len = bdrv_co_getlength(bs->file->bs);
if (current_len < 0) {
return current_len;
}
@@ -1271,16 +1272,16 @@ static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
if (*need_zero) {
int ret;
ret = bdrv_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL);
ret = bdrv_co_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL);
if (ret != -ENOTSUP) {
*need_zero = false;
return ret;
}
}
return bdrv_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, 0, NULL);
return bdrv_co_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, 0, NULL);
}
/*
@@ -1572,12 +1573,10 @@ exit:
* The first 64KB of the Metadata section is reserved for the metadata
* header and entries; beyond that, the metadata items themselves reside.
*/
static int vhdx_create_new_metadata(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
uint64_t metadata_offset,
VHDXImageType type)
static int coroutine_fn
vhdx_create_new_metadata(BlockBackend *blk, uint64_t image_size,
uint32_t block_size, uint32_t sector_size,
uint64_t metadata_offset, VHDXImageType type)
{
int ret = 0;
uint32_t offset = 0;
@@ -1668,13 +1667,13 @@ static int vhdx_create_new_metadata(BlockBackend *blk,
VHDX_META_FLAGS_IS_VIRTUAL_DISK;
vhdx_metadata_entry_le_export(&md_table_entry[4]);
ret = blk_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0);
ret = blk_co_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0);
if (ret < 0) {
goto exit;
}
ret = blk_pwrite(blk, metadata_offset + (64 * KiB),
VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0);
ret = blk_co_pwrite(blk, metadata_offset + (64 * KiB),
VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0);
if (ret < 0) {
goto exit;
}
@@ -1694,10 +1693,11 @@ exit:
* Fixed images: default state of the BAT is fully populated, with
* file offsets and state PAYLOAD_BLOCK_FULLY_PRESENT.
*/
static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t image_size, VHDXImageType type,
bool use_zero_blocks, uint64_t file_offset,
uint32_t length, Error **errp)
static int coroutine_fn
vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t image_size, VHDXImageType type,
bool use_zero_blocks, uint64_t file_offset,
uint32_t length, Error **errp)
{
int ret = 0;
uint64_t data_file_offset;
@@ -1718,14 +1718,14 @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
if (type == VHDX_TYPE_DYNAMIC) {
/* All zeroes, so we can just extend the file - the end of the BAT
* is the furthest thing we have written yet */
ret = blk_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF,
0, errp);
ret = blk_co_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF,
0, errp);
if (ret < 0) {
goto exit;
}
} else if (type == VHDX_TYPE_FIXED) {
ret = blk_truncate(blk, data_file_offset + image_size, false,
PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, data_file_offset + image_size, false,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto exit;
}
@@ -1759,7 +1759,7 @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
s->bat[sinfo.bat_idx] = cpu_to_le64(s->bat[sinfo.bat_idx]);
sector_num += s->sectors_per_block;
}
ret = blk_pwrite(blk, file_offset, length, s->bat, 0);
ret = blk_co_pwrite(blk, file_offset, length, s->bat, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write the BAT");
goto exit;
@@ -1780,15 +1780,12 @@ exit:
* to create the BAT itself, we will also cause the BAT to be
* created.
*/
static int vhdx_create_new_region_table(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
uint32_t log_size,
bool use_zero_blocks,
VHDXImageType type,
uint64_t *metadata_offset,
Error **errp)
static int coroutine_fn
vhdx_create_new_region_table(BlockBackend *blk, uint64_t image_size,
uint32_t block_size, uint32_t sector_size,
uint32_t log_size, bool use_zero_blocks,
VHDXImageType type, uint64_t *metadata_offset,
Error **errp)
{
int ret = 0;
uint32_t offset = 0;
@@ -1863,15 +1860,15 @@ static int vhdx_create_new_region_table(BlockBackend *blk,
}
/* Now write out the region headers to disk */
ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
ret = blk_co_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write first region table");
goto exit;
}
ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
ret = blk_co_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write second region table");
goto exit;


@@ -413,8 +413,9 @@ bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset);
int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);
int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset);
int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset);
static inline void leguid_to_cpus(MSGUID *guid)
{


@@ -339,7 +339,8 @@ out:
return ret;
}
static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
static int coroutine_fn GRAPH_RDLOCK
vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
{
char *desc, *tmp_desc;
char *p_name, *tmp_str;
@@ -348,7 +349,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
desc = g_malloc0(DESC_SIZE);
tmp_desc = g_malloc0(DESC_SIZE);
ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
ret = bdrv_co_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
if (ret < 0) {
goto out;
}
@@ -368,7 +369,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
pstrcat(desc, DESC_SIZE, tmp_desc);
}
ret = bdrv_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
ret = bdrv_co_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
out:
g_free(desc);
@@ -1437,7 +1438,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
if (skip_start_bytes > 0) {
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_co_pread(bs->backing, offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
@@ -1445,7 +1446,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwrite(extent->file, cluster_offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
@@ -1457,7 +1458,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
if (skip_end_bytes < cluster_bytes) {
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_co_pread(bs->backing, offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
@@ -1466,7 +1467,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwrite(extent->file, cluster_offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
@@ -1487,7 +1488,7 @@ vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, uint32_t offset)
{
offset = cpu_to_le32(offset);
/* update L2 table */
BLKDBG_EVENT(extent->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_UPDATE);
if (bdrv_co_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
@@ -1617,7 +1618,7 @@ get_cluster_offset(BlockDriverState *bs, VmdkExtent *extent,
}
}
l2_table = (char *)extent->l2_cache + (min_index * l2_size_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_LOAD);
if (bdrv_co_pread(extent->file,
(int64_t)l2_offset * 512,
l2_size_bytes,
@@ -1828,12 +1829,12 @@ vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
n_bytes = buf_len + sizeof(VmdkGrainMarker);
qemu_iovec_init_buf(&local_qiov, data, n_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
} else {
qemu_iovec_init(&local_qiov, qiov->niov);
qemu_iovec_concat(&local_qiov, qiov, qiov_offset, n_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_AIO);
}
write_offset = cluster_offset + offset_in_cluster;
@@ -1875,7 +1876,7 @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
if (!extent->compressed) {
BLKDBG_EVENT(extent->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_AIO);
ret = bdrv_co_preadv(extent->file,
cluster_offset + offset_in_cluster, bytes,
qiov, 0);
@@ -1889,7 +1890,7 @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
buf_bytes = cluster_bytes * 2;
cluster_buf = g_malloc(buf_bytes);
uncomp_buf = g_malloc(cluster_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(extent->file, cluster_offset, buf_bytes, cluster_buf,
0);
if (ret < 0) {
@@ -1967,7 +1968,7 @@ vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_preadv(bs->backing, offset, n_bytes,
&local_qiov, 0);
if (ret < 0) {
@@ -2131,7 +2132,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t length;
for (i = 0; i < s->num_extents; i++) {
length = bdrv_getlength(s->extents[i].file->bs);
length = bdrv_co_getlength(s->extents[i].file->bs);
if (length < 0) {
return length;
}
@@ -2165,7 +2166,7 @@ vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}
static int GRAPH_UNLOCKED
static int coroutine_fn GRAPH_UNLOCKED
vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
bool zeroed_grain, Error **errp)
{
@@ -2176,7 +2177,7 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
int gd_buf_size;
if (flat) {
ret = blk_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp);
goto exit;
}
magic = cpu_to_be32(VMDK4_MAGIC);
@@ -2228,19 +2229,19 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
header.check_bytes[3] = 0xa;
/* write all the data */
ret = blk_pwrite(blk, 0, sizeof(magic), &magic, 0);
ret = blk_co_pwrite(blk, 0, sizeof(magic), &magic, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
}
ret = blk_pwrite(blk, sizeof(magic), sizeof(header), &header, 0);
ret = blk_co_pwrite(blk, sizeof(magic), sizeof(header), &header, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
}
ret = blk_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false,
PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto exit;
}
@@ -2252,8 +2253,8 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
i < gt_count; i++, tmp += gt_size) {
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
ret = blk_co_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
@@ -2264,8 +2265,8 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
i < gt_count; i++, tmp += gt_size) {
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
ret = blk_co_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
}
@@ -2908,7 +2909,7 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
int64_t sector_num = 0;
int64_t total_sectors = bdrv_nb_sectors(bs);
int64_t total_sectors = bdrv_co_nb_sectors(bs);
int ret;
uint64_t cluster_offset;
@@ -2938,7 +2939,7 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
break;
}
if (ret == VMDK_OK) {
int64_t extent_len = bdrv_getlength(extent->file->bs);
int64_t extent_len = bdrv_co_getlength(extent->file->bs);
if (extent_len < 0) {
fprintf(stderr,
"ERROR: could not get extent file length for sector %"


@@ -486,8 +486,8 @@ static int vpc_reopen_prepare(BDRVReopenState *state,
* operation (the block bitmaps is updated then), 0 otherwise.
* If write is true then err must not be NULL.
*/
static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
bool write, int *err)
static int64_t coroutine_fn GRAPH_RDLOCK
get_image_offset(BlockDriverState *bs, uint64_t offset, bool write, int *err)
{
BDRVVPCState *s = bs->opaque;
uint64_t bitmap_offset, block_offset;
@@ -515,8 +515,7 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
s->last_bitmap_offset = bitmap_offset;
memset(bitmap, 0xff, s->bitmap_size);
r = bdrv_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap,
0);
r = bdrv_co_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap, 0);
if (r < 0) {
*err = r;
return -2;
@@ -532,13 +531,13 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
*
* Returns 0 on success and < 0 on error
*/
static int rewrite_footer(BlockDriverState *bs)
static int coroutine_fn GRAPH_RDLOCK rewrite_footer(BlockDriverState *bs)
{
int ret;
BDRVVPCState *s = bs->opaque;
int64_t offset = s->free_data_block_offset;
ret = bdrv_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0);
ret = bdrv_co_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0);
if (ret < 0)
return ret;
@@ -552,7 +551,8 @@ static int rewrite_footer(BlockDriverState *bs)
*
* Returns the sectors' offset in the image file on success and < 0 on error
*/
static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
static int64_t coroutine_fn GRAPH_RDLOCK
alloc_block(BlockDriverState *bs, int64_t offset)
{
BDRVVPCState *s = bs->opaque;
int64_t bat_offset;
@@ -572,8 +572,8 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
/* Initialize the block's bitmap */
memset(bitmap, 0xff, s->bitmap_size);
ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset,
s->bitmap_size, bitmap, 0);
ret = bdrv_co_pwrite_sync(bs->file, s->free_data_block_offset,
s->bitmap_size, bitmap, 0);
if (ret < 0) {
return ret;
}
@@ -587,7 +587,7 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
/* Write BAT entry to disk */
bat_offset = s->bat_offset + (4 * index);
bat_value = cpu_to_be32(s->pagetable[index]);
ret = bdrv_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0);
ret = bdrv_co_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0);
if (ret < 0)
goto fail;
@@ -718,11 +718,11 @@ fail:
return ret;
}
static int coroutine_fn vpc_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
static int coroutine_fn GRAPH_RDLOCK
vpc_co_block_status(BlockDriverState *bs, bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVVPCState *s = bs->opaque;
int64_t image_offset;
@@ -820,8 +820,8 @@ static int calculate_geometry(int64_t total_sectors, uint16_t *cyls,
return 0;
}
static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_sectors)
static int coroutine_fn create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_sectors)
{
VHDDynDiskHeader dyndisk_header;
uint8_t bat_sector[512];
@@ -834,13 +834,13 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
block_size = 0x200000;
num_bat_entries = DIV_ROUND_UP(total_sectors, block_size / 512);
ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0);
ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0);
if (ret < 0) {
goto fail;
}
offset = 1536 + ((num_bat_entries * 4 + 511) & ~511);
ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0);
ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0);
if (ret < 0) {
goto fail;
}
@@ -850,7 +850,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
memset(bat_sector, 0xFF, 512);
for (i = 0; i < DIV_ROUND_UP(num_bat_entries * 4, 512); i++) {
ret = blk_pwrite(blk, offset, 512, bat_sector, 0);
ret = blk_co_pwrite(blk, offset, 512, bat_sector, 0);
if (ret < 0) {
goto fail;
}
@@ -878,7 +878,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
/* Write the header */
offset = 512;
ret = blk_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0);
ret = blk_co_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0);
if (ret < 0) {
goto fail;
}
@@ -888,21 +888,21 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
return ret;
}
static int create_fixed_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_size, Error **errp)
static int coroutine_fn create_fixed_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_size, Error **errp)
{
int ret;
/* Add footer to total size */
total_size += sizeof(*footer);
ret = blk_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
return ret;
}
ret = blk_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer),
footer, 0);
ret = blk_co_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer),
footer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Unable to write VHD header");
return ret;


@@ -230,20 +230,27 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
uint64_t perm, uint64_t shared_perm, Error **errp)
{
BdrvChild *c;
AioContext *ctx = bdrv_get_aio_context(bs);
bool need_context_ops;
GLOBAL_STATE_CODE();
bdrv_ref(bs);
need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;
need_context_ops = ctx != job->job.aio_context;
if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
aio_context_release(job->job.aio_context);
if (need_context_ops) {
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_release(job->job.aio_context);
}
aio_context_acquire(ctx);
}
c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
errp);
if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
aio_context_acquire(job->job.aio_context);
if (need_context_ops) {
aio_context_release(ctx);
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_acquire(job->job.aio_context);
}
}
if (c == NULL) {
return -EPERM;


@@ -143,11 +143,15 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
* aware of iothreads require their BlockBackends to be in the main
* AioContext.
*/
ctx = iothread ? bdrv_get_aio_context(bs) : qemu_get_aio_context();
blk = blk_new(ctx, 0, BLK_PERM_ALL);
ctx = bdrv_get_aio_context(bs);
blk = blk_new(iothread ? ctx : qemu_get_aio_context(),
0, BLK_PERM_ALL);
blk_created = true;
aio_context_acquire(ctx);
ret = blk_insert_bs(blk, bs, errp);
aio_context_release(ctx);
if (ret < 0) {
goto fail;
}


@@ -224,6 +224,13 @@ bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event);
void co_wrapper_mixed_bdrv_rdlock
bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
#define BLKDBG_CO_EVENT(child, evt) \
do { \
if (child) { \
bdrv_co_debug_event(child->bs, evt); \
} \
} while (0)
#define BLKDBG_EVENT(child, evt) \
do { \
if (child) { \

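The BLKDBG_CO_EVENT macro added above mirrors BLKDBG_EVENT but expands to bdrv_co_debug_event(), so coroutine code no longer has to go through the mixed wrapper. A minimal sketch of how a block driver is expected to pick the coroutine variant, following the qed/vmdk hunks in this series (blkdbg_example_co_write() is a made-up illustration, not a function from these patches):

static int coroutine_fn GRAPH_RDLOCK
blkdbg_example_co_write(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    /* Coroutine context: emit the debug event with the coroutine-safe macro ... */
    BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
    /* ... and issue the I/O with the coroutine helper. */
    return bdrv_co_pwritev(bs->file, offset, qiov->size, qiov, 0);
}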

@@ -111,10 +111,12 @@ void unregister_aiocontext(AioContext *ctx);
* The wrlock can only be taken from the main loop, with BQL held, as only the
* main loop is allowed to modify the graph.
*
* If @bs is non-NULL, its AioContext is temporarily released.
*
* This function polls. Callers must not hold the lock of any AioContext other
* than the current one.
* than the current one and the one of @bs.
*/
void bdrv_graph_wrlock(void) TSA_ACQUIRE(graph_lock) TSA_NO_TSA;
void bdrv_graph_wrlock(BlockDriverState *bs) TSA_ACQUIRE(graph_lock) TSA_NO_TSA;
/*
* bdrv_graph_wrunlock:

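With the new prototype, callers pass the BlockDriverState whose AioContext the lock may temporarily drop while it polls. A rough sketch of the resulting calling pattern, assuming the caller holds bs's AioContext (the graph modification in the middle is only a placeholder):

    /* Sketch only: take the writer lock around a graph change. */
    bdrv_graph_wrlock(bs);   /* may release and re-acquire bs's AioContext */
    /* ... modify the node graph here ... */
    bdrv_graph_wrunlock();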

@@ -0,0 +1,85 @@
#!/usr/bin/env python3
# group: rw quick auto
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
import asyncio
import iotests
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
iotests.verify_virtio_scsi_pci_or_ccw()
with iotests.FilePath('disk0.img') as img_path, \
iotests.FilePath('disk0-snap.img') as snap_path, \
iotests.FilePath('mirror-src.img') as src_path, \
iotests.FilePath('mirror-dst.img') as dst_path, \
iotests.VM() as vm:
img_size = '10M'
iotests.qemu_img_create('-f', iotests.imgfmt, img_path, img_size)
iotests.qemu_img_create('-f', iotests.imgfmt, '-b', img_path,
'-F', iotests.imgfmt, snap_path)
iotests.qemu_img_create('-f', iotests.imgfmt, src_path, img_size)
iotests.qemu_img_create('-f', iotests.imgfmt, dst_path, img_size)
iotests.qemu_io_log('-c', 'write 0 64k', img_path)
iotests.qemu_io_log('-c', 'write 1M 64k', snap_path)
iotests.qemu_io_log('-c', 'write 3M 64k', snap_path)
iotests.qemu_io_log('-c', f'write 0 {img_size}', src_path)
iotests.log('Launching VM...')
vm.add_object('iothread,id=iothread0')
vm.add_object('throttle-group,x-bps-write=1048576,id=tg0')
vm.add_blockdev(f'file,node-name=disk0-file,filename={img_path}')
vm.add_blockdev('qcow2,node-name=disk0-fmt,file=disk0-file')
vm.add_drive(snap_path, 'backing=disk0-fmt,node-name=disk0',
interface='none')
vm.add_device('virtio-scsi,iothread=iothread0')
vm.add_device('scsi-hd,drive=drive0')
vm.add_blockdev(f'file,filename={src_path},node-name=mirror-src-file')
vm.add_blockdev('qcow2,file=mirror-src-file,node-name=mirror-src')
vm.add_blockdev(f'file,filename={dst_path},node-name=mirror-dst-file')
vm.add_blockdev('qcow2,file=mirror-dst-file,node-name=mirror-dst-fmt')
vm.add_blockdev('throttle,throttle-group=tg0,file=mirror-dst-fmt,'
'node-name=mirror-dst')
vm.add_device('scsi-hd,drive=mirror-src')
vm.launch()
# The background I/O is created on unrelated nodes (so that they won't be
# drained together with the other ones), but on the same iothread
iotests.log('Creating some background I/O...')
iotests.log(vm.qmp('blockdev-mirror', job_id='job0', sync='full',
device='mirror-src', target='mirror-dst',
auto_dismiss=False))
iotests.log('Starting active commit...')
iotests.log(vm.qmp('block-commit', device='disk0', job_id='job1',
auto_dismiss=False))
# Should succeed and not time out
try:
vm.run_job('job1', wait=5.0)
vm.shutdown()
except asyncio.TimeoutError:
# VM may be stuck, kill it
vm.kill()
raise


@@ -0,0 +1,23 @@
wrote 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 1048576
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 65536/65536 bytes at offset 3145728
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 10485760/10485760 bytes at offset 0
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Launching VM...
Creating some background I/O...
{"return": {}}
Starting active commit...
{"return": {}}
{"execute": "job-complete", "arguments": {"id": "job1"}}
{"return": {}}
{"data": {"device": "job1", "len": 131072, "offset": 131072, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "job1", "len": 131072, "offset": 131072, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"execute": "job-dismiss", "arguments": {"id": "job1"}}
{"return": {}}


@@ -825,6 +825,7 @@ static void test_attach_second_node(void)
BlockDriverState *bs, *filter;
QDict *options;
aio_context_acquire(main_ctx);
blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
blk_insert_bs(blk, bs, &error_abort);
@@ -833,7 +834,6 @@ static void test_attach_second_node(void)
qdict_put_str(options, "driver", "raw");
qdict_put_str(options, "file", "base");
aio_context_acquire(main_ctx);
filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
aio_context_release(main_ctx);
@@ -857,9 +857,11 @@ static void test_attach_preserve_blk_ctx(void)
{
IOThread *iothread = iothread_new();
AioContext *ctx = iothread_get_aio_context(iothread);
AioContext *main_ctx = qemu_get_aio_context();
BlockBackend *blk;
BlockDriverState *bs;
aio_context_acquire(main_ctx);
blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
@@ -868,6 +870,7 @@ static void test_attach_preserve_blk_ctx(void)
blk_insert_bs(blk, bs, &error_abort);
g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == ctx);
aio_context_release(main_ctx);
/* Remove the node again */
aio_context_acquire(ctx);
@@ -877,7 +880,9 @@ static void test_attach_preserve_blk_ctx(void)
g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());
/* Re-attach the node */
aio_context_acquire(main_ctx);
blk_insert_bs(blk, bs, &error_abort);
aio_context_release(main_ctx);
g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == ctx);