Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

v3:
- Drop UFS emulation due to CI failures
- Add "aio-posix: zero out io_uring sqe user_data"

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmTvLIEACgkQnKSrs4Gr
# c8itVggAka3RMkEclbeW7JKJBOolm3oUuJTobV8oJfDNMQ8mmom9JkXVUctyPWQT
# EF+oeqZz1omjr0Dk7YEA2toCahTbXm/UsG7i6cZg8JXPl6e9sOne0j+p5zO5x/kc
# YlG43SBQJHdp/BfTm/gvwUh0W2on0wadaeEV82m3ZyIrZGTgNcrC1p1gj5dwF5VX
# SqW02mgALETECyJpo8O7y9vNUYGxEtETG9jzAhtrugGpYk4bPeXlm/rc+2zwV+ET
# YCnfUvhjhlu5vS4nkta6natg0If16ODjy35vWYm/aGlgveGTqQq9HWgTL71eNuxm
# Smn+hJHuvkyBclKjbGiiO1W1MuG1/g==
# =UvNK
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 30 Aug 2023 07:48:17 EDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [ultimate]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [ultimate]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  aio-posix: zero out io_uring sqe user_data
  tests/qemu-iotests/197: add testcase for CoR with subclusters
  block/io: align requests to subcluster_size
  block: add subcluster_size field to BlockDriverInfo
  block-migration: Ensure we don't crash during migration cleanup

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 156618d9ea
Author: Stefan Hajnoczi <stefanha@redhat.com>
Date:   2023-08-30 09:20:27 -04:00

10 changed files with 110 additions and 35 deletions

diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -6480,6 +6480,13 @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
     }
     memset(bdi, 0, sizeof(*bdi));
     ret = drv->bdrv_co_get_info(bs, bdi);
+    if (bdi->subcluster_size == 0) {
+        /*
+         * If the driver left this unset, subclusters are not supported.
+         * Then it is safe to treat each cluster as having only one subcluster.
+         */
+        bdi->subcluster_size = bdi->cluster_size;
+    }
     if (ret < 0) {
         return ret;
     }

diff --git a/block/io.c b/block/io.c
--- a/block/io.c
+++ b/block/io.c

@@ -728,21 +728,21 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
 }
 
 /**
- * Round a region to cluster boundaries
+ * Round a region to subcluster (if supported) or cluster boundaries
  */
 void coroutine_fn GRAPH_RDLOCK
-bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
-                       int64_t *cluster_offset, int64_t *cluster_bytes)
+bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                          int64_t *align_offset, int64_t *align_bytes)
 {
     BlockDriverInfo bdi;
     IO_CODE();
 
-    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
-        *cluster_offset = offset;
-        *cluster_bytes = bytes;
+    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
+        *align_offset = offset;
+        *align_bytes = bytes;
     } else {
-        int64_t c = bdi.cluster_size;
-        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
-        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
+        int64_t c = bdi.subcluster_size;
+        *align_offset = QEMU_ALIGN_DOWN(offset, c);
+        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
     }
 }
@@ -1168,8 +1168,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
     void *bounce_buffer = NULL;
     BlockDriver *drv = bs->drv;
-    int64_t cluster_offset;
-    int64_t cluster_bytes;
+    int64_t align_offset;
+    int64_t align_bytes;
     int64_t skip_bytes;
     int ret;
     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
@@ -1203,28 +1203,28 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
      * is one reason we loop rather than doing it all at once.
      */
-    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
-    skip_bytes = offset - cluster_offset;
+    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
+    skip_bytes = offset - align_offset;
     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
-                                   cluster_offset, cluster_bytes);
+                                   align_offset, align_bytes);
 
-    while (cluster_bytes) {
+    while (align_bytes) {
         int64_t pnum;
 
         if (skip_write) {
             ret = 1; /* "already allocated", so nothing will be copied */
-            pnum = MIN(cluster_bytes, max_transfer);
+            pnum = MIN(align_bytes, max_transfer);
         } else {
-            ret = bdrv_is_allocated(bs, cluster_offset,
-                                    MIN(cluster_bytes, max_transfer), &pnum);
+            ret = bdrv_is_allocated(bs, align_offset,
+                                    MIN(align_bytes, max_transfer), &pnum);
             if (ret < 0) {
                 /*
                  * Safe to treat errors in querying allocation as if
                  * unallocated; we'll probably fail again soon on the
                  * read, but at least that will set a decent errno.
                  */
-                pnum = MIN(cluster_bytes, max_transfer);
+                pnum = MIN(align_bytes, max_transfer);
             }
 
             /* Stop at EOF if the image ends in the middle of the cluster */
@@ -1242,7 +1242,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
         /* Must copy-on-read; use the bounce buffer */
         pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
         if (!bounce_buffer) {
-            int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
+            int64_t max_we_need = MAX(pnum, align_bytes - pnum);
             int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
             int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
@@ -1254,7 +1254,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
         }
 
         qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
-        ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
+        ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                  &local_qiov, 0, 0);
         if (ret < 0) {
             goto err;
@@ -1266,13 +1266,13 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
             /* FIXME: Should we (perhaps conditionally) be setting
              * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
              * that still correctly reads as zero? */
-            ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
+            ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                            BDRV_REQ_WRITE_UNCHANGED);
         } else {
             /* This does not change the data on the disk, it is not
              * necessary to flush even in cache=writethrough mode.
              */
-            ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
+            ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                       &local_qiov, 0,
                                       BDRV_REQ_WRITE_UNCHANGED);
         }
@@ -1301,8 +1301,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
             }
         }
 
-        cluster_offset += pnum;
-        cluster_bytes -= pnum;
+        align_offset += pnum;
+        align_bytes -= pnum;
         progress += pnum - skip_bytes;
         skip_bytes = 0;
     }
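
To see concretely what the rename buys, note that the arithmetic is unchanged; only the granularity shrinks from cluster_size to subcluster_size. Below is a minimal standalone sketch of that arithmetic, not QEMU code: the ALIGN_* macros are local stand-ins for QEMU_ALIGN_DOWN()/QEMU_ALIGN_UP() from qemu/osdep.h, and round_to_subclusters() repeats the hunk's logic with the BlockDriverState plumbing stripped out.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for QEMU_ALIGN_DOWN()/QEMU_ALIGN_UP() (qemu/osdep.h). */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

/* Same arithmetic as bdrv_round_to_subclusters(), with c = subcluster size. */
static void round_to_subclusters(int64_t offset, int64_t bytes, int64_t c,
                                 int64_t *align_offset, int64_t *align_bytes)
{
    *align_offset = ALIGN_DOWN(offset, c);
    *align_bytes = ALIGN_UP(offset - *align_offset + bytes, c);
}

int main(void)
{
    int64_t off, len;

    /* 2 KiB subclusters (extended_l2 with 64 KiB clusters): a 2 KiB read
     * at 31 KiB straddles one boundary and grows to 30 KiB..34 KiB. */
    round_to_subclusters(31 * 1024, 2 * 1024, 2 * 1024, &off, &len);
    assert(off == 30 * 1024 && len == 4 * 1024);

    /* Without subclusters the same request rounds up to a whole cluster. */
    round_to_subclusters(31 * 1024, 2 * 1024, 64 * 1024, &off, &len);
    assert(off == 0 && len == 64 * 1024);

    printf("rounding ok\n");
    return 0;
}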

diff --git a/block/mirror.c b/block/mirror.c
--- a/block/mirror.c
+++ b/block/mirror.c

@@ -283,8 +283,8 @@ static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
     need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                           s->cow_bitmap);
     if (need_cow) {
-        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
-                               &align_offset, &align_bytes);
+        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
+                                  &align_offset, &align_bytes);
     }
 
     if (align_bytes > max_bytes) {
@@ -576,8 +576,8 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
             int64_t target_offset;
             int64_t target_bytes;
             WITH_GRAPH_RDLOCK_GUARD() {
-                bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
-                                       &target_offset, &target_bytes);
+                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
+                                          &target_offset, &target_bytes);
             }
             if (target_offset == offset &&
                 target_bytes == io_bytes) {

diff --git a/block/qcow2.c b/block/qcow2.c
--- a/block/qcow2.c
+++ b/block/qcow2.c

@@ -5197,6 +5197,7 @@ qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
 {
     BDRVQcow2State *s = bs->opaque;
     bdi->cluster_size = s->cluster_size;
+    bdi->subcluster_size = s->subcluster_size;
     bdi->vm_state_offset = qcow2_vm_state_offset(s);
     bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;
     return 0;

diff --git a/include/block/block-common.h b/include/block/block-common.h
--- a/include/block/block-common.h
+++ b/include/block/block-common.h

@@ -132,6 +132,11 @@ typedef struct BlockZoneWps {
 typedef struct BlockDriverInfo {
     /* in bytes, 0 if irrelevant */
     int cluster_size;
+    /*
+     * A fraction of cluster_size, if supported (currently QCOW2 only); if
+     * disabled or unsupported, set equal to cluster_size.
+     */
+    int subcluster_size;
     /* offset at which the VM state can be saved (0 if not possible) */
     int64_t vm_state_offset;
     bool is_dirty;
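
The contract behind this new field: a format driver fills in subcluster_size only when it actually supports subclusters, and the generic layer (the block.c hunk above) normalizes an unset field to cluster_size, so callers may always rely on a nonzero value. Here is a toy model of that contract; all names (Info, qcow2_like_get_info, raw_like_get_info, get_info) are hypothetical and none of this is QEMU API.

#include <assert.h>
#include <string.h>

typedef struct Info {
    int cluster_size;    /* in bytes, 0 if irrelevant */
    int subcluster_size; /* fraction of cluster_size, or == cluster_size */
} Info;

/* Driver with 64 KiB clusters split into 32 subclusters (extended_l2). */
static void qcow2_like_get_info(Info *info)
{
    info->cluster_size = 64 * 1024;
    info->subcluster_size = 2 * 1024;
}

/* Driver that knows nothing about subclusters and leaves the field at 0. */
static void raw_like_get_info(Info *info)
{
    info->cluster_size = 64 * 1024;
}

/* Generic layer: mirrors the fallback added to bdrv_co_get_info(). */
static void get_info(void (*drv_get_info)(Info *), Info *info)
{
    memset(info, 0, sizeof(*info));
    drv_get_info(info);
    if (info->subcluster_size == 0) {
        /* Driver left it unset: treat each cluster as one subcluster. */
        info->subcluster_size = info->cluster_size;
    }
}

int main(void)
{
    Info info;

    get_info(qcow2_like_get_info, &info);
    assert(info.subcluster_size == 2 * 1024);

    get_info(raw_like_get_info, &info);
    assert(info.subcluster_size == 64 * 1024); /* normalized fallback */
    return 0;
}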

diff --git a/include/block/block-io.h b/include/block/block-io.h
--- a/include/block/block-io.h
+++ b/include/block/block-io.h

@@ -189,10 +189,10 @@ bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                           Error **errp);
 BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
 
-void bdrv_round_to_clusters(BlockDriverState *bs,
-                            int64_t offset, int64_t bytes,
-                            int64_t *cluster_offset,
-                            int64_t *cluster_bytes);
+void bdrv_round_to_subclusters(BlockDriverState *bs,
+                               int64_t offset, int64_t bytes,
+                               int64_t *cluster_offset,
+                               int64_t *cluster_bytes);
 void bdrv_get_backing_filename(BlockDriverState *bs,
                                char *filename, int filename_size);

diff --git a/migration/block.c b/migration/block.c
--- a/migration/block.c
+++ b/migration/block.c

@@ -368,7 +368,9 @@ static void unset_dirty_tracking(void)
     BlkMigDevState *bmds;
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
+        if (bmds->dirty_bitmap) {
+            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
+        }
     }
 }
 
@@ -676,13 +678,18 @@ static int64_t get_remaining_dirty(void)
 static void block_migration_cleanup_bmds(void)
 {
     BlkMigDevState *bmds;
+    BlockDriverState *bs;
     AioContext *ctx;
 
     unset_dirty_tracking();
 
     while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
         QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
-        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
+        bs = blk_bs(bmds->blk);
+        if (bs) {
+            bdrv_op_unblock_all(bs, bmds->blocker);
+        }
+
         error_free(bmds->blocker);
 
         /* Save ctx, because bmds->blk can disappear during blk_unref. */

diff --git a/tests/qemu-iotests/197 b/tests/qemu-iotests/197
--- a/tests/qemu-iotests/197
+++ b/tests/qemu-iotests/197

@@ -122,6 +122,35 @@ $QEMU_IO -f qcow2 -C -c 'read 0 1024' "$TEST_WRAP" | _filter_qemu_io
 $QEMU_IO -f qcow2 -c map "$TEST_WRAP"
 _check_test_img
 
+echo
+echo '=== Copy-on-read with subclusters ==='
+echo
+
+# Create base and top images 64K (1 cluster) each. Make subclusters enabled
+# for the top image
+_make_test_img 64K
+IMGPROTO=file IMGFMT=qcow2 TEST_IMG_FILE="$TEST_WRAP" \
+    _make_test_img --no-opts -o extended_l2=true -F "$IMGFMT" -b "$TEST_IMG" \
+    64K | _filter_img_create
+
+$QEMU_IO -c "write -P 0xaa 0 64k" "$TEST_IMG" | _filter_qemu_io
+
+# Allocate individual subclusters in the top image, and not the whole cluster
+$QEMU_IO -c "write -P 0xbb 28K 2K" -c "write -P 0xcc 34K 2K" "$TEST_WRAP" \
+    | _filter_qemu_io
+
+# Only 2 subclusters should be allocated in the top image at this point
+$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map
+
+# Actual copy-on-read operation
+$QEMU_IO -C -c "read -P 0xaa 30K 4K" "$TEST_WRAP" | _filter_qemu_io
+
+# And here we should have 4 subclusters allocated right in the middle of the
+# top image. Make sure the whole cluster remains unallocated
+$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map
+_check_test_img
+
 # success, all done
 echo '*** done'
 status=0

diff --git a/tests/qemu-iotests/197.out b/tests/qemu-iotests/197.out
--- a/tests/qemu-iotests/197.out
+++ b/tests/qemu-iotests/197.out

@@ -31,4 +31,28 @@ read 1024/1024 bytes at offset 0
 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 1 KiB (0x400) bytes allocated at offset 0 bytes (0x0)
 No errors were found on the image.
+
+=== Copy-on-read with subclusters ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536
+Formatting 'TEST_DIR/t.wrap.IMGFMT', fmt=IMGFMT size=65536 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 2048/2048 bytes at offset 28672
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 2048/2048 bytes at offset 34816
+2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Offset          Length          File
+0               0x7000          TEST_DIR/t.IMGFMT
+0x7000          0x800           TEST_DIR/t.wrap.IMGFMT
+0x7800          0x1000          TEST_DIR/t.IMGFMT
+0x8800          0x800           TEST_DIR/t.wrap.IMGFMT
+0x9000          0x7000          TEST_DIR/t.IMGFMT
+read 4096/4096 bytes at offset 30720
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Offset          Length          File
+0               0x7000          TEST_DIR/t.IMGFMT
+0x7000          0x2000          TEST_DIR/t.wrap.IMGFMT
+0x9000          0x7000          TEST_DIR/t.IMGFMT
+No errors were found on the image.
 *** done
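
The map output above can be cross-checked by hand: with extended_l2=true a 64 KiB cluster carries 32 subclusters of 2 KiB, the two writes allocate one subcluster each (at 0x7000 and 0x8800), and the copy-on-read of 4K at 30K is already subcluster-aligned, so it copies exactly two more subclusters rather than the whole cluster, merging everything into the single 0x7000+0x2000 extent. A small verification sketch in plain C (not QEMU code):

#include <assert.h>
#include <stdbool.h>

#define SUBCLUSTER 2048
#define NSUB       32    /* 64 KiB cluster / 2 KiB subclusters */

static bool top[NSUB];   /* true = subcluster allocated in the top image */

static void alloc_range(int offset, int bytes)
{
    for (int i = offset / SUBCLUSTER; i < (offset + bytes) / SUBCLUSTER; i++) {
        top[i] = true;
    }
}

int main(void)
{
    alloc_range(28 * 1024, 2 * 1024); /* write -P 0xbb 28K 2K */
    alloc_range(34 * 1024, 2 * 1024); /* write -P 0xcc 34K 2K */

    /* The COR read 30K/4K rounds to subclusters, so only 30K..34K is
     * copied up, not the whole 64 KiB cluster. */
    alloc_range(30 * 1024, 4 * 1024);

    /* Exactly subclusters 14..17 (0x7000..0x9000) are now allocated. */
    for (int i = 0; i < NSUB; i++) {
        assert(top[i] == (i >= 14 && i < 18));
    }
    return 0;
}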

diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c

@@ -184,6 +184,7 @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node)
 #else
     io_uring_prep_poll_remove(sqe, node);
 #endif
+    io_uring_sqe_set_data(sqe, NULL);
 }
 
 /* Add a timeout that self-cancels when another cqe becomes ready */
@@ -197,6 +198,7 @@ static void add_timeout_sqe(AioContext *ctx, int64_t ns)
 
     sqe = get_sqe(ctx);
     io_uring_prep_timeout(sqe, &ts, 1, 0);
+    io_uring_sqe_set_data(sqe, NULL);
 }
 
 /* Add sqes from ctx->submit_list for submission */
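
Why user_data must be zeroed here: sqe slots in the submission ring are recycled, and liburing's prep helpers have not always cleared sqe->user_data, so a recycled slot can surface a stale pointer through the completion queue; fdmon-io_uring interprets any non-NULL cqe user_data as an AioHandler. A minimal liburing demonstration of the pattern follows; it assumes liburing is installed (build with: cc demo.c -luring) and is a sketch, not QEMU code.

#include <assert.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };

    assert(io_uring_queue_init(1, &ring, 0) == 0);

    /* First submission tags the sqe with a (non-NULL) cookie. */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_timeout(sqe, &ts, 0, 0);
    io_uring_sqe_set_data(sqe, &ring);
    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    io_uring_cqe_seen(&ring, cqe);

    /* The second submission recycles the same slot. Without the explicit
     * io_uring_sqe_set_data(sqe, NULL) from the hunks above, the stale
     * cookie could come back in the cqe and be mistaken for a handler. */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_timeout(sqe, &ts, 0, 0);
    io_uring_sqe_set_data(sqe, NULL);
    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    assert(io_uring_cqe_get_data(cqe) == NULL);
    io_uring_cqe_seen(&ring, cqe);

    io_uring_queue_exit(&ring);
    printf("user_data correctly zeroed\n");
    return 0;
}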