block: Byte-based bdrv_co_do_copy_on_readv()

In a first step to convert the common I/O path to work on bytes rather
than sectors, this converts the copy-on-read logic that is used by
bdrv_aligned_preadv().

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
Kevin Wolf 2016-06-02 11:41:52 +02:00
parent 8c0dcbc4ad
commit 244483e64e
4 changed files with 52 additions and 33 deletions

View File

@@ -404,9 +404,9 @@ static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
} }
/** /**
* Round a region to cluster boundaries * Round a region to cluster boundaries (sector-based)
*/ */
void bdrv_round_to_clusters(BlockDriverState *bs, void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, int64_t sector_num, int nb_sectors,
int64_t *cluster_sector_num, int64_t *cluster_sector_num,
int *cluster_nb_sectors) int *cluster_nb_sectors)
@@ -424,6 +424,26 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
} }
} }
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    /* Without a known cluster size, the region cannot be rounded;
     * pass it through unchanged. */
    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
        return;
    }

    int64_t c = bdi.cluster_size;
    /* Widen [offset, offset + bytes) so both ends land on a
     * cluster boundary. */
    *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
    *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
}
static int bdrv_get_cluster_size(BlockDriverState *bs) static int bdrv_get_cluster_size(BlockDriverState *bs)
{ {
BlockDriverInfo bdi; BlockDriverInfo bdi;
@@ -865,7 +885,7 @@ emulate_flags:
} }
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{ {
/* Perform I/O through a temporary buffer so that users who scribble over /* Perform I/O through a temporary buffer so that users who scribble over
* their read buffer while the operation is in progress do not end up * their read buffer while the operation is in progress do not end up
@@ -877,21 +897,20 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
BlockDriver *drv = bs->drv; BlockDriver *drv = bs->drv;
struct iovec iov; struct iovec iov;
QEMUIOVector bounce_qiov; QEMUIOVector bounce_qiov;
int64_t cluster_sector_num; int64_t cluster_offset;
int cluster_nb_sectors; unsigned int cluster_bytes;
size_t skip_bytes; size_t skip_bytes;
int ret; int ret;
/* Cover entire cluster so no additional backing file I/O is required when /* Cover entire cluster so no additional backing file I/O is required when
* allocating cluster in the image file. * allocating cluster in the image file.
*/ */
bdrv_round_to_clusters(bs, sector_num, nb_sectors, bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
&cluster_sector_num, &cluster_nb_sectors);
trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
cluster_sector_num, cluster_nb_sectors); cluster_offset, cluster_bytes);
iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; iov.iov_len = cluster_bytes;
iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
if (bounce_buffer == NULL) { if (bounce_buffer == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
@@ -900,8 +919,7 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
qemu_iovec_init_external(&bounce_qiov, &iov, 1); qemu_iovec_init_external(&bounce_qiov, &iov, 1);
ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE, ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
cluster_nb_sectors * BDRV_SECTOR_SIZE,
&bounce_qiov, 0); &bounce_qiov, 0);
if (ret < 0) { if (ret < 0) {
goto err; goto err;
@@ -909,16 +927,12 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
if (drv->bdrv_co_pwrite_zeroes && if (drv->bdrv_co_pwrite_zeroes &&
buffer_is_zero(bounce_buffer, iov.iov_len)) { buffer_is_zero(bounce_buffer, iov.iov_len)) {
ret = bdrv_co_do_pwrite_zeroes(bs, ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
cluster_sector_num * BDRV_SECTOR_SIZE,
cluster_nb_sectors * BDRV_SECTOR_SIZE,
0);
} else { } else {
/* This does not change the data on the disk, it is not necessary /* This does not change the data on the disk, it is not necessary
* to flush even in cache=writethrough mode. * to flush even in cache=writethrough mode.
*/ */
ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE, ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
cluster_nb_sectors * BDRV_SECTOR_SIZE,
&bounce_qiov, 0); &bounce_qiov, 0);
} }
@@ -930,9 +944,8 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
goto err; goto err;
} }
skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; skip_bytes = offset - cluster_offset;
qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);
nb_sectors * BDRV_SECTOR_SIZE);
err: err:
qemu_vfree(bounce_buffer); qemu_vfree(bounce_buffer);
@@ -982,7 +995,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
} }
if (!ret || pnum != nb_sectors) { if (!ret || pnum != nb_sectors) {
ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
goto out; goto out;
} }
} }

View File

@@ -185,8 +185,9 @@ static int mirror_cow_align(MirrorBlockJob *s,
need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors, need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
s->cow_bitmap); s->cow_bitmap);
if (need_cow) { if (need_cow) {
bdrv_round_to_clusters(blk_bs(s->target), *sector_num, *nb_sectors, bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
&align_sector_num, &align_nb_sectors); *nb_sectors, &align_sector_num,
&align_nb_sectors);
} }
if (align_nb_sectors > max_sectors) { if (align_nb_sectors > max_sectors) {
@@ -384,8 +385,9 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
} else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
int64_t target_sector_num; int64_t target_sector_num;
int target_nb_sectors; int target_nb_sectors;
bdrv_round_to_clusters(blk_bs(s->target), sector_num, io_sectors, bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
&target_sector_num, &target_nb_sectors); io_sectors, &target_sector_num,
&target_nb_sectors);
if (target_sector_num == sector_num && if (target_sector_num == sector_num &&
target_nb_sectors == io_sectors) { target_nb_sectors == io_sectors) {
mirror_method = ret & BDRV_BLOCK_ZERO ? mirror_method = ret & BDRV_BLOCK_ZERO ?

View File

@@ -404,10 +404,14 @@ int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors); const uint8_t *buf, int nb_sectors);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs); ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs, void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, int64_t sector_num, int nb_sectors,
int64_t *cluster_sector_num, int64_t *cluster_sector_num,
int *cluster_nb_sectors); int *cluster_nb_sectors);
void bdrv_round_to_clusters(BlockDriverState *bs,
int64_t offset, unsigned int bytes,
int64_t *cluster_offset,
unsigned int *cluster_bytes);
const char *bdrv_get_encrypted_filename(BlockDriverState *bs); const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs, void bdrv_get_backing_filename(BlockDriverState *bs,

View File

@@ -73,7 +73,7 @@ bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs
bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d" bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d" bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_pwrite_zeroes(void *bs, int64_t offset, int count, int flags) "bs %p offset %"PRId64" count %d flags %#x" bdrv_co_pwrite_zeroes(void *bs, int64_t offset, int count, int flags) "bs %p offset %"PRId64" count %d flags %#x"
bdrv_co_do_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d" bdrv_co_do_copy_on_readv(void *bs, int64_t offset, unsigned int bytes, int64_t cluster_offset, unsigned int cluster_bytes) "bs %p offset %"PRId64" bytes %u cluster_offset %"PRId64" cluster_bytes %u"
# block/stream.c # block/stream.c
stream_one_iteration(void *s, int64_t sector_num, int nb_sectors, int is_allocated) "s %p sector_num %"PRId64" nb_sectors %d is_allocated %d" stream_one_iteration(void *s, int64_t sector_num, int nb_sectors, int is_allocated) "s %p sector_num %"PRId64" nb_sectors %d is_allocated %d"