-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJbOvCTAAoJEL2+eyfA3jBX354P/RFVLsozhcb3DeFj5Ocq2kfS
 sRt/82Ke/f/w/8lNd4wNbOsdG/eg2M4RmLWF4ONWWoeO7Z0KIatUOTtw5HxjBxBX
 XfhQy7RZ65luuCHnLDU6c4IdnDvXBVG/kErydhDZjEyeY8qlxrurBB8331cTRFwu
 hisweIwogPOFDA+/Bty0W0EyVQWFAobL3ExYFlOYFuHwsqJfMPQbytw2zDzC4kjn
 8Ecppyt7rfLsEcyf/4OAoHfbbYOiQl7PkXE7/uXDFyL8zPdRpIlDFSZtmy1Zb213
 mcYhPmehUkFHV/BDF/LdnzjlraK8oMaNu0IDld5cX/1xUU4VtbW2YjAt6OdCn7Ll
 7YbNNKYU/mM1QUPshX4qJkbUaCu7JoTDKPiBbJei/MV7zMJBLpNVG/AuJE2gbweI
 2levV76QzS2+fQVKv/9LUliqOYEp5T0/aybb+35Vzhf5WNpSO7s1oaCDAvSgUhS+
 qU1MIAROQQPCmdM8PwqzG9b2TGp/tYcWOju5bqt488Twmo0BbTGjYCFl6StJHibC
 mN5fASP5nQiz1fc3FrBp0h/PCQlGtd2ZgeyeC+lkPVcFovclA2vo/ib1k2LK/0nU
 TzkKIFRJZH58yYjppuBrB6c/aFfVkutE4Hz25i+3nZ91ZEyQKbv1mDxCyRNXNWjt
 Gteul6gUo/AjzMOWFFvH
 =AKR7
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging

# gpg: Signature made Tue 03 Jul 2018 04:42:11 BST
# gpg:                using RSA key BDBE7B27C0DE3057
# gpg: Good signature from "Jeffrey Cody <jcody@redhat.com>"
# gpg:                 aka "Jeffrey Cody <jeff@codyprime.org>"
# gpg:                 aka "Jeffrey Cody <codyprime@gmail.com>"
# Primary key fingerprint: 9957 4B4D 3474 90E7 9D98  D624 BDBE 7B27 C0DE 3057

* remotes/cody/tags/block-pull-request:
  backup: Use copy offloading
  block: Honour BDRV_REQ_NO_SERIALISING in copy range
  block: Fix parameter checking in bdrv_co_copy_range_internal

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
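
The backup change follows a try-offload-then-fall-back pattern: each chunk is first handed to blk_co_copy_range(), and if the drivers cannot offload the copy the job permanently drops back to the existing bounce-buffer read/write path. A minimal sketch of that control flow, with hypothetical helper names standing in for the functions added in the diff below:

/* Illustrative control flow only; the helper names are hypothetical
 * stand-ins for backup_cow_with_offload()/backup_cow_with_bounce_buffer(). */
while (start < end) {
    int ret;

    if (job->use_copy_range) {
        ret = try_copy_offload(job, start, end);            /* blk_co_copy_range() */
        if (ret < 0) {
            job->use_copy_range = false;                    /* never retry offload */
        }
    }
    if (!job->use_copy_range) {
        ret = copy_with_bounce_buffer(job, start, end);     /* read + write fallback */
    }
    if (ret < 0) {
        break;                                              /* report the error */
    }
    start += ret;                                           /* ret == bytes copied */
}
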
commit a395717cbd
Peter Maydell, 2018-07-03 11:49:51 +01:00
4 changed files with 132 additions and 59 deletions

block/backup.c

@@ -45,6 +45,8 @@ typedef struct BackupBlockJob {
QLIST_HEAD(, CowRequest) inflight_reqs;
HBitmap *copy_bitmap;
bool use_copy_range;
int64_t copy_range_size;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
@@ -86,19 +88,101 @@ static void cow_request_end(CowRequest *req)
qemu_co_queue_restart_all(&req->wait_queue);
}
/* Copy range to target with a bounce buffer and return the bytes copied. If
* error occurred, return a negative error number */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
struct iovec iov;
QEMUIOVector qiov;
BlockBackend *blk = job->common.blk;
int nbytes;
hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
nbytes = MIN(job->cluster_size, job->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
}
iov.iov_base = *bounce_buffer;
iov.iov_len = nbytes;
qemu_iovec_init_external(&qiov, &iov, 1);
ret = blk_co_preadv(blk, start, qiov.size, &qiov,
is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
if (ret < 0) {
trace_backup_do_cow_read_fail(job, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
if (qemu_iovec_is_zero(&qiov)) {
ret = blk_co_pwrite_zeroes(job->target, start,
qiov.size, BDRV_REQ_MAY_UNMAP);
} else {
ret = blk_co_pwritev(job->target, start,
qiov.size, &qiov,
job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
}
if (ret < 0) {
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
return ret;
}
/* Copy range to target and return the bytes copied. If error occurred, return a
* negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier)
{
int ret;
int nr_clusters;
BlockBackend *blk = job->common.blk;
int nbytes;
assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
nbytes = MIN(job->copy_range_size, end - start);
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
nr_clusters);
ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
if (ret < 0) {
trace_backup_do_cow_copy_range_fail(job, start, ret);
hbitmap_set(job->copy_bitmap, start / job->cluster_size,
nr_clusters);
return ret;
}
return nbytes;
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read,
bool is_write_notifier)
{
BlockBackend *blk = job->common.blk;
CowRequest cow_request;
struct iovec iov;
QEMUIOVector bounce_qiov;
void *bounce_buffer = NULL;
int ret = 0;
int64_t start, end; /* bytes */
int n; /* bytes */
void *bounce_buffer = NULL;
qemu_co_rwlock_rdlock(&job->flush_rwlock);
@@ -110,60 +194,38 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
wait_for_overlapping_requests(job, start, end);
cow_request_begin(&cow_request, job, start, end);
for (; start < end; start += job->cluster_size) {
while (start < end) {
if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
trace_backup_do_cow_skip(job, start);
start += job->cluster_size;
continue; /* already copied */
}
hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
trace_backup_do_cow_process(job, start);
n = MIN(job->cluster_size, job->len - start);
if (!bounce_buffer) {
bounce_buffer = blk_blockalign(blk, job->cluster_size);
}
iov.iov_base = bounce_buffer;
iov.iov_len = n;
qemu_iovec_init_external(&bounce_qiov, &iov, 1);
ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov,
is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
if (job->use_copy_range) {
ret = backup_cow_with_offload(job, start, end, is_write_notifier);
if (ret < 0) {
trace_backup_do_cow_read_fail(job, start, ret);
if (error_is_read) {
*error_is_read = true;
job->use_copy_range = false;
}
hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
goto out;
}
if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
ret = blk_co_pwrite_zeroes(job->target, start,
bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
} else {
ret = blk_co_pwritev(job->target, start,
bounce_qiov.size, &bounce_qiov,
job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
if (!job->use_copy_range) {
ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
error_is_read, &bounce_buffer);
}
if (ret < 0) {
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
*error_is_read = false;
}
hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
goto out;
break;
}
/* Publish progress, guest I/O counts as progress too. Note that the
* offset field is an opaque progress value, it is not a disk offset.
*/
job->bytes_read += n;
job_progress_update(&job->common.job, n);
start += ret;
job->bytes_read += ret;
job_progress_update(&job->common.job, ret);
ret = 0;
}
out:
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
@@ -665,6 +727,12 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
} else {
job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
job->use_copy_range = true;
job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
blk_get_max_transfer(job->target));
job->copy_range_size = MAX(job->cluster_size,
QEMU_ALIGN_UP(job->copy_range_size,
job->cluster_size));
/* Required permissions are already taken with target's blk_new() */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
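
For scale, the copy_range_size clamp above can be traced with concrete numbers. The standalone sketch below is illustrative only: the transfer limits are made up, and the macros merely mirror the semantics of QEMU's MIN_NON_ZERO, QEMU_ALIGN_UP and MAX, assuming the 64 KiB default backup cluster size.

/* Illustrative worked example, not part of the commit. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))
#define ALIGN_UP(n, m)     (((n) + (m) - 1) / (m) * (m))
#define MAX(a, b)          ((a) > (b) ? (a) : (b))

int main(void)
{
    int64_t cluster = 64 * 1024;            /* default backup cluster size      */
    int64_t max_src = 0;                    /* 0 == driver reports no limit     */
    int64_t max_dst = 8 * 1024 * 1024;      /* hypothetical 8 MiB target limit  */

    int64_t size = MIN_NON_ZERO(max_src, max_dst);    /* -> 8 MiB               */
    size = MAX(cluster, ALIGN_UP(size, cluster));     /* already aligned: 8 MiB */
    printf("copy_range_size = %" PRId64 "\n", size);

    /* If both limits were 0, MIN_NON_ZERO would yield 0 and the MAX() clamp
     * would fall back to one cluster (64 KiB). */
    return 0;
}
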

block/io.c

@@ -2897,18 +2897,11 @@ static int coroutine_fn bdrv_co_copy_range_internal(BdrvChild *src,
bool recurse_src)
{
BdrvTrackedRequest src_req, dst_req;
BlockDriverState *src_bs = src->bs;
BlockDriverState *dst_bs = dst->bs;
int ret;
if (!src || !dst || !src->bs || !dst->bs) {
if (!dst || !dst->bs) {
return -ENOMEDIUM;
}
ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
if (ret) {
return ret;
}
ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
if (ret) {
return ret;
@@ -2917,20 +2910,30 @@ static int coroutine_fn bdrv_co_copy_range_internal(BdrvChild *src,
return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, flags);
}
if (!src || !src->bs) {
return -ENOMEDIUM;
}
ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
if (ret) {
return ret;
}
if (!src->bs->drv->bdrv_co_copy_range_from
|| !dst->bs->drv->bdrv_co_copy_range_to
|| src->bs->encrypted || dst->bs->encrypted) {
return -ENOTSUP;
}
bdrv_inc_in_flight(src_bs);
bdrv_inc_in_flight(dst_bs);
tracked_request_begin(&src_req, src_bs, src_offset,
bdrv_inc_in_flight(src->bs);
bdrv_inc_in_flight(dst->bs);
tracked_request_begin(&src_req, src->bs, src_offset,
bytes, BDRV_TRACKED_READ);
tracked_request_begin(&dst_req, dst_bs, dst_offset,
tracked_request_begin(&dst_req, dst->bs, dst_offset,
bytes, BDRV_TRACKED_WRITE);
if (!(flags & BDRV_REQ_NO_SERIALISING)) {
wait_serialising_requests(&src_req);
wait_serialising_requests(&dst_req);
}
if (recurse_src) {
ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
src, src_offset,
@@ -2944,8 +2947,8 @@ static int coroutine_fn bdrv_co_copy_range_internal(BdrvChild *src,
}
tracked_request_end(&src_req);
tracked_request_end(&dst_req);
bdrv_dec_in_flight(src_bs);
bdrv_dec_in_flight(dst_bs);
bdrv_dec_in_flight(src->bs);
bdrv_dec_in_flight(dst->bs);
return ret;
}
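
Condensing the interleaved old and new lines above: after this series the destination is validated first, the BDRV_REQ_ZERO_WRITE shortcut is taken before any source check, and only then is a usable source required. A sketch of that ordering, using only the QEMU-internal helpers named in the diff (a condensation, not the verbatim committed function):

/* Sketch of the reordered checks; condensed from the diff, not verbatim. */
static int coroutine_fn copy_range_precheck(BdrvChild *src, uint64_t src_offset,
                                            BdrvChild *dst, uint64_t dst_offset,
                                            uint64_t bytes, BdrvRequestFlags flags)
{
    int ret;

    if (!dst || !dst->bs) {
        return -ENOMEDIUM;              /* a destination is always required */
    }
    ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (flags & BDRV_REQ_ZERO_WRITE) {
        /* Zero-write needs no source data: short-circuit before src checks. */
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, flags);
    }
    if (!src || !src->bs) {
        return -ENOMEDIUM;              /* only now must the source be usable */
    }
    return bdrv_check_byte_request(src->bs, src_offset, bytes);
}
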

block/trace-events

@@ -42,6 +42,7 @@ backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_copy_range_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
# blockdev.c
qmp_block_job_cancel(void *job) "job %p"

include/block/block.h

@@ -659,13 +659,14 @@ void bdrv_unregister_buf(BlockDriverState *bs, void *host);
* @dst: Destination child to copy data to
* @dst_offset: offset in @dst image to write data
* @bytes: number of bytes to copy
* @flags: request flags. Must be one of:
* 0 - actually read data from src;
* @flags: request flags. Supported flags:
* BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
* write on @dst as if bdrv_co_pwrite_zeroes is
* called. Used to simplify caller code, or
* during BlockDriver.bdrv_co_copy_range_from()
* recursion.
* BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
* requests currently in flight.
*
* Returns: 0 if succeeded; negative error code if failed.
**/
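
The flag documentation above can be read as a small decision: pass BDRV_REQ_ZERO_WRITE when the source range is known to be zero, and BDRV_REQ_NO_SERIALISING when the caller must not wait on the overlapping request it is servicing (as backup's write notifier does). A minimal sketch of such a caller, assuming the bdrv_co_copy_range() signature declared in this header; the wrapper itself is hypothetical:

/* Hypothetical wrapper, for illustration only; not part of this series. */
static int coroutine_fn copy_or_zero(BdrvChild *src, BdrvChild *dst,
                                     uint64_t offset, uint64_t bytes,
                                     bool known_zero, bool from_write_notifier)
{
    BdrvRequestFlags flags = 0;

    if (known_zero) {
        /* Treat @src as zeroes: @dst gets the equivalent of pwrite_zeroes. */
        flags |= BDRV_REQ_ZERO_WRITE;
    }
    if (from_write_notifier) {
        /* Do not serialise against the overlapping in-flight request. */
        flags |= BDRV_REQ_NO_SERIALISING;
    }
    return bdrv_co_copy_range(src, offset, dst, offset, bytes, flags);
}
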