Block patches for the 5.0 softfreeze:
- qemu-img measure for LUKS
- Improve block-copy's performance by reducing inter-request dependencies
- Make curl's detection of accept-ranges more robust
- Memleak fixes
- iotest fix

-----BEGIN PGP SIGNATURE-----
iQFGBAABCAAwFiEEkb62CjDbPohX0Rgp9AfbAGHVz0AFAl5o5UUSHG1yZWl0ekBy
ZWRoYXQuY29tAAoJEPQH2wBh1c9AnpMIALI1JGJ+kywe+BNTphOKtFGuIuHku/K4
2GujC9WjX4to1xyozockXcP/AlFCk1yicbxW8hxPMxgtNmLh6E6Y2vRNT4/Fnpjc
KeyFw062rtD/O6H8jchfX1wIaMBDBASM2GGUtonbfs9mdYmFnEPg/9jo0eX/b9Qp
kEDFyVonJsAGFT7gWYB7p2RxToqC/JQBdenacKuYeKkLQOMvUHChERktnRhSfRt6
bJIBkQ1e8xFuuFfnEUQR2GkvzzVw8k7kkY5gSN5m1kBCi3D1i/Pee5CFkYnQFiAD
66GGKH5OIFt83t7msqu1zhb7kobZ1OH671nMSTdqTRncRnQfLWloCkk=
=D7LF
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-03-11' into staging

Block patches for the 5.0 softfreeze:
- qemu-img measure for LUKS
- Improve block-copy's performance by reducing inter-request dependencies
- Make curl's detection of accept-ranges more robust
- Memleak fixes
- iotest fix

# gpg: Signature made Wed 11 Mar 2020 13:19:01 GMT
# gpg: using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg: issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-03-11:
  block/block-copy: hide structure definitions
  block/block-copy: reduce intersecting request lock
  block/block-copy: rename start to offset in interfaces
  block/block-copy: refactor interfaces to use bytes instead of end
  block/block-copy: factor out find_conflicting_inflight_req
  block/block-copy: use block_status
  block/block-copy: specialcase first copy_range request
  block/block-copy: fix progress calculation
  job: refactor progress to separate object
  block/qcow2-threads: fix qcow2_decompress
  qemu-img: free memory before re-assign
  block/qcow2: do free crypto_opts in qcow2_close()
  iotests: Fix nonportable use of od --endian
  block/curl: HTTP header field names are case insensitive
  block/curl: HTTP header fields allow whitespace around values
  iotests: add 288 luks qemu-img measure test
  qemu-img: allow qemu-img measure --object without a filename
  luks: implement .bdrv_measure()
  luks: extract qcrypto_block_calculate_payload_offset()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 5931ed5641
@@ -38,6 +38,7 @@ typedef struct BDRVBackupTopState {
     BlockCopyState *bcs;
     BdrvChild *target;
     bool active;
+    int64_t cluster_size;
 } BDRVBackupTopState;
 
 static coroutine_fn int backup_top_co_preadv(
@@ -57,8 +58,8 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
         return 0;
     }
 
-    off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
-    end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
+    off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
+    end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
 
     return block_copy(s->bcs, off, end - off, NULL);
 }
@@ -238,6 +239,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
         goto fail;
     }
 
+    state->cluster_size = cluster_size;
     state->bcs = block_copy_state_new(top->backing, state->target,
                                       cluster_size, write_flags, &local_err);
     if (local_err) {
@@ -57,15 +57,6 @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
     BackupBlockJob *s = opaque;
 
     s->bytes_read += bytes;
-    job_progress_update(&s->common.job, bytes);
-}
-
-static void backup_progress_reset_callback(void *opaque)
-{
-    BackupBlockJob *s = opaque;
-    uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
-
-    job_progress_set_remaining(&s->common.job, estimate);
 }
 
 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
@@ -111,7 +102,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
 
     if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
         /* If we failed and synced, merge in the bits we didn't copy: */
-        bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
+        bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
                                          NULL, true);
     }
 }
@@ -154,7 +145,8 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
+    bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
+                          backup_job->len);
 }
 
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -199,7 +191,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     BdrvDirtyBitmapIter *bdbi;
     int ret = 0;
 
-    bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
+    bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
     while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
         do {
             if (yield_and_check(job)) {
@@ -219,14 +211,14 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     return ret;
 }
 
-static void backup_init_copy_bitmap(BackupBlockJob *job)
+static void backup_init_bcs_bitmap(BackupBlockJob *job)
 {
     bool ret;
     uint64_t estimate;
+    BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);
 
     if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
-        ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
-                                               job->sync_bitmap,
+        ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
                                                NULL, true);
         assert(ret);
     } else {
@@ -235,12 +227,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
              * We can't hog the coroutine to initialize this thoroughly.
              * Set a flag and resume work when we are able to yield safely.
              */
-            job->bcs->skip_unallocated = true;
+            block_copy_set_skip_unallocated(job->bcs, true);
         }
-        bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
+        bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
     }
 
-    estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
+    estimate = bdrv_get_dirty_count(bcs_bitmap);
     job_progress_set_remaining(&job->common.job, estimate);
 }
 
@@ -249,7 +241,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     int ret = 0;
 
-    backup_init_copy_bitmap(s);
+    backup_init_bcs_bitmap(s);
 
     if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
         int64_t offset = 0;
@@ -268,12 +260,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
 
             offset += count;
         }
-        s->bcs->skip_unallocated = false;
+        block_copy_set_skip_unallocated(s->bcs, false);
     }
 
     if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /*
-         * All bits are set in copy_bitmap to allow any cluster to be copied.
+         * All bits are set in bcs bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied.
         */
         while (!job_is_cancelled(job)) {
@@ -464,8 +456,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     job->cluster_size = cluster_size;
     job->len = len;
 
-    block_copy_set_callbacks(bcs, backup_progress_bytes_callback,
-                             backup_progress_reset_callback, job);
+    block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
+    block_copy_set_progress_meter(bcs, &job->common.job.progress);
 
     /* Required permissions are already taken by backup-top target */
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
@@ -24,37 +24,136 @@
 #define BLOCK_COPY_MAX_BUFFER (1 * MiB)
 #define BLOCK_COPY_MAX_MEM (128 * MiB)
 
-static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
-                                                       int64_t start,
-                                                       int64_t end)
+typedef struct BlockCopyInFlightReq {
+    int64_t offset;
+    int64_t bytes;
+    QLIST_ENTRY(BlockCopyInFlightReq) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
+} BlockCopyInFlightReq;
+
+typedef struct BlockCopyState {
+    /*
+     * BdrvChild objects are not owned or managed by block-copy. They are
+     * provided by block-copy user and user is responsible for appropriate
+     * permissions on these children.
+     */
+    BdrvChild *source;
+    BdrvChild *target;
+    BdrvDirtyBitmap *copy_bitmap;
+    int64_t in_flight_bytes;
+    int64_t cluster_size;
+    bool use_copy_range;
+    int64_t copy_size;
+    uint64_t len;
+    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
+
+    BdrvRequestFlags write_flags;
+
+    /*
+     * skip_unallocated:
+     *
+     * Used by sync=top jobs, which first scan the source node for unallocated
+     * areas and clear them in the copy_bitmap. During this process, the bitmap
+     * is thus not fully initialized: It may still have bits set for areas that
+     * are unallocated and should actually not be copied.
+     *
+     * This is indicated by skip_unallocated.
+     *
+     * In this case, block_copy() will query the source’s allocation status,
+     * skip unallocated regions, clear them in the copy_bitmap, and invoke
+     * block_copy_reset_unallocated() every time it does.
+     */
+    bool skip_unallocated;
+
+    ProgressMeter *progress;
+    /* progress_bytes_callback: called when some copying progress is done. */
+    ProgressBytesCallbackFunc progress_bytes_callback;
+    void *progress_opaque;
+
+    SharedResource *mem;
+} BlockCopyState;
+
+static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
+                                                           int64_t offset,
+                                                           int64_t bytes)
 {
     BlockCopyInFlightReq *req;
-    bool waited;
 
-    do {
-        waited = false;
-        QLIST_FOREACH(req, &s->inflight_reqs, list) {
-            if (end > req->start_byte && start < req->end_byte) {
-                qemu_co_queue_wait(&req->wait_queue, NULL);
-                waited = true;
-                break;
-            }
+    QLIST_FOREACH(req, &s->inflight_reqs, list) {
+        if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
+            return req;
         }
-    } while (waited);
+    }
+
+    return NULL;
 }
 
+/*
+ * If there are no intersecting requests return false. Otherwise, wait for the
+ * first found intersecting request to finish and return true.
+ */
+static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
+                                             int64_t bytes)
+{
+    BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
+
+    if (!req) {
+        return false;
+    }
+
+    qemu_co_queue_wait(&req->wait_queue, NULL);
+
+    return true;
+}
+
+/* Called only on full-dirty region */
 static void block_copy_inflight_req_begin(BlockCopyState *s,
                                           BlockCopyInFlightReq *req,
-                                          int64_t start, int64_t end)
+                                          int64_t offset, int64_t bytes)
 {
-    req->start_byte = start;
-    req->end_byte = end;
+    assert(!find_conflicting_inflight_req(s, offset, bytes));
+
+    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+    s->in_flight_bytes += bytes;
+
+    req->offset = offset;
+    req->bytes = bytes;
     qemu_co_queue_init(&req->wait_queue);
     QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
 }
 
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+/*
+ * block_copy_inflight_req_shrink
+ *
+ * Drop the tail of the request to be handled later. Set dirty bits back and
+ * wake up all requests waiting for us (may be some of them are not intersecting
+ * with shrunk request)
+ */
+static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
+        BlockCopyInFlightReq *req, int64_t new_bytes)
+{
+    if (new_bytes == req->bytes) {
+        return;
+    }
+
+    assert(new_bytes > 0 && new_bytes < req->bytes);
+
+    s->in_flight_bytes -= req->bytes - new_bytes;
+    bdrv_set_dirty_bitmap(s->copy_bitmap,
+                          req->offset + new_bytes, req->bytes - new_bytes);
+
+    req->bytes = new_bytes;
+    qemu_co_queue_restart_all(&req->wait_queue);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
+                                                     BlockCopyInFlightReq *req,
+                                                     int ret)
+{
+    s->in_flight_bytes -= req->bytes;
+    if (ret < 0) {
+        bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
+    }
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
 }
@@ -70,16 +169,19 @@ void block_copy_state_free(BlockCopyState *s)
     g_free(s);
 }
 
+static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
+{
+    return MIN_NON_ZERO(INT_MAX,
+                        MIN_NON_ZERO(source->bs->bl.max_transfer,
+                                     target->bs->bl.max_transfer));
+}
+
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size,
                                      BdrvRequestFlags write_flags, Error **errp)
 {
     BlockCopyState *s;
     BdrvDirtyBitmap *copy_bitmap;
-    uint32_t max_transfer =
-            MIN_NON_ZERO(INT_MAX,
-                         MIN_NON_ZERO(source->bs->bl.max_transfer,
-                                      target->bs->bl.max_transfer));
 
     copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                            errp);
@@ -99,7 +201,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         .mem = shres_create(BLOCK_COPY_MAX_MEM),
     };
 
-    if (max_transfer < cluster_size) {
+    if (block_copy_max_transfer(source, target) < cluster_size) {
         /*
          * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than block-copy cluster size, so fallback to
@@ -114,12 +216,11 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         s->copy_size = cluster_size;
     } else {
         /*
-         * copy_range does not respect max_transfer (it's a TODO), so we factor
-         * that in here.
+         * We enable copy-range, but keep small copy_size, until first
+         * successful copy_range (look at block_copy_do_copy).
         */
         s->use_copy_range = true;
-        s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
-                           QEMU_ALIGN_DOWN(max_transfer, cluster_size));
+        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
     }
 
     QLIST_INIT(&s->inflight_reqs);
@@ -127,48 +228,83 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
     return s;
 }
 
-void block_copy_set_callbacks(
+void block_copy_set_progress_callback(
         BlockCopyState *s,
         ProgressBytesCallbackFunc progress_bytes_callback,
-        ProgressResetCallbackFunc progress_reset_callback,
         void *progress_opaque)
 {
     s->progress_bytes_callback = progress_bytes_callback;
-    s->progress_reset_callback = progress_reset_callback;
     s->progress_opaque = progress_opaque;
 }
 
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
+{
+    s->progress = pm;
+}
+
 /*
  * block_copy_do_copy
  *
- * Do copy of cluser-aligned chunk. @end is allowed to exceed s->len only to
- * cover last cluster when s->len is not aligned to clusters.
+ * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
+ * s->len only to cover last cluster when s->len is not aligned to clusters.
  *
  * No sync here: nor bitmap neighter intersecting requests handling, only copy.
  *
  * Returns 0 on success.
  */
 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
-                                           int64_t start, int64_t end,
-                                           bool *error_is_read)
+                                           int64_t offset, int64_t bytes,
+                                           bool zeroes, bool *error_is_read)
 {
     int ret;
-    int nbytes = MIN(end, s->len) - start;
+    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
     void *bounce_buffer = NULL;
 
-    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
-    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
-    assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
+    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
+    assert(offset < s->len);
+    assert(offset + bytes <= s->len ||
+           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
+    assert(nbytes < INT_MAX);
+
+    if (zeroes) {
+        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
+                                    ~BDRV_REQ_WRITE_COMPRESSED);
+        if (ret < 0) {
+            trace_block_copy_write_zeroes_fail(s, offset, ret);
+            if (error_is_read) {
+                *error_is_read = false;
+            }
+        }
+        return ret;
+    }
 
     if (s->use_copy_range) {
-        ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
+        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
         if (ret < 0) {
-            trace_block_copy_copy_range_fail(s, start, ret);
+            trace_block_copy_copy_range_fail(s, offset, ret);
             s->use_copy_range = false;
             s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
             /* Fallback to read+write with allocated buffer */
         } else {
+            if (s->use_copy_range) {
+                /*
+                 * Successful copy-range. Now increase copy_size. copy_range
+                 * does not respect max_transfer (it's a TODO), so we factor
+                 * that in here.
+                 *
+                 * Note: we double-check s->use_copy_range for the case when
+                 * parallel block-copy request unsets it during previous
+                 * bdrv_co_copy_range call.
+                 */
+                s->copy_size =
+                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
+                                                                    s->target),
+                                            s->cluster_size));
+            }
             goto out;
         }
     }
@@ -176,24 +312,27 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
     /*
      * In case of failed copy_range request above, we may proceed with buffered
      * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
-     * be properly limited, so don't care too much.
+     * be properly limited, so don't care too much. Moreover the most likely
+     * case (copy_range is unsupported for the configuration, so the very first
+     * copy_range request fails) is handled by setting large copy_size only
+     * after first successful copy_range.
      */
 
     bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
 
-    ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
+    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
     if (ret < 0) {
-        trace_block_copy_read_fail(s, start, ret);
+        trace_block_copy_read_fail(s, offset, ret);
         if (error_is_read) {
             *error_is_read = true;
         }
         goto out;
     }
 
-    ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
+    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                          s->write_flags);
     if (ret < 0) {
-        trace_block_copy_write_fail(s, start, ret);
+        trace_block_copy_write_fail(s, offset, ret);
         if (error_is_read) {
             *error_is_read = false;
         }
@@ -206,6 +345,38 @@ out:
     return ret;
 }
 
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+                                   int64_t bytes, int64_t *pnum)
+{
+    int64_t num;
+    BlockDriverState *base;
+    int ret;
+
+    if (s->skip_unallocated && s->source->bs->backing) {
+        base = s->source->bs->backing->bs;
+    } else {
+        base = NULL;
+    }
+
+    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
+                                  NULL, NULL);
+    if (ret < 0 || num < s->cluster_size) {
+        /*
+         * On error or if failed to obtain large enough chunk just fallback to
+         * copy one cluster.
+         */
+        num = s->cluster_size;
+        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
+    } else if (offset + num == s->len) {
+        num = QEMU_ALIGN_UP(num, s->cluster_size);
+    } else {
+        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
+    }
+
+    *pnum = num;
+    return ret;
+}
+
 /*
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
@@ -269,21 +440,28 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
 
     if (!ret) {
         bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
-        s->progress_reset_callback(s->progress_opaque);
+        progress_set_remaining(s->progress,
+                               bdrv_get_dirty_count(s->copy_bitmap) +
+                               s->in_flight_bytes);
     }
 
     *count = bytes;
     return ret;
 }
 
-int coroutine_fn block_copy(BlockCopyState *s,
-                            int64_t start, uint64_t bytes,
-                            bool *error_is_read)
+/*
+ * block_copy_dirty_clusters
+ *
+ * Copy dirty clusters in @offset/@bytes range.
+ * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
+ * clusters found and -errno on failure.
+ */
+static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
+                                                  int64_t offset, int64_t bytes,
+                                                  bool *error_is_read)
 {
     int ret = 0;
-    int64_t end = bytes + start; /* bytes */
-    int64_t status_bytes;
-    BlockCopyInFlightReq req;
+    bool found_dirty = false;
 
     /*
      * block_copy() user is responsible for keeping source and target in same
@@ -292,60 +470,109 @@ int coroutine_fn block_copy(BlockCopyState *s,
     assert(bdrv_get_aio_context(s->source->bs) ==
            bdrv_get_aio_context(s->target->bs));
 
-    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
-    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
-    block_copy_wait_inflight_reqs(s, start, bytes);
-    block_copy_inflight_req_begin(s, &req, start, end);
+    while (bytes) {
+        BlockCopyInFlightReq req;
+        int64_t next_zero, cur_bytes, status_bytes;
 
-    while (start < end) {
-        int64_t next_zero, chunk_end;
-
-        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
-            trace_block_copy_skip(s, start);
-            start += s->cluster_size;
+        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
+            trace_block_copy_skip(s, offset);
+            offset += s->cluster_size;
+            bytes -= s->cluster_size;
             continue; /* already copied */
         }
 
-        chunk_end = MIN(end, start + s->copy_size);
+        found_dirty = true;
 
-        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
-                                                chunk_end - start);
+        cur_bytes = MIN(bytes, s->copy_size);
+
+        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
+                                                cur_bytes);
         if (next_zero >= 0) {
-            assert(next_zero > start); /* start is dirty */
-            assert(next_zero < chunk_end); /* no need to do MIN() */
-            chunk_end = next_zero;
+            assert(next_zero > offset); /* offset is dirty */
+            assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
+            cur_bytes = next_zero - offset;
         }
+        block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
 
-        if (s->skip_unallocated) {
-            ret = block_copy_reset_unallocated(s, start, &status_bytes);
-            if (ret == 0) {
-                trace_block_copy_skip_range(s, start, status_bytes);
-                start += status_bytes;
-                continue;
-            }
-            /* Clamp to known allocated region */
-            chunk_end = MIN(chunk_end, start + status_bytes);
+        ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+        assert(ret >= 0); /* never fail */
+        cur_bytes = MIN(cur_bytes, status_bytes);
+        block_copy_inflight_req_shrink(s, &req, cur_bytes);
+        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
+            block_copy_inflight_req_end(s, &req, 0);
+            progress_set_remaining(s->progress,
+                                   bdrv_get_dirty_count(s->copy_bitmap) +
+                                   s->in_flight_bytes);
+            trace_block_copy_skip_range(s, offset, status_bytes);
+            offset += status_bytes;
+            bytes -= status_bytes;
+            continue;
         }
 
-        trace_block_copy_process(s, start);
+        trace_block_copy_process(s, offset);
 
-        bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
-
-        co_get_from_shres(s->mem, chunk_end - start);
-        ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
-        co_put_to_shres(s->mem, chunk_end - start);
+        co_get_from_shres(s->mem, cur_bytes);
+        ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
+                                 error_is_read);
+        co_put_to_shres(s->mem, cur_bytes);
+        block_copy_inflight_req_end(s, &req, ret);
         if (ret < 0) {
-            bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
-            break;
+            return ret;
         }
 
-        s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
-        start = chunk_end;
-        ret = 0;
+        progress_work_done(s->progress, cur_bytes);
+        s->progress_bytes_callback(cur_bytes, s->progress_opaque);
+        offset += cur_bytes;
+        bytes -= cur_bytes;
     }
 
-    block_copy_inflight_req_end(&req);
+    return found_dirty;
+}
+
+/*
+ * block_copy
+ *
+ * Copy requested region, accordingly to dirty bitmap.
+ * Collaborate with parallel block_copy requests: if they succeed it will help
+ * us. If they fail, we will retry not-copied regions. So, if we return error,
+ * it means that some I/O operation failed in context of _this_ block_copy call,
+ * not some parallel operation.
+ */
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
+                            bool *error_is_read)
+{
+    int ret;
+
+    do {
+        ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
+
+        if (ret == 0) {
+            ret = block_copy_wait_one(s, offset, bytes);
+        }
+
+        /*
+         * We retry in two cases:
+         * 1. Some progress done
+         *    Something was copied, which means that there were yield points
+         *    and some new dirty bits may have appeared (due to failed parallel
+         *    block-copy requests).
+         * 2. We have waited for some intersecting block-copy request
+         *    It may have failed and produced new dirty bits.
+         */
+    } while (ret > 0);
 
     return ret;
 }
+
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
+{
+    return s->copy_bitmap;
+}
+
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
+{
+    s->skip_unallocated = skip;
+}
@@ -484,6 +484,67 @@ static int64_t block_crypto_getlength(BlockDriverState *bs)
 }
 
 
+static BlockMeasureInfo *block_crypto_measure(QemuOpts *opts,
+                                              BlockDriverState *in_bs,
+                                              Error **errp)
+{
+    g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
+    Error *local_err = NULL;
+    BlockMeasureInfo *info;
+    uint64_t size;
+    size_t luks_payload_size;
+    QDict *cryptoopts;
+
+    /*
+     * Preallocation mode doesn't affect size requirements but we must consume
+     * the option.
+     */
+    g_free(qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC));
+
+    size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
+
+    if (in_bs) {
+        int64_t ssize = bdrv_getlength(in_bs);
+
+        if (ssize < 0) {
+            error_setg_errno(&local_err, -ssize,
+                             "Unable to get image virtual_size");
+            goto err;
+        }
+
+        size = ssize;
+    }
+
+    cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL,
+                                             &block_crypto_create_opts_luks, true);
+    qdict_put_str(cryptoopts, "format", "luks");
+    create_opts = block_crypto_create_opts_init(cryptoopts, &local_err);
+    qobject_unref(cryptoopts);
+    if (!create_opts) {
+        goto err;
+    }
+
+    if (!qcrypto_block_calculate_payload_offset(create_opts, NULL,
+                                                &luks_payload_size,
+                                                &local_err)) {
+        goto err;
+    }
+
+    /*
+     * Unallocated blocks are still encrypted so allocation status makes no
+     * difference to the file size.
+     */
+    info = g_new(BlockMeasureInfo, 1);
+    info->fully_allocated = luks_payload_size + size;
+    info->required = luks_payload_size + size;
+    return info;
+
+err:
+    error_propagate(errp, local_err);
+    return NULL;
+}
+
+
 static int block_crypto_probe_luks(const uint8_t *buf,
                                    int buf_size,
                                    const char *filename) {
@@ -670,6 +731,7 @@ static BlockDriver bdrv_crypto_luks = {
     .bdrv_co_preadv     = block_crypto_co_preadv,
     .bdrv_co_pwritev    = block_crypto_co_pwritev,
     .bdrv_getlength     = block_crypto_getlength,
+    .bdrv_measure       = block_crypto_measure,
     .bdrv_get_info      = block_crypto_get_info_luks,
     .bdrv_get_specific_info = block_crypto_get_specific_info_luks,

 block/curl.c | 32
@@ -214,11 +214,35 @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
 {
     BDRVCURLState *s = opaque;
     size_t realsize = size * nmemb;
-    const char *accept_line = "Accept-Ranges: bytes";
+    const char *header = (char *)ptr;
+    const char *end = header + realsize;
+    const char *accept_ranges = "accept-ranges:";
+    const char *bytes = "bytes";
 
-    if (realsize >= strlen(accept_line)
-        && strncmp((char *)ptr, accept_line, strlen(accept_line)) == 0) {
-        s->accept_range = true;
+    if (realsize >= strlen(accept_ranges)
+        && g_ascii_strncasecmp(header, accept_ranges,
+                               strlen(accept_ranges)) == 0) {
+
+        char *p = strchr(header, ':') + 1;
+
+        /* Skip whitespace between the header name and value. */
+        while (p < end && *p && g_ascii_isspace(*p)) {
+            p++;
+        }
+
+        if (end - p >= strlen(bytes)
+            && strncmp(p, bytes, strlen(bytes)) == 0) {
+
+            /* Check that there is nothing but whitespace after the value. */
+            p += strlen(bytes);
+            while (p < end && *p && g_ascii_isspace(*p)) {
+                p++;
+            }
+
+            if (p == end || !*p) {
+                s->accept_range = true;
+            }
+        }
     }
 
     return realsize;
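Note (editor's illustration, not part of the commit): the new curl_header_cb matching above can be exercised outside QEMU. The following standalone sketch applies the same case-insensitive, whitespace-tolerant Accept-Ranges check to a few sample header lines; the file name and helper name are my own, and it only assumes GLib for g_ascii_strncasecmp()/g_ascii_isspace():

    /* accept_ranges_demo.c - illustration only; build with:
     *   gcc accept_ranges_demo.c $(pkg-config --cflags --libs glib-2.0)
     */
    #include <glib.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the header check introduced in block/curl.c */
    static bool header_is_accept_ranges_bytes(const char *header, size_t realsize)
    {
        const char *end = header + realsize;
        const char *accept_ranges = "accept-ranges:";
        const char *bytes = "bytes";

        if (realsize < strlen(accept_ranges) ||
            g_ascii_strncasecmp(header, accept_ranges, strlen(accept_ranges))) {
            return false;
        }

        /* Skip whitespace between the header name and value. */
        const char *p = strchr(header, ':') + 1;
        while (p < end && *p && g_ascii_isspace(*p)) {
            p++;
        }

        if (end - p < (ptrdiff_t)strlen(bytes) ||
            strncmp(p, bytes, strlen(bytes)) != 0) {
            return false;
        }

        /* Only whitespace may follow the value. */
        p += strlen(bytes);
        while (p < end && *p && g_ascii_isspace(*p)) {
            p++;
        }
        return p == end || !*p;
    }

    int main(void)
    {
        const char *samples[] = {
            "Accept-Ranges: bytes\r\n",
            "accept-ranges:bytes\r\n",
            "Accept-Ranges:   bytes   \r\n",
            "Accept-Ranges: none\r\n",
        };
        for (size_t i = 0; i < G_N_ELEMENTS(samples); i++) {
            printf("%s -> %s\n", samples[i],
                   header_is_accept_ranges_bytes(samples[i], strlen(samples[i]))
                   ? "byte ranges supported" : "ignored");
        }
        return 0;
    }

The first three samples are accepted, the last is ignored, matching the behaviour the two curl patches aim for.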
@@ -128,12 +128,12 @@ static ssize_t qcow2_compress(void *dest, size_t dest_size,
  * @src - source buffer, @src_size bytes
  *
  * Returns: 0 on success
- *          -1 on fail
+ *          -EIO on fail
  */
 static ssize_t qcow2_decompress(void *dest, size_t dest_size,
                                 const void *src, size_t src_size)
 {
-    int ret = 0;
+    int ret;
     z_stream strm;
 
     memset(&strm, 0, sizeof(strm));
@@ -144,17 +144,19 @@ static ssize_t qcow2_decompress(void *dest, size_t dest_size,
 
     ret = inflateInit2(&strm, -12);
     if (ret != Z_OK) {
-        return -1;
+        return -EIO;
     }
 
     ret = inflate(&strm, Z_FINISH);
-    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
+    if ((ret == Z_STREAM_END || ret == Z_BUF_ERROR) && strm.avail_out == 0) {
         /*
          * We approve Z_BUF_ERROR because we need @dest buffer to be filled, but
         * @src buffer may be processed partly (because in qcow2 we know size of
         * compressed data with precision of one sector)
         */
-        ret = -1;
+        ret = 0;
+    } else {
+        ret = -EIO;
     }
 
     inflateEnd(&strm);
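Note (editor's illustration, not part of the commit): qcow2 only knows the compressed size with sector precision, so qcow2_decompress() stops when the output buffer is full and must treat Z_BUF_ERROR like Z_STREAM_END, as the comment above says. The following standalone zlib sketch, with names of my own choosing, shows the same raw-deflate setup (-12 window bits) and the same acceptance rule:

    /* raw_deflate_demo.c - illustration only; build with: gcc raw_deflate_demo.c -lz */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        const unsigned char src[] =
            "qcow2 compressed cluster payload, qcow2 compressed cluster payload";
        unsigned char packed[256] = { 0 };
        unsigned char unpacked[sizeof(src)];
        z_stream strm;
        int ret;

        /* Compress with a raw deflate stream (-12 window bits, as qcow2 does). */
        memset(&strm, 0, sizeof(strm));
        deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -12, 9,
                     Z_DEFAULT_STRATEGY);
        strm.next_in = (unsigned char *)src;
        strm.avail_in = sizeof(src);
        strm.next_out = packed;
        strm.avail_out = sizeof(packed);
        deflate(&strm, Z_FINISH);
        uLong packed_len = sizeof(packed) - strm.avail_out;
        deflateEnd(&strm);

        /* Decompress into an exactly-sized buffer while offering more input
         * than the stream really needs, similar to a sector-rounded source. */
        memset(&strm, 0, sizeof(strm));
        inflateInit2(&strm, -12);
        strm.next_in = packed;
        strm.avail_in = sizeof(packed);   /* more than packed_len on purpose */
        strm.next_out = unpacked;
        strm.avail_out = sizeof(unpacked);
        ret = inflate(&strm, Z_FINISH);

        /* Same acceptance rule as qcow2_decompress(): a full output buffer
         * with Z_STREAM_END or Z_BUF_ERROR is success, anything else is -EIO. */
        if ((ret == Z_STREAM_END || ret == Z_BUF_ERROR) && strm.avail_out == 0) {
            printf("ok: inflate returned %d (packed %lu bytes)\n", ret, packed_len);
        } else {
            printf("error: inflate returned %d\n", ret);
        }
        inflateEnd(&strm);
        return 0;
    }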
@@ -2610,6 +2610,7 @@ static void qcow2_close(BlockDriverState *bs)
 
     qcrypto_block_free(s->crypto);
     s->crypto = NULL;
+    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
 
     g_free(s->unknown_header_fields);
     cleanup_unknown_header_ext(bs);
@@ -4608,60 +4609,6 @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
     return ret;
 }
 
-static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block,
-        size_t headerlen, void *opaque, Error **errp)
-{
-    size_t *headerlenp = opaque;
-
-    /* Stash away the payload size */
-    *headerlenp = headerlen;
-    return 0;
-}
-
-static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block,
-        size_t offset, const uint8_t *buf, size_t buflen,
-        void *opaque, Error **errp)
-{
-    /* Discard the bytes, we're not actually writing to an image */
-    return buflen;
-}
-
-/* Determine the number of bytes for the LUKS payload */
-static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len,
-                                         Error **errp)
-{
-    QDict *opts_qdict;
-    QDict *cryptoopts_qdict;
-    QCryptoBlockCreateOptions *cryptoopts;
-    QCryptoBlock *crypto;
-
-    /* Extract "encrypt." options into a qdict */
-    opts_qdict = qemu_opts_to_qdict(opts, NULL);
-    qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
-    qobject_unref(opts_qdict);
-
-    /* Build QCryptoBlockCreateOptions object from qdict */
-    qdict_put_str(cryptoopts_qdict, "format", "luks");
-    cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp);
-    qobject_unref(cryptoopts_qdict);
-    if (!cryptoopts) {
-        return false;
-    }
-
-    /* Fake LUKS creation in order to determine the payload size */
-    crypto = qcrypto_block_create(cryptoopts, "encrypt.",
-                                  qcow2_measure_crypto_hdr_init_func,
-                                  qcow2_measure_crypto_hdr_write_func,
-                                  len, errp);
-    qapi_free_QCryptoBlockCreateOptions(cryptoopts);
-    if (!crypto) {
-        return false;
-    }
-
-    qcrypto_block_free(crypto);
-    return true;
-}
-
 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
                                        Error **errp)
 {
@@ -4712,9 +4659,27 @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
     g_free(optstr);
 
     if (has_luks) {
+        g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
+        QDict *opts_qdict;
+        QDict *cryptoopts;
         size_t headerlen;
 
-        if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) {
+        opts_qdict = qemu_opts_to_qdict(opts, NULL);
+        qdict_extract_subqdict(opts_qdict, &cryptoopts, "encrypt.");
+        qobject_unref(opts_qdict);
+
+        qdict_put_str(cryptoopts, "format", "luks");
+
+        create_opts = block_crypto_create_opts_init(cryptoopts, errp);
+        qobject_unref(cryptoopts);
+        if (!create_opts) {
+            goto err;
+        }
+
+        if (!qcrypto_block_calculate_payload_offset(create_opts,
+                                                    "encrypt.",
+                                                    &headerlen,
+                                                    &local_err)) {
             goto err;
         }
 
@@ -48,6 +48,7 @@ block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
 block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
 block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
 block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
 
 # ../blockdev.c
 qmp_block_job_cancel(void *job) "job %p"

 blockjob.c | 16
@@ -299,8 +299,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     info->device = g_strdup(job->job.id);
     info->busy = atomic_read(&job->job.busy);
     info->paused = job->job.pause_count > 0;
-    info->offset = job->job.progress_current;
-    info->len = job->job.progress_total;
+    info->offset = job->job.progress.current;
+    info->len = job->job.progress.total;
     info->speed = job->speed;
     info->io_status = job->iostatus;
     info->ready = job_is_ready(&job->job),
@@ -330,8 +330,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
 
     qapi_event_send_block_job_cancelled(job_type(&job->job),
                                         job->job.id,
-                                        job->job.progress_total,
-                                        job->job.progress_current,
+                                        job->job.progress.total,
+                                        job->job.progress.current,
                                         job->speed);
 }
 
@@ -350,8 +350,8 @@ static void block_job_event_completed(Notifier *n, void *opaque)
 
     qapi_event_send_block_job_completed(job_type(&job->job),
                                         job->job.id,
-                                        job->job.progress_total,
-                                        job->job.progress_current,
+                                        job->job.progress.total,
+                                        job->job.progress.current,
                                         job->speed,
                                         !!msg,
                                         msg);
@@ -379,8 +379,8 @@ static void block_job_event_ready(Notifier *n, void *opaque)
 
     qapi_event_send_block_job_ready(job_type(&job->job),
                                     job->job.id,
-                                    job->job.progress_total,
-                                    job->job.progress_current,
+                                    job->job.progress.total,
+                                    job->job.progress.current,
                                     job->speed);
 }
@@ -115,6 +115,42 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
 }
 
 
+static ssize_t qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block,
+        size_t headerlen, void *opaque, Error **errp)
+{
+    size_t *headerlenp = opaque;
+
+    /* Stash away the payload size */
+    *headerlenp = headerlen;
+    return 0;
+}
+
+
+static ssize_t qcrypto_block_headerlen_hdr_write_func(QCryptoBlock *block,
+        size_t offset, const uint8_t *buf, size_t buflen,
+        void *opaque, Error **errp)
+{
+    /* Discard the bytes, we're not actually writing to an image */
+    return buflen;
+}
+
+
+bool
+qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts,
+                                       const char *optprefix,
+                                       size_t *len,
+                                       Error **errp)
+{
+    /* Fake LUKS creation in order to determine the payload size */
+    g_autoptr(QCryptoBlock) crypto =
+        qcrypto_block_create(create_opts, optprefix,
+                             qcrypto_block_headerlen_hdr_init_func,
+                             qcrypto_block_headerlen_hdr_write_func,
+                             len, errp);
+    return crypto != NULL;
+}
+
+
 QCryptoBlockInfo *qcrypto_block_get_info(QCryptoBlock *block,
                                          Error **errp)
 {
@@ -18,79 +18,30 @@
 #include "block/block.h"
 #include "qemu/co-shared-resource.h"
 
-typedef struct BlockCopyInFlightReq {
-    int64_t start_byte;
-    int64_t end_byte;
-    QLIST_ENTRY(BlockCopyInFlightReq) list;
-    CoQueue wait_queue; /* coroutines blocked on this request */
-} BlockCopyInFlightReq;
-
 typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
-typedef void (*ProgressResetCallbackFunc)(void *opaque);
-typedef struct BlockCopyState {
-    /*
-     * BdrvChild objects are not owned or managed by block-copy. They are
-     * provided by block-copy user and user is responsible for appropriate
-     * permissions on these children.
-     */
-    BdrvChild *source;
-    BdrvChild *target;
-    BdrvDirtyBitmap *copy_bitmap;
-    int64_t cluster_size;
-    bool use_copy_range;
-    int64_t copy_size;
-    uint64_t len;
-    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
-
-    BdrvRequestFlags write_flags;
-
-    /*
-     * skip_unallocated:
-     *
-     * Used by sync=top jobs, which first scan the source node for unallocated
-     * areas and clear them in the copy_bitmap. During this process, the bitmap
-     * is thus not fully initialized: It may still have bits set for areas that
-     * are unallocated and should actually not be copied.
-     *
-     * This is indicated by skip_unallocated.
-     *
-     * In this case, block_copy() will query the source’s allocation status,
-     * skip unallocated regions, clear them in the copy_bitmap, and invoke
-     * block_copy_reset_unallocated() every time it does.
-     */
-    bool skip_unallocated;
-
-    /* progress_bytes_callback: called when some copying progress is done. */
-    ProgressBytesCallbackFunc progress_bytes_callback;
-
-    /*
-     * progress_reset_callback: called when some bytes reset from copy_bitmap
-     * (see @skip_unallocated above). The callee is assumed to recalculate how
-     * many bytes remain based on the dirty bit count of copy_bitmap.
-     */
-    ProgressResetCallbackFunc progress_reset_callback;
-    void *progress_opaque;
-
-    SharedResource *mem;
-} BlockCopyState;
+typedef struct BlockCopyState BlockCopyState;
 
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size,
                                      BdrvRequestFlags write_flags,
                                      Error **errp);
 
-void block_copy_set_callbacks(
+void block_copy_set_progress_callback(
     BlockCopyState *s,
     ProgressBytesCallbackFunc progress_bytes_callback,
-    ProgressResetCallbackFunc progress_reset_callback,
     void *progress_opaque);
 
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
+
 void block_copy_state_free(BlockCopyState *s);
 
 int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                      int64_t offset, int64_t *count);
 
-int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                             bool *error_is_read);
 
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
+
 #endif /* BLOCK_COPY_H */
@@ -145,6 +145,26 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
                                    Error **errp);
 
 
+/**
+ * qcrypto_block_calculate_payload_offset:
+ * @create_opts: the encryption options
+ * @optprefix: name prefix for options
+ * @len: output for number of header bytes before payload
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Calculate the number of header bytes before the payload in an encrypted
+ * storage volume. The header is an area before the payload that is reserved
+ * for encryption metadata.
+ *
+ * Returns: true on success, false on error
+ */
+bool
+qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts,
+                                       const char *optprefix,
+                                       size_t *len,
+                                       Error **errp);
+
+
 /**
  * qcrypto_block_get_info:
  * @block: the block encryption object
@@ -269,5 +289,7 @@ uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block);
 void qcrypto_block_free(QCryptoBlock *block);
 
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlock, qcrypto_block_free)
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlockCreateOptions,
+                              qapi_free_QCryptoBlockCreateOptions)
 
 #endif /* QCRYPTO_BLOCK_H */
@@ -28,6 +28,7 @@
 
 #include "qapi/qapi-types-job.h"
 #include "qemu/queue.h"
+#include "qemu/progress_meter.h"
 #include "qemu/coroutine.h"
 #include "block/aio.h"
 
@@ -117,15 +118,7 @@ typedef struct Job {
     /** True if this job should automatically dismiss itself */
     bool auto_dismiss;
 
-    /**
-     * Current progress. The unit is arbitrary as long as the ratio between
-     * progress_current and progress_total represents the estimated percentage
-     * of work already done.
-     */
-    int64_t progress_current;
-
-    /** Estimated progress_current value at the completion of the job */
-    int64_t progress_total;
+    ProgressMeter progress;
 
     /**
      * Return code from @run and/or @prepare callback(s).

 include/qemu/progress_meter.h | 58 (new file)
@ -0,0 +1,58 @@
|
|||||||
|
/*
|
||||||
|
* Helper functionality for some process progress tracking.
|
||||||
|
*
|
||||||
|
* Copyright (c) 2011 IBM Corp.
|
||||||
|
* Copyright (c) 2012, 2018 Red Hat, Inc.
|
||||||
|
* Copyright (c) 2020 Virtuozzo International GmbH
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
|
* in the Software without restriction, including without limitation the rights
|
||||||
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
* copies of the Software, and to permit persons to whom the Software is
|
||||||
|
* furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in
|
||||||
|
* all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
* THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef QEMU_PROGRESS_METER_H
|
||||||
|
#define QEMU_PROGRESS_METER_H
|
||||||
|
|
||||||
|
typedef struct ProgressMeter {
|
||||||
|
/**
|
||||||
|
* Current progress. The unit is arbitrary as long as the ratio between
|
||||||
|
* current and total represents the estimated percentage
|
||||||
|
* of work already done.
|
||||||
|
*/
|
||||||
|
uint64_t current;
|
||||||
|
|
||||||
|
/** Estimated current value at the completion of the process */
|
||||||
|
uint64_t total;
|
||||||
|
} ProgressMeter;
|
||||||
|
|
||||||
|
static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
|
||||||
|
{
|
||||||
|
pm->current += done;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
|
||||||
|
{
|
||||||
|
pm->total = pm->current + remaining;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void progress_increase_remaining(ProgressMeter *pm,
|
||||||
|
uint64_t delta)
|
||||||
|
{
|
||||||
|
pm->total += delta;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* QEMU_PROGRESS_METER_H */
|
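The struct plus the three inline helpers above are the entire API. A small sketch (not part of the patch; the copy loop and names are hypothetical) of how a caller might drive them and report a percentage, mirroring the formula qemu-img uses below:

    #include <stdint.h>
    #include <stdio.h>
    #include "qemu/progress_meter.h"   /* the new header shown above */

    /* Hypothetical loop copying n_clusters clusters of cluster_size bytes each. */
    static void copy_all(uint64_t n_clusters, uint64_t cluster_size)
    {
        ProgressMeter pm = { 0 };

        /* total = current + remaining, so set the goal up front. */
        progress_set_remaining(&pm, n_clusters * cluster_size);

        for (uint64_t i = 0; i < n_clusters; i++) {
            /* ... copy one cluster ... */
            progress_work_done(&pm, cluster_size);
            if (pm.total) {
                printf("%.1f%%\n", (double)pm.current / pm.total * 100.0);
            }
        }
    }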
@@ -143,8 +143,8 @@ static JobInfo *job_query_single(Job *job, Error **errp)
         .id                 = g_strdup(job->id),
         .type               = job_type(job),
         .status             = job->status,
-        .current_progress   = job->progress_current,
-        .total_progress     = job->progress_total,
+        .current_progress   = job->progress.current,
+        .total_progress     = job->progress.total,
         .has_error          = !!job->err,
         .error              = job->err ? \
                               g_strdup(error_get_pretty(job->err)) : NULL,
job.c (6 lines changed)
@@ -369,17 +369,17 @@ void job_unref(Job *job)
 
 void job_progress_update(Job *job, uint64_t done)
 {
-    job->progress_current += done;
+    progress_work_done(&job->progress, done);
 }
 
 void job_progress_set_remaining(Job *job, uint64_t remaining)
 {
-    job->progress_total = job->progress_current + remaining;
+    progress_set_remaining(&job->progress, remaining);
 }
 
 void job_progress_increase_remaining(Job *job, uint64_t delta)
 {
-    job->progress_total += delta;
+    progress_increase_remaining(&job->progress, delta);
 }
 
 void job_event_cancelled(Job *job)
qemu-img.c (14 lines changed)
@@ -817,6 +817,8 @@ static int img_check(int argc, char **argv)
                     check->corruptions_fixed);
         }
 
+        qapi_free_ImageCheck(check);
+        check = g_new0(ImageCheck, 1);
         ret = collect_image_check(bs, check, filename, fmt, 0);
 
         check->leaks_fixed          = leaks_fixed;
@@ -882,9 +884,9 @@ static void run_block_job(BlockJob *job, Error **errp)
     do {
         float progress = 0.0f;
        aio_poll(aio_context, true);
-        if (job->job.progress_total) {
-            progress = (float)job->job.progress_current /
-                       job->job.progress_total * 100.f;
+        if (job->job.progress.total) {
+            progress = (float)job->job.progress.current /
+                       job->job.progress.total * 100.f;
         }
         qemu_progress_print(progress, 0);
     } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
@@ -4932,10 +4934,8 @@ static int img_measure(int argc, char **argv)
         filename = argv[optind];
     }
 
-    if (!filename &&
-        (object_opts || image_opts || fmt || snapshot_name || sn_opts)) {
-        error_report("--object, --image-opts, -f, and -l "
-                     "require a filename argument.");
+    if (!filename && (image_opts || fmt || snapshot_name || sn_opts)) {
+        error_report("--image-opts, -f, and -l require a filename argument.");
         goto out;
     }
     if (filename && img_size != UINT64_MAX) {
@@ -50,7 +50,7 @@ _make_test_img 1G
 $QEMU_IMG measure # missing arguments
 $QEMU_IMG measure --size 2G "$TEST_IMG" # only one allowed
 $QEMU_IMG measure "$TEST_IMG" a # only one filename allowed
-$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # missing filename
+$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # size or filename needed
 $QEMU_IMG measure --image-opts # missing filename
 $QEMU_IMG measure -f qcow2 # missing filename
 $QEMU_IMG measure -l snap1 # missing filename
@@ -5,10 +5,10 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
 qemu-img: Either --size N or one filename must be specified.
 qemu-img: --size N cannot be used together with a filename.
 qemu-img: At most one filename argument is allowed.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
+qemu-img: Either --size N or one filename must be specified.
+qemu-img: --image-opts, -f, and -l require a filename argument.
+qemu-img: --image-opts, -f, and -l require a filename argument.
+qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: Invalid option list: ,
 qemu-img: Invalid parameter 'snapshot.foo'
 qemu-img: Failed in parsing snapshot param 'snapshot.foo'
@@ -5,10 +5,10 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
 qemu-img: Either --size N or one filename must be specified.
 qemu-img: --size N cannot be used together with a filename.
 qemu-img: At most one filename argument is allowed.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
+qemu-img: Either --size N or one filename must be specified.
+qemu-img: --image-opts, -f, and -l require a filename argument.
+qemu-img: --image-opts, -f, and -l require a filename argument.
+qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: Invalid option list: ,
 qemu-img: Invalid parameter 'snapshot.foo'
 qemu-img: Failed in parsing snapshot param 'snapshot.foo'
tests/qemu-iotests/288 (new executable file, 93 lines)
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+#
+# qemu-img measure tests for LUKS images
+#
+# Copyright (C) 2020 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=stefanha@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+    _cleanup_test_img
+    rm -f "$TEST_IMG.converted"
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+. ./common.pattern
+
+_supported_fmt luks
+_supported_proto file
+_supported_os Linux
+
+SECRET=secret,id=sec0,data=passphrase
+
+echo "== measure 1G image file =="
+echo
+
+$QEMU_IMG measure --object "$SECRET" \
+                  -O "$IMGFMT" \
+                  -o key-secret=sec0,iter-time=10 \
+                  --size 1G
+
+echo
+echo "== create 1G image file (size should be no greater than measured) =="
+echo
+
+_make_test_img 1G
+stat -c "image file size in bytes: %s" "$TEST_IMG_FILE"
+
+echo
+echo "== modified 1G image file (size should be no greater than measured) =="
+echo
+
+$QEMU_IO --object "$SECRET" --image-opts "$TEST_IMG" -c "write -P 0x51 0x10000 0x400" | _filter_qemu_io | _filter_testdir
+stat -c "image file size in bytes: %s" "$TEST_IMG_FILE"
+
+echo
+echo "== measure preallocation=falloc 1G image file =="
+echo
+
+$QEMU_IMG measure --object "$SECRET" \
+                  -O "$IMGFMT" \
+                  -o key-secret=sec0,iter-time=10,preallocation=falloc \
+                  --size 1G
+
+echo
+echo "== measure with input image file =="
+echo
+
+IMGFMT=raw IMGKEYSECRET= IMGOPTS= _make_test_img 1G | _filter_imgfmt
+QEMU_IO_OPTIONS= IMGOPTSSYNTAX= $QEMU_IO -f raw -c "write -P 0x51 0x10000 0x400" "$TEST_IMG_FILE" | _filter_qemu_io | _filter_testdir
+$QEMU_IMG measure --object "$SECRET" \
+                  -O "$IMGFMT" \
+                  -o key-secret=sec0,iter-time=10 \
+                  -f raw \
+                  "$TEST_IMG_FILE"
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
tests/qemu-iotests/288.out (new file, 30 lines)
@@ -0,0 +1,30 @@
+QA output created by 288
+== measure 1G image file ==
+
+required size: 1075810304
+fully allocated size: 1075810304
+
+== create 1G image file (size should be no greater than measured) ==
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+image file size in bytes: 1075810304
+
+== modified 1G image file (size should be no greater than measured) ==
+
+wrote 1024/1024 bytes at offset 65536
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+image file size in bytes: 1075810304
+
+== measure preallocation=falloc 1G image file ==
+
+required size: 1075810304
+fully allocated size: 1075810304
+
+== measure with input image file ==
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+wrote 1024/1024 bytes at offset 65536
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+required size: 1075810304
+fully allocated size: 1075810304
+*** done
@@ -56,18 +56,30 @@ poke_file()
 # peek_file_le 'test.img' 512 2 => 65534
 peek_file_le()
 {
-    # Wrap in echo $() to strip spaces
-    echo $(od -j"$2" -N"$3" --endian=little -An -vtu"$3" "$1")
+    local val=0 shift=0 byte
+
+    # coreutils' od --endian is not portable, so manually assemble bytes.
+    for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do
+        val=$(( val | (byte << shift) ))
+        shift=$((shift + 8))
+    done
+    printf %llu $val
 }
 
 # peek_file_be 'test.img' 512 2 => 65279
 peek_file_be()
 {
-    # Wrap in echo $() to strip spaces
-    echo $(od -j"$2" -N"$3" --endian=big -An -vtu"$3" "$1")
+    local val=0 byte
+
+    # coreutils' od --endian is not portable, so manually assemble bytes.
+    for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do
+        val=$(( (val << 8) | byte ))
+    done
+    printf %llu $val
 }
 
-# peek_file_raw 'test.img' 512 2 => '\xff\xfe'
+# peek_file_raw 'test.img' 512 2 => '\xff\xfe'. Do not use if the raw data
+# is likely to contain \0 or trailing \n.
 peek_file_raw()
 {
     dd if="$1" bs=1 skip="$2" count="$3" status=none
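The shell loops above rebuild the integer one byte at a time so the tests no longer depend on GNU coreutils' od --endian. The same accumulation, written out in C purely as an illustration (not part of the patch), makes the two byte orderings easy to compare:

    #include <stddef.h>
    #include <stdint.h>

    /* Little-endian: byte i lands at bit position 8*i (mirrors peek_file_le). */
    static uint64_t assemble_le(const uint8_t *bytes, size_t n)
    {
        uint64_t val = 0;
        for (size_t i = 0; i < n; i++) {
            val |= (uint64_t)bytes[i] << (8 * i);
        }
        return val;
    }

    /* Big-endian: shift the accumulator and append the next byte (mirrors peek_file_be). */
    static uint64_t assemble_be(const uint8_t *bytes, size_t n)
    {
        uint64_t val = 0;
        for (size_t i = 0; i < n; i++) {
            val = (val << 8) | bytes[i];
        }
        return val;
    }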
@@ -293,3 +293,4 @@
 283 auto quick
 284 rw
 286 rw quick
+288 quick