/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by the
     * first failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without the mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock in BlockCopyState.
     */
    CoQueue wait_queue; /* coroutines blocked on this task */
    /*
     * Only protects the case of a parallel read while updating the @bytes
     * value in block_copy_task_shrink().
     */
    int64_t bytes;
    QLIST_ENTRY(BlockCopyTask) list;
} BlockCopyTask;
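
/* End offset (exclusive) of the region covered by @task */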
static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, and the user is responsible for
     * appropriate permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

/*
 * If there are no intersecting tasks, return false. Otherwise, wait for the
 * first found intersecting task to finish and return true.
 *
 * Called with lock held. May temporarily release the lock.
 * A return value of false proves that the lock was NOT released.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, &s->lock);

    return true;
}

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existing tasks possible in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .method = s->method,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task, to be handled later. Set the dirty bits back and
 * wake up all tasks waiting for us (maybe some of them do not intersect with
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}
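
/*
 * Finish a task: drop it from the in-flight accounting, restore dirty bits on
 * failure so the region is retried, update the progress meter and wake any
 * coroutines waiting on this task.
 */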
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    progress_set_remaining(task->s->progress,
                           bdrv_get_dirty_count(task->s->copy_bitmap) +
                           task->s->in_flight_bytes);
    qemu_co_queue_restart_all(&task->wait_queue);
}
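
/* Release a state created by block_copy_state_new(); NULL is allowed. */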
void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}
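
/* Smallest non-zero max_transfer of source and target, clamped to INT_MAX */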
static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}
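
/*
 * Create the block-copy state: allocate the (initially disabled) dirty bitmap
 * covering @source, record the copy parameters, and pick the initial copy
 * method (per-cluster read/write, buffered read/write, or copy_range) based
 * on @use_copy_range, @write_flags and the supported max_transfer.
 */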
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size, bool use_copy_range,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    if (s->max_transfer < cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their behalf).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (look at block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}

/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL, directly run the task; otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *   otherwise -ECANCELED if pool status is bad
 *   otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do a copy of a cluster-aligned chunk. The requested region is allowed to
 * exceed s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No sync here: neither bitmap nor intersecting-requests handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           BlockCopyMethod *method,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with a
         * buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover, the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails) is
         * handled by setting the large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}
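
/*
 * AioTask entry point of one copy task: do the copy, then, under the lock,
 * propagate the possibly changed copy method back to the state and record the
 * first error in the call state, update progress, release the shared memory
 * resource and finish the task.
 */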
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else {
            progress_work_done(s->progress, t->bytes);
        }
    }
    co_put_to_shres(s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}
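
/*
 * Query the block status of the source at @offset and clip the result to a
 * cluster-aligned @*pnum. Falls back to "one allocated cluster of data" on
 * error or when the returned chunk is smaller than a cluster.
 */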
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at @offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at @offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        qemu_co_mutex_lock(&s->lock);
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
        qemu_co_mutex_unlock(&s->lock);
    }

    *count = bytes;
    return ret;
}

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->bytes);

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task has already
         * failed for a real reason; let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}
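
/* Wake a block-copy call that is sleeping in the rate-limit delay. */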
void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it will
 * help us. If they fail, we will retry not-yet-copied regions. So, if we
 * return an error, it means that some I/O operation failed in the context of
 * _this_ block_copy call, not some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = block_copy_wait_one(s, call_state->offset,
                                          call_state->bytes);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * block_copy_wait_one return value 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
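
/*
 * Synchronous block-copy call: run block_copy_common() on a stack-allocated
 * call state with the default number of workers and no chunk-size limit.
 */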
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}
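
/*
 * Start an asynchronous block-copy call in its own coroutine. The returned
 * call state can be polled with block_copy_call_finished() and friends, and
 * released with block_copy_call_free() once finished.
 */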
BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
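
/* Free a call state returned by block_copy_async(); it must have finished. */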
void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}
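
/*
 * Query helpers for an asynchronous call state: finished/succeeded/failed/
 * cancelled status and the final return value with the error kind.
 */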
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling and finishing are racy.
 * The user can cancel a block-copy that has already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it would be good to kick all call states from here, but that
     * should be done only from a coroutine, to avoid crashing if the
     * s->calls list changes while entering one call. So for now, the only
     * user of this function kicks its only call_state by hand.
     */
}