block: add block_job_sleep_ns
This function abstracts the pretty complex semantics of the "busy"
member of BlockJob.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 0ac9377d04
commit 4513eafe92
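
As a reading aid (not part of the commit itself): the helper folds the busy-flag bookkeeping that jobs previously open-coded around co_sleep_ns() into a single call. A minimal sketch of the two forms, reusing the stream job's names from the hunks below:

/* Sketch only, inside a job's main loop; names taken from block/stream.c. */

/* Old form: the job toggles the busy flag by hand and must remember the
 * cancellation check before going to sleep on a timer. */
if (!block_job_is_cancelled(&s->common)) {
    s->common.busy = false;
    co_sleep_ns(rt_clock, delay_ns);
    s->common.busy = true;
}

/* New form: block_job_sleep_ns() performs exactly that sequence, including
 * the cancellation check before busy is cleared. */
block_job_sleep_ns(&s->common, rt_clock, delay_ns);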
block.c (11 changed lines)

@@ -4188,6 +4188,7 @@ void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
     job->bs = bs;
     job->cb = cb;
     job->opaque = opaque;
+    job->busy = true;
     bs->job = job;
 
     /* Only set speed when necessary to avoid NotSupported error */
@@ -4254,3 +4255,13 @@ void block_job_cancel_sync(BlockJob *job)
         qemu_aio_wait();
     }
 }
+
+void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
+{
+    /* Check cancellation *before* setting busy = false, too! */
+    if (!block_job_is_cancelled(job)) {
+        job->busy = false;
+        co_sleep_ns(clock, ns);
+        job->busy = true;
+    }
+}
block/stream.c

@@ -192,12 +192,17 @@ static void coroutine_fn stream_run(void *opaque)
     }
 
     for (sector_num = 0; sector_num < end; sector_num += n) {
-retry:
+        uint64_t delay_ns = 0;
+
+wait:
+        /* Note that even when no rate limit is applied we need to yield
+         * with no pending I/O here so that qemu_aio_flush() returns.
+         */
+        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
             break;
         }
 
-        s->common.busy = true;
         if (base) {
             ret = is_allocated_base(bs, base, sector_num,
                                     STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
@@ -209,13 +214,9 @@ retry:
         trace_stream_one_iteration(s, sector_num, n, ret);
         if (ret == 0) {
             if (s->common.speed) {
-                uint64_t delay_ns = ratelimit_calculate_delay(&s->limit, n);
+                delay_ns = ratelimit_calculate_delay(&s->limit, n);
                 if (delay_ns > 0) {
-                    s->common.busy = false;
-                    co_sleep_ns(rt_clock, delay_ns);
-
-                    /* Recheck cancellation and that sectors are unallocated */
-                    goto retry;
+                    goto wait;
                 }
             }
             ret = stream_populate(bs, sector_num, n, buf);
@@ -227,12 +228,6 @@ retry:
 
         /* Publish progress */
         s->common.offset += n * BDRV_SECTOR_SIZE;
-
-        /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
-         */
-        s->common.busy = false;
-        co_sleep_ns(rt_clock, 0);
     }
 
     if (!base) {
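
Taken together, the block/stream.c hunks leave stream_run() with a single sleep point at the top of its copy loop. A condensed sketch of the resulting control flow (allocation checks, the actual copy and error handling elided):

/* Condensed sketch of stream_run()'s loop after this patch. */
for (sector_num = 0; sector_num < end; sector_num += n) {
    uint64_t delay_ns = 0;

wait:
    /* Sleep 0 ns when not rate-limited, so qemu_aio_flush() can return. */
    block_job_sleep_ns(&s->common, rt_clock, delay_ns);
    if (block_job_is_cancelled(&s->common)) {
        break;
    }

    /* ... determine the next range to copy, setting n ... */

    if (s->common.speed) {
        delay_ns = ratelimit_calculate_delay(&s->limit, n);
        if (delay_ns > 0) {
            goto wait;  /* re-check cancellation and allocation after the delay */
        }
    }

    /* ... stream_populate() and publish progress ... */
}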
block_int.h (22 changed lines)

@@ -97,18 +97,15 @@ struct BlockJob {
     /**
      * Set to true if the job should cancel itself.  The flag must
      * always be tested just before toggling the busy flag from false
-     * to true.  After a job has detected that the cancelled flag is
-     * true, it should not anymore issue any I/O operation to the
-     * block device.
+     * to true.  After a job has been cancelled, it should only yield
+     * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
      */
     bool cancelled;
 
     /**
      * Set to false by the job while it is in a quiescent state, where
-     * no I/O is pending and cancellation can be processed without
-     * issuing new I/O.  The busy flag must be set to false when the
-     * job goes to sleep on any condition that is not detected by
-     * #qemu_aio_wait, such as a timer.
+     * no I/O is pending and the job has yielded on any condition
+     * that is not detected by #qemu_aio_wait, such as a timer.
      */
     bool busy;
 
@@ -363,6 +360,17 @@ void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
                        int64_t speed, BlockDriverCompletionFunc *cb,
                        void *opaque, Error **errp);
 
+/**
+ * block_job_sleep_ns:
+ * @job: The job that calls the function.
+ * @clock: The clock to sleep on.
+ * @ns: How many nanoseconds to stop for.
+ *
+ * Put the job to sleep (assuming that it wasn't canceled) for @ns
+ * nanoseconds.  Canceling the job will interrupt the wait immediately.
+ */
+void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns);
+
 /**
  * block_job_complete:
  * @job: The job being completed.
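
For a hypothetical new job built on the declaration above (illustration only; ExampleJob, compute_delay() and do_one_chunk() are made-up names, not QEMU APIs), the documented contract means the call returns immediately once the job has been cancelled, so the caller still re-checks cancellation after waking:

/* Hypothetical job coroutine; only the block_job_* helpers, rt_clock and
 * coroutine_fn are real QEMU names here. */
static void coroutine_fn example_job_run(ExampleJob *job)
{
    while (!block_job_is_cancelled(&job->common)) {
        int64_t delay_ns = compute_delay(job);   /* 0 when unthrottled */

        /* Skips the sleep entirely if the job was already cancelled;
         * otherwise clears busy, sleeps, and sets busy again. */
        block_job_sleep_ns(&job->common, rt_clock, delay_ns);
        if (block_job_is_cancelled(&job->common)) {
            break;
        }

        do_one_chunk(job);                       /* hypothetical work unit */
    }
}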