blockjob: Introduce block_job_ratelimit_get_delay()

This gets rid of more direct accesses to BlockJob fields from the
job drivers.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Kevin Wolf 2018-01-18 21:19:38 +01:00
parent 18bb69287e
commit dee81d5111
6 changed files with 29 additions and 19 deletions
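
For orientation, the calling convention the patch converts the drivers to looks
roughly like the sketch below. This is a minimal illustration, not code taken
from the tree; the BackupBlockJob/common/bytes_read names mirror the backup
hunk further down, and the new helper performs the job->speed check on behalf
of the driver.

    /* Hypothetical driver snippet: ask the job layer how long to throttle,
     * then sleep for exactly that long (a delay of 0 still yields, which the
     * drivers rely on so that bdrv_drain_all() can make progress). */
    static void coroutine_fn example_throttle(BackupBlockJob *job)
    {
        int64_t delay_ns = block_job_ratelimit_get_delay(&job->common,
                                                         job->bytes_read);

        job->bytes_read = 0;
        block_job_sleep_ns(&job->common, delay_ns);
    }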

@@ -325,21 +325,17 @@ static void backup_complete(BlockJob *job, void *opaque)
 
 static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 {
+    uint64_t delay_ns;
+
     if (block_job_is_cancelled(&job->common)) {
         return true;
     }
 
-    /* we need to yield so that bdrv_drain_all() returns.
-     * (without, VM does not reboot)
-     */
-    if (job->common.speed) {
-        uint64_t delay_ns = ratelimit_calculate_delay(&job->common.limit,
-                                                      job->bytes_read);
-        job->bytes_read = 0;
-        block_job_sleep_ns(&job->common, delay_ns);
-    } else {
-        block_job_sleep_ns(&job->common, 0);
-    }
+    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
+     * return. Without a yield, the VM would not reboot. */
+    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
+    job->bytes_read = 0;
+    block_job_sleep_ns(&job->common, delay_ns);
 
     if (block_job_is_cancelled(&job->common)) {
         return true;

@@ -197,8 +197,8 @@ static void coroutine_fn commit_run(void *opaque)
         /* Publish progress */
         block_job_progress_update(&s->common, n);
 
-        if (copy && s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->common.limit, n);
+        if (copy) {
+            delay_ns = block_job_ratelimit_get_delay(&s->common, n);
         } else {
             delay_ns = 0;
         }

@@ -447,10 +447,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         assert(io_bytes);
         offset += io_bytes;
         nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
-        if (s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->common.limit,
-                                                 io_bytes_acct);
-        }
+        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
     }
     return delay_ns;
 }

@@ -185,8 +185,8 @@ static void coroutine_fn stream_run(void *opaque)
         /* Publish progress */
         block_job_progress_update(&s->common, n);
 
-        if (copy && s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->common.limit, n);
+        if (copy) {
+            delay_ns = block_job_ratelimit_get_delay(&s->common, n);
         } else {
             delay_ns = 0;
         }

@@ -680,6 +680,15 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     block_job_enter_cond(job, block_job_timer_pending);
 }
 
+int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
+{
+    if (!job->speed) {
+        return 0;
+    }
+
+    return ratelimit_calculate_delay(&job->limit, n);
+}
+
 void block_job_complete(BlockJob *job, Error **errp)
 {
     /* Should not be reachable via external interface for internal jobs */

@@ -165,6 +165,14 @@ void block_job_sleep_ns(BlockJob *job, int64_t ns);
  */
 void block_job_yield(BlockJob *job);
 
+/**
+ * block_job_ratelimit_get_delay:
+ *
+ * Calculate and return delay for the next request in ns. See the documentation
+ * of ratelimit_calculate_delay() for details.
+ */
+int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
+
 /**
  * block_job_early_fail:
  * @bs: The block device.
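
The new header comment defers to ratelimit_calculate_delay() for the exact
semantics. As a rough, self-contained illustration only (this is not QEMU's
implementation, and every name in it is made up for the example), one common
way such a limiter works is slice-based accounting: each time slice gets a byte
quota derived from the configured speed, and once a caller has dispatched more
than that quota it is told to sleep until the slice ends.

    #include <stdint.h>

    /* Toy rate limiter, for illustration only. */
    typedef struct ToyRateLimit {
        int64_t slice_start_ns;   /* start of the current accounting slice */
        int64_t slice_len_ns;     /* slice length, e.g. 100 ms */
        uint64_t quota;           /* bytes allowed per slice at the set speed */
        uint64_t dispatched;      /* bytes accounted so far in this slice */
    } ToyRateLimit;

    static int64_t toy_ratelimit_get_delay(ToyRateLimit *rl, int64_t now_ns,
                                           uint64_t n)
    {
        if (now_ns - rl->slice_start_ns >= rl->slice_len_ns) {
            /* A new slice has begun: reset the accounting. */
            rl->slice_start_ns = now_ns;
            rl->dispatched = 0;
        }
        rl->dispatched += n;
        if (rl->dispatched <= rl->quota) {
            return 0;   /* within budget: no throttling needed */
        }
        /* Over budget: ask the caller to sleep out the rest of the slice. */
        return rl->slice_start_ns + rl->slice_len_ns - now_ns;
    }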