2013-09-02 16:14:37 +04:00
|
|
|
/*
|
|
|
|
* QEMU throttling infrastructure
|
|
|
|
*
|
2015-06-08 19:17:47 +03:00
|
|
|
* Copyright (C) Nodalink, EURL. 2013-2014
|
|
|
|
* Copyright (C) Igalia, S.L. 2015
|
2013-09-02 16:14:37 +04:00
|
|
|
*
|
2015-06-08 19:17:47 +03:00
|
|
|
* Authors:
|
|
|
|
* Benoît Canet <benoit.canet@nodalink.com>
|
|
|
|
* Alberto Garcia <berto@igalia.com>
|
2013-09-02 16:14:37 +04:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation; either version 2 or
|
|
|
|
* (at your option) version 3 of the License.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2016-01-29 20:49:55 +03:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 11:01:28 +03:00
|
|
|
#include "qapi/error.h"
|
2013-09-02 16:14:37 +04:00
|
|
|
#include "qemu/throttle.h"
|
|
|
|
#include "qemu/timer.h"
|
2014-05-14 18:22:45 +04:00
|
|
|
#include "block/aio.h"
|
2013-09-02 16:14:37 +04:00
|
|
|
|
|
|
|
/* This function make a bucket leak
|
|
|
|
*
|
|
|
|
* @bkt: the bucket to make leak
|
|
|
|
* @delta_ns: the time delta
|
|
|
|
*/
|
|
|
|
void throttle_leak_bucket(LeakyBucket *bkt, int64_t delta_ns)
|
|
|
|
{
|
|
|
|
double leak;
|
|
|
|
|
|
|
|
/* compute how much to leak */
|
2015-07-08 17:10:09 +03:00
|
|
|
leak = (bkt->avg * (double) delta_ns) / NANOSECONDS_PER_SECOND;
|
2013-09-02 16:14:37 +04:00
|
|
|
|
|
|
|
/* make the bucket leak */
|
|
|
|
bkt->level = MAX(bkt->level - leak, 0);
|
2016-02-18 13:27:01 +03:00
|
|
|
|
|
|
|
/* if we allow bursts for more than one second we also need to
|
|
|
|
* keep track of bkt->burst_level so the bkt->max goal per second
|
|
|
|
* is attained */
|
|
|
|
if (bkt->burst_length > 1) {
|
|
|
|
leak = (bkt->max * (double) delta_ns) / NANOSECONDS_PER_SECOND;
|
|
|
|
bkt->burst_level = MAX(bkt->burst_level - leak, 0);
|
|
|
|
}
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Calculate the time delta since last leak and make proportionals leaks
|
|
|
|
*
|
|
|
|
* @now: the current timestamp in ns
|
|
|
|
*/
|
|
|
|
static void throttle_do_leak(ThrottleState *ts, int64_t now)
|
|
|
|
{
|
|
|
|
/* compute the time elapsed since the last leak */
|
|
|
|
int64_t delta_ns = now - ts->previous_leak;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ts->previous_leak = now;
|
|
|
|
|
|
|
|
if (delta_ns <= 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* make each bucket leak */
|
|
|
|
for (i = 0; i < BUCKETS_COUNT; i++) {
|
|
|
|
throttle_leak_bucket(&ts->cfg.buckets[i], delta_ns);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* do the real job of computing the time to wait
|
|
|
|
*
|
|
|
|
* @limit: the throttling limit
|
|
|
|
* @extra: the number of operation to delay
|
|
|
|
* @ret: the time to wait in ns
|
|
|
|
*/
|
|
|
|
static int64_t throttle_do_compute_wait(double limit, double extra)
|
|
|
|
{
|
2015-07-08 17:10:09 +03:00
|
|
|
double wait = extra * NANOSECONDS_PER_SECOND;
|
2013-09-02 16:14:37 +04:00
|
|
|
wait /= limit;
|
|
|
|
return wait;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This function compute the wait time in ns that a leaky bucket should trigger
|
|
|
|
*
|
|
|
|
* @bkt: the leaky bucket we operate on
|
|
|
|
* @ret: the resulting wait time in ns or 0 if the operation can go through
|
|
|
|
*/
|
|
|
|
int64_t throttle_compute_wait(LeakyBucket *bkt)
|
|
|
|
{
|
|
|
|
double extra; /* the number of extra units blocking the io */
|
2017-08-24 16:24:46 +03:00
|
|
|
double bucket_size; /* I/O before throttling to bkt->avg */
|
|
|
|
double burst_bucket_size; /* Before throttling to bkt->max */
|
2013-09-02 16:14:37 +04:00
|
|
|
|
|
|
|
if (!bkt->avg) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-24 16:24:46 +03:00
|
|
|
if (!bkt->max) {
|
|
|
|
/* If bkt->max is 0 we still want to allow short bursts of I/O
|
|
|
|
* from the guest, otherwise every other request will be throttled
|
|
|
|
* and performance will suffer considerably. */
|
2017-08-24 16:24:47 +03:00
|
|
|
bucket_size = (double) bkt->avg / 10;
|
2017-08-24 16:24:46 +03:00
|
|
|
burst_bucket_size = 0;
|
|
|
|
} else {
|
|
|
|
/* If we have a burst limit then we have to wait until all I/O
|
|
|
|
* at burst rate has finished before throttling to bkt->avg */
|
|
|
|
bucket_size = bkt->max * bkt->burst_length;
|
2017-08-24 16:24:47 +03:00
|
|
|
burst_bucket_size = (double) bkt->max / 10;
|
2017-08-24 16:24:46 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If the main bucket is full then we have to wait */
|
|
|
|
extra = bkt->level - bucket_size;
|
2016-02-18 13:27:01 +03:00
|
|
|
if (extra > 0) {
|
|
|
|
return throttle_do_compute_wait(bkt->avg, extra);
|
|
|
|
}
|
2013-09-02 16:14:37 +04:00
|
|
|
|
2017-08-24 16:24:46 +03:00
|
|
|
/* If the main bucket is not full yet we still have to check the
|
|
|
|
* burst bucket in order to enforce the burst limit */
|
2016-02-18 13:27:01 +03:00
|
|
|
if (bkt->burst_length > 1) {
|
2017-09-13 11:28:17 +03:00
|
|
|
assert(bkt->max > 0); /* see throttle_is_valid() */
|
2017-08-24 16:24:46 +03:00
|
|
|
extra = bkt->burst_level - burst_bucket_size;
|
2016-02-18 13:27:01 +03:00
|
|
|
if (extra > 0) {
|
|
|
|
return throttle_do_compute_wait(bkt->max, extra);
|
|
|
|
}
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
2016-02-18 13:27:01 +03:00
|
|
|
return 0;
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This function compute the time that must be waited while this IO
|
|
|
|
*
|
|
|
|
* @is_write: true if the current IO is a write, false if it's a read
|
|
|
|
* @ret: time to wait
|
|
|
|
*/
|
|
|
|
static int64_t throttle_compute_wait_for(ThrottleState *ts,
|
|
|
|
bool is_write)
|
|
|
|
{
|
|
|
|
BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
|
|
|
|
THROTTLE_OPS_TOTAL,
|
|
|
|
THROTTLE_BPS_READ,
|
|
|
|
THROTTLE_OPS_READ},
|
|
|
|
{THROTTLE_BPS_TOTAL,
|
|
|
|
THROTTLE_OPS_TOTAL,
|
|
|
|
THROTTLE_BPS_WRITE,
|
|
|
|
THROTTLE_OPS_WRITE}, };
|
|
|
|
int64_t wait, max_wait = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
BucketType index = to_check[is_write][i];
|
|
|
|
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
|
|
|
|
if (wait > max_wait) {
|
|
|
|
max_wait = wait;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return max_wait;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* compute the timer for this type of operation
|
|
|
|
*
|
|
|
|
* @is_write: the type of operation
|
|
|
|
* @now: the current clock timestamp
|
|
|
|
* @next_timestamp: the resulting timer
|
|
|
|
* @ret: true if a timer must be set
|
|
|
|
*/
|
2016-02-18 13:26:54 +03:00
|
|
|
static bool throttle_compute_timer(ThrottleState *ts,
|
|
|
|
bool is_write,
|
|
|
|
int64_t now,
|
|
|
|
int64_t *next_timestamp)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
|
|
|
int64_t wait;
|
|
|
|
|
|
|
|
/* leak proportionally to the time elapsed */
|
|
|
|
throttle_do_leak(ts, now);
|
|
|
|
|
|
|
|
/* compute the wait time if any */
|
|
|
|
wait = throttle_compute_wait_for(ts, is_write);
|
|
|
|
|
|
|
|
/* if the code must wait compute when the next timer should fire */
|
|
|
|
if (wait) {
|
|
|
|
*next_timestamp = now + wait;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* else no need to wait at all */
|
|
|
|
*next_timestamp = now;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-05-14 18:22:45 +04:00
|
|
|
/* Add timers to event loop */
|
2015-06-08 19:17:41 +03:00
|
|
|
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
|
|
|
|
AioContext *new_context)
|
2014-05-14 18:22:45 +04:00
|
|
|
{
|
2015-06-08 19:17:41 +03:00
|
|
|
tt->timers[0] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
|
|
|
|
tt->read_timer_cb, tt->timer_opaque);
|
|
|
|
tt->timers[1] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
|
|
|
|
tt->write_timer_cb, tt->timer_opaque);
|
2014-05-14 18:22:45 +04:00
|
|
|
}
|
|
|
|
|
2016-02-18 13:27:00 +03:00
|
|
|
/*
|
|
|
|
* Initialize the ThrottleConfig structure to a valid state
|
|
|
|
* @cfg: the config to initialize
|
|
|
|
*/
|
|
|
|
void throttle_config_init(ThrottleConfig *cfg)
|
|
|
|
{
|
2016-02-18 13:27:01 +03:00
|
|
|
unsigned i;
|
2016-02-18 13:27:00 +03:00
|
|
|
memset(cfg, 0, sizeof(*cfg));
|
2016-02-18 13:27:01 +03:00
|
|
|
for (i = 0; i < BUCKETS_COUNT; i++) {
|
|
|
|
cfg->buckets[i].burst_length = 1;
|
|
|
|
}
|
2016-02-18 13:27:00 +03:00
|
|
|
}
|
|
|
|
|
2013-09-02 16:14:37 +04:00
|
|
|
/* To be called first on the ThrottleState */
|
2015-06-08 19:17:41 +03:00
|
|
|
void throttle_init(ThrottleState *ts)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
|
|
|
memset(ts, 0, sizeof(ThrottleState));
|
2016-02-18 13:27:00 +03:00
|
|
|
throttle_config_init(&ts->cfg);
|
2015-06-08 19:17:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* To be called first on the ThrottleTimers */
|
|
|
|
void throttle_timers_init(ThrottleTimers *tt,
|
|
|
|
AioContext *aio_context,
|
|
|
|
QEMUClockType clock_type,
|
|
|
|
QEMUTimerCB *read_timer_cb,
|
|
|
|
QEMUTimerCB *write_timer_cb,
|
|
|
|
void *timer_opaque)
|
|
|
|
{
|
|
|
|
memset(tt, 0, sizeof(ThrottleTimers));
|
2013-09-02 16:14:37 +04:00
|
|
|
|
2015-06-08 19:17:41 +03:00
|
|
|
tt->clock_type = clock_type;
|
|
|
|
tt->read_timer_cb = read_timer_cb;
|
|
|
|
tt->write_timer_cb = write_timer_cb;
|
|
|
|
tt->timer_opaque = timer_opaque;
|
|
|
|
throttle_timers_attach_aio_context(tt, aio_context);
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* destroy a timer */
|
|
|
|
static void throttle_timer_destroy(QEMUTimer **timer)
|
|
|
|
{
|
|
|
|
assert(*timer != NULL);
|
|
|
|
|
|
|
|
timer_del(*timer);
|
|
|
|
timer_free(*timer);
|
|
|
|
*timer = NULL;
|
|
|
|
}
|
|
|
|
|
2014-05-14 18:22:45 +04:00
|
|
|
/* Remove timers from event loop */
|
2015-06-08 19:17:41 +03:00
|
|
|
void throttle_timers_detach_aio_context(ThrottleTimers *tt)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < 2; i++) {
|
2015-06-08 19:17:41 +03:00
|
|
|
throttle_timer_destroy(&tt->timers[i]);
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-08 19:17:41 +03:00
|
|
|
/* To be called last on the ThrottleTimers
 *
 * Currently equivalent to detaching from the AioContext, which deletes
 * and frees both timers.
 *
 * @tt: the timers structure to tear down
 */
void throttle_timers_destroy(ThrottleTimers *tt)
{
    throttle_timers_detach_aio_context(tt);
}
|
|
|
|
|
2013-09-02 16:14:37 +04:00
|
|
|
/* is any throttling timer configured */
|
2015-06-08 19:17:41 +03:00
|
|
|
bool throttle_timers_are_initialized(ThrottleTimers *tt)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
2015-06-08 19:17:41 +03:00
|
|
|
if (tt->timers[0]) {
|
2013-09-02 16:14:37 +04:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Does any throttling must be done
|
|
|
|
*
|
|
|
|
* @cfg: the throttling configuration to inspect
|
|
|
|
* @ret: true if throttling must be done else false
|
|
|
|
*/
|
|
|
|
bool throttle_enabled(ThrottleConfig *cfg)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < BUCKETS_COUNT; i++) {
|
|
|
|
if (cfg->buckets[i].avg > 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-02-18 13:26:59 +03:00
|
|
|
/* check if a throttling configuration is valid
 * @cfg: the throttling configuration to inspect
 * @ret: true if valid else false
 * @errp: error object, set to the first violation found
 */
bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
{
    int i;
    bool bps_flag, ops_flag;
    bool bps_max_flag, ops_max_flag;

    /* "total" limits are mutually exclusive with per-direction
     * (read/write) limits, for both averages and bursts */
    bps_flag = cfg->buckets[THROTTLE_BPS_TOTAL].avg &&
               (cfg->buckets[THROTTLE_BPS_READ].avg ||
                cfg->buckets[THROTTLE_BPS_WRITE].avg);

    ops_flag = cfg->buckets[THROTTLE_OPS_TOTAL].avg &&
               (cfg->buckets[THROTTLE_OPS_READ].avg ||
                cfg->buckets[THROTTLE_OPS_WRITE].avg);

    bps_max_flag = cfg->buckets[THROTTLE_BPS_TOTAL].max &&
                   (cfg->buckets[THROTTLE_BPS_READ].max ||
                    cfg->buckets[THROTTLE_BPS_WRITE].max);

    ops_max_flag = cfg->buckets[THROTTLE_OPS_TOTAL].max &&
                   (cfg->buckets[THROTTLE_OPS_READ].max ||
                    cfg->buckets[THROTTLE_OPS_WRITE].max);

    if (bps_flag || ops_flag || bps_max_flag || ops_max_flag) {
        error_setg(errp, "bps/iops/max total values and read/write values"
                   " cannot be used at the same time");
        return false;
    }

    /* op_size only makes sense when at least one iops limit exists */
    if (cfg->op_size &&
        !cfg->buckets[THROTTLE_OPS_TOTAL].avg &&
        !cfg->buckets[THROTTLE_OPS_READ].avg &&
        !cfg->buckets[THROTTLE_OPS_WRITE].avg) {
        error_setg(errp, "iops size requires an iops value to be set");
        return false;
    }

    /* per-bucket sanity checks */
    for (i = 0; i < BUCKETS_COUNT; i++) {
        LeakyBucket *bkt = &cfg->buckets[i];
        /* rates must fit in the supported range */
        if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
            error_setg(errp, "bps/iops/max values must be within [0, %lld]",
                       THROTTLE_VALUE_MAX);
            return false;
        }

        /* burst_length defaults to 1 (see throttle_config_init());
         * 0 would mean no burst bucket at all */
        if (!bkt->burst_length) {
            error_setg(errp, "the burst length cannot be 0");
            return false;
        }

        /* a burst length > 1 is meaningless without a burst rate */
        if (bkt->burst_length > 1 && !bkt->max) {
            error_setg(errp, "burst length set without burst rate");
            return false;
        }

        /* max * burst_length must not overflow THROTTLE_VALUE_MAX */
        if (bkt->max && bkt->burst_length > THROTTLE_VALUE_MAX / bkt->max) {
            error_setg(errp, "burst length too high for this burst rate");
            return false;
        }

        /* a burst rate requires a corresponding average rate */
        if (bkt->max && !bkt->avg) {
            error_setg(errp, "bps_max/iops_max require corresponding"
                       " bps/iops values");
            return false;
        }

        /* the burst rate must be at least the average rate */
        if (bkt->max && bkt->max < bkt->avg) {
            error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops");
            return false;
        }
    }

    return true;
}
|
|
|
|
|
2013-09-02 16:14:37 +04:00
|
|
|
/* Used to configure the throttle
|
|
|
|
*
|
|
|
|
* @ts: the throttle state we are working on
|
2017-07-02 13:06:45 +03:00
|
|
|
* @clock_type: the group's clock_type
|
2013-09-02 16:14:37 +04:00
|
|
|
* @cfg: the config to set
|
|
|
|
*/
|
2015-06-08 19:17:41 +03:00
|
|
|
void throttle_config(ThrottleState *ts,
|
2017-07-02 13:06:45 +03:00
|
|
|
QEMUClockType clock_type,
|
2015-06-08 19:17:41 +03:00
|
|
|
ThrottleConfig *cfg)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ts->cfg = *cfg;
|
|
|
|
|
2017-08-24 16:24:46 +03:00
|
|
|
/* Zero bucket level */
|
2013-09-02 16:14:37 +04:00
|
|
|
for (i = 0; i < BUCKETS_COUNT; i++) {
|
2017-08-24 16:24:46 +03:00
|
|
|
ts->cfg.buckets[i].level = 0;
|
|
|
|
ts->cfg.buckets[i].burst_level = 0;
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
2017-07-02 13:06:45 +03:00
|
|
|
ts->previous_leak = qemu_clock_get_ns(clock_type);
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* used to get config
 *
 * Copies the current throttling configuration (a plain struct copy).
 *
 * @ts: the throttle state we are working on
 * @cfg: the config to write
 */
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
{
    *cfg = ts->cfg;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Schedule the read or write timer if needed
|
|
|
|
*
|
|
|
|
* NOTE: this function is not unit tested due to it's usage of timer_mod
|
|
|
|
*
|
2015-06-08 19:17:41 +03:00
|
|
|
* @tt: the timers structure
|
2013-09-02 16:14:37 +04:00
|
|
|
* @is_write: the type of operation (read/write)
|
|
|
|
* @ret: true if the timer has been scheduled else false
|
|
|
|
*/
|
2015-06-08 19:17:41 +03:00
|
|
|
bool throttle_schedule_timer(ThrottleState *ts,
|
|
|
|
ThrottleTimers *tt,
|
|
|
|
bool is_write)
|
2013-09-02 16:14:37 +04:00
|
|
|
{
|
2015-06-08 19:17:41 +03:00
|
|
|
int64_t now = qemu_clock_get_ns(tt->clock_type);
|
2013-09-02 16:14:37 +04:00
|
|
|
int64_t next_timestamp;
|
|
|
|
bool must_wait;
|
|
|
|
|
|
|
|
must_wait = throttle_compute_timer(ts,
|
|
|
|
is_write,
|
|
|
|
now,
|
|
|
|
&next_timestamp);
|
|
|
|
|
|
|
|
/* request not throttled */
|
|
|
|
if (!must_wait) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* request throttled and timer pending -> do nothing */
|
2015-06-08 19:17:41 +03:00
|
|
|
if (timer_pending(tt->timers[is_write])) {
|
2013-09-02 16:14:37 +04:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* request throttled and timer not pending -> arm timer */
|
2015-06-08 19:17:41 +03:00
|
|
|
timer_mod(tt->timers[is_write], next_timestamp);
|
2013-09-02 16:14:37 +04:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* do the accounting for this operation
|
|
|
|
*
|
|
|
|
* @is_write: the type of operation (read/write)
|
|
|
|
* @size: the size of the operation
|
|
|
|
*/
|
|
|
|
void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
|
|
|
|
{
|
2016-02-18 13:27:01 +03:00
|
|
|
const BucketType bucket_types_size[2][2] = {
|
|
|
|
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
|
|
|
|
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_WRITE }
|
|
|
|
};
|
|
|
|
const BucketType bucket_types_units[2][2] = {
|
|
|
|
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_READ },
|
|
|
|
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_WRITE }
|
|
|
|
};
|
2013-09-02 16:14:37 +04:00
|
|
|
double units = 1.0;
|
2016-02-18 13:27:01 +03:00
|
|
|
unsigned i;
|
2013-09-02 16:14:37 +04:00
|
|
|
|
|
|
|
/* if cfg.op_size is defined and smaller than size we compute unit count */
|
|
|
|
if (ts->cfg.op_size && size > ts->cfg.op_size) {
|
|
|
|
units = (double) size / ts->cfg.op_size;
|
|
|
|
}
|
|
|
|
|
2016-02-18 13:27:01 +03:00
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
LeakyBucket *bkt;
|
|
|
|
|
|
|
|
bkt = &ts->cfg.buckets[bucket_types_size[is_write][i]];
|
|
|
|
bkt->level += size;
|
|
|
|
if (bkt->burst_length > 1) {
|
|
|
|
bkt->burst_level += size;
|
|
|
|
}
|
2013-09-02 16:14:37 +04:00
|
|
|
|
2016-02-18 13:27:01 +03:00
|
|
|
bkt = &ts->cfg.buckets[bucket_types_units[is_write][i]];
|
|
|
|
bkt->level += units;
|
|
|
|
if (bkt->burst_length > 1) {
|
|
|
|
bkt->burst_level += units;
|
|
|
|
}
|
2013-09-02 16:14:37 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
block: convert ThrottleGroup to object with QOM
ThrottleGroup is converted to an object. This will allow the future
throttle block filter drive easy creation and configuration of throttle
groups in QMP and cli.
A new QAPI struct, ThrottleLimits, is introduced to provide a shared
struct for all throttle configuration needs in QMP.
ThrottleGroups can be created via CLI as
-object throttle-group,id=foo,x-iops-total=100,x-..
where x-* are individual limit properties. Since we can't add non-scalar
properties in -object this interface must be used instead. However,
setting these properties must be disabled after initialization because
certain combinations of limits are forbidden and thus configuration
changes should be done in one transaction. The individual properties
will go away when support for non-scalar values in CLI is implemented
and thus are marked as experimental.
ThrottleGroup also has a `limits` property that uses the ThrottleLimits
struct. It can be used to create ThrottleGroups or set the
configuration in existing groups as follows:
{ "execute": "object-add",
"arguments": {
"qom-type": "throttle-group",
"id": "foo",
"props" : {
"limits": {
"iops-total": 100
}
}
}
}
{ "execute" : "qom-set",
"arguments" : {
"path" : "foo",
"property" : "limits",
"value" : {
"iops-total" : 99
}
}
}
This also means a group's configuration can be fetched with qom-get.
Signed-off-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-08-25 16:20:26 +03:00
|
|
|
/* return a ThrottleConfig based on the options in a ThrottleLimits
|
|
|
|
*
|
|
|
|
* @arg: the ThrottleLimits object to read from
|
|
|
|
* @cfg: the ThrottleConfig to edit
|
|
|
|
* @errp: error object
|
|
|
|
*/
|
|
|
|
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
if (arg->has_bps_total) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_TOTAL].avg = arg->bps_total;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_read) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_READ].avg = arg->bps_read;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_write) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_WRITE].avg = arg->bps_write;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arg->has_iops_total) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_TOTAL].avg = arg->iops_total;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_read) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_READ].avg = arg->iops_read;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_write) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_WRITE].avg = arg->iops_write;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arg->has_bps_total_max) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_TOTAL].max = arg->bps_total_max;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_read_max) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_READ].max = arg->bps_read_max;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_write_max) {
|
|
|
|
cfg->buckets[THROTTLE_BPS_WRITE].max = arg->bps_write_max;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_total_max) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_TOTAL].max = arg->iops_total_max;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_read_max) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_READ].max = arg->iops_read_max;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_write_max) {
|
|
|
|
cfg->buckets[THROTTLE_OPS_WRITE].max = arg->iops_write_max;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arg->has_bps_total_max_length) {
|
|
|
|
if (arg->bps_total_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "bps-total-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_total_max_length;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_read_max_length) {
|
|
|
|
if (arg->bps_read_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "bps-read-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_BPS_READ].burst_length = arg->bps_read_max_length;
|
|
|
|
}
|
|
|
|
if (arg->has_bps_write_max_length) {
|
|
|
|
if (arg->bps_write_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "bps-write-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_write_max_length;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_total_max_length) {
|
|
|
|
if (arg->iops_total_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "iops-total-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_total_max_length;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_read_max_length) {
|
|
|
|
if (arg->iops_read_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "iops-read-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_OPS_READ].burst_length = arg->iops_read_max_length;
|
|
|
|
}
|
|
|
|
if (arg->has_iops_write_max_length) {
|
|
|
|
if (arg->iops_write_max_length > UINT_MAX) {
|
|
|
|
error_setg(errp, "iops-write-max-length value must be in"
|
|
|
|
" the range [0, %u]", UINT_MAX);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cfg->buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_write_max_length;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arg->has_iops_size) {
|
|
|
|
cfg->op_size = arg->iops_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
throttle_is_valid(cfg, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* write the options of a ThrottleConfig to a ThrottleLimits
|
|
|
|
*
|
|
|
|
* @cfg: the ThrottleConfig to read from
|
|
|
|
* @var: the ThrottleLimits to write to
|
|
|
|
*/
|
|
|
|
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var)
|
|
|
|
{
|
|
|
|
var->bps_total = cfg->buckets[THROTTLE_BPS_TOTAL].avg;
|
|
|
|
var->bps_read = cfg->buckets[THROTTLE_BPS_READ].avg;
|
|
|
|
var->bps_write = cfg->buckets[THROTTLE_BPS_WRITE].avg;
|
|
|
|
var->iops_total = cfg->buckets[THROTTLE_OPS_TOTAL].avg;
|
|
|
|
var->iops_read = cfg->buckets[THROTTLE_OPS_READ].avg;
|
|
|
|
var->iops_write = cfg->buckets[THROTTLE_OPS_WRITE].avg;
|
|
|
|
var->bps_total_max = cfg->buckets[THROTTLE_BPS_TOTAL].max;
|
|
|
|
var->bps_read_max = cfg->buckets[THROTTLE_BPS_READ].max;
|
|
|
|
var->bps_write_max = cfg->buckets[THROTTLE_BPS_WRITE].max;
|
|
|
|
var->iops_total_max = cfg->buckets[THROTTLE_OPS_TOTAL].max;
|
|
|
|
var->iops_read_max = cfg->buckets[THROTTLE_OPS_READ].max;
|
|
|
|
var->iops_write_max = cfg->buckets[THROTTLE_OPS_WRITE].max;
|
|
|
|
var->bps_total_max_length = cfg->buckets[THROTTLE_BPS_TOTAL].burst_length;
|
|
|
|
var->bps_read_max_length = cfg->buckets[THROTTLE_BPS_READ].burst_length;
|
|
|
|
var->bps_write_max_length = cfg->buckets[THROTTLE_BPS_WRITE].burst_length;
|
|
|
|
var->iops_total_max_length = cfg->buckets[THROTTLE_OPS_TOTAL].burst_length;
|
|
|
|
var->iops_read_max_length = cfg->buckets[THROTTLE_OPS_READ].burst_length;
|
|
|
|
var->iops_write_max_length = cfg->buckets[THROTTLE_OPS_WRITE].burst_length;
|
|
|
|
var->iops_size = cfg->op_size;
|
|
|
|
|
|
|
|
var->has_bps_total = true;
|
|
|
|
var->has_bps_read = true;
|
|
|
|
var->has_bps_write = true;
|
|
|
|
var->has_iops_total = true;
|
|
|
|
var->has_iops_read = true;
|
|
|
|
var->has_iops_write = true;
|
|
|
|
var->has_bps_total_max = true;
|
|
|
|
var->has_bps_read_max = true;
|
|
|
|
var->has_bps_write_max = true;
|
|
|
|
var->has_iops_total_max = true;
|
|
|
|
var->has_iops_read_max = true;
|
|
|
|
var->has_iops_write_max = true;
|
|
|
|
var->has_bps_read_max_length = true;
|
|
|
|
var->has_bps_total_max_length = true;
|
|
|
|
var->has_bps_write_max_length = true;
|
|
|
|
var->has_iops_total_max_length = true;
|
|
|
|
var->has_iops_read_max_length = true;
|
|
|
|
var->has_iops_write_max_length = true;
|
|
|
|
var->has_iops_size = true;
|
|
|
|
}
|