/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"

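/*
 * Build a BlockDeviceInfo for @bs.  @blk may be NULL, e.g. when the node is
 * reached through query-named-block-nodes; writeback caching is then
 * reported as enabled and the BlockBackend-level throttling fields are
 * left out.  Returns NULL and sets @errp on failure.
 */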
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs, Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bs->read_only;
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;
    info->encryption_key_missing = false;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    if (bs->backing_file[0]) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(bs->backing_file);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        if (bs0->drv && bs0->backing) {
            info->backing_file_depth++;
            bs0 = bs0->backing->bs;
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        while (blk && bs0->drv && bs0->implicit) {
            bs0 = backing_bs(bs0);
            assert(bs0);
        }
    }

    return info;
}

/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots.  Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *info_list, *cur_item = NULL, *head = NULL;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;

        info_list = g_new0(SnapshotInfoList, 1);
        info_list->value = info;

        /* XXX: waiting for the qapi to support qemu-queue.h types */
        if (!cur_item) {
            head = cur_item = info_list;
        } else {
            cur_item->next = info_list;
            cur_item = info_list;
        }
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}

/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*p_info)->has_backing_image will be set to false and
 * (*p_info)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename = g_strdup(bs->filename);
    info->format = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size = size;
    info->actual_size = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bdrv_is_encrypted(bs)) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    info->has_format_specific = info->format_specific != NULL;

    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;
        info->backing_filename = g_strdup(backing_filename);
        info->has_backing_filename = true;
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's
         * the same as backing_filename. That they are the same is useful
         * info. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
            info->has_full_backing_filename = true;
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
            info->has_backing_filename_format = true;
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}

/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    while (bs && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
    }

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

 err:
    qapi_free_BlockInfo(info);
}

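/* Convert a C array of @size uint64_t values into a QAPI uint64List. */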
static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **pout_list = &out_list;

    for (i = 0; i < size; i++) {
        uint64List *entry = g_new(uint64List, 1);
        entry->value = list[i];
        *pout_list = entry;
        pout_list = &entry->next;
    }

    *pout_list = NULL;

    return out_list;
}

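/*
 * Fill *@info from @hist.  *@not_null tells the caller whether a histogram
 * has been set up at all; when it has not, *@info is left untouched.
 */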
static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}

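/*
 * Fill @ds with the accounting data of @blk: byte and request counters,
 * failed/invalid/merged operation counts, total and idle times, the
 * configured per-interval statistics and, if set up, the latency histograms.
 */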
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations = stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}

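/*
 * Build the BlockStats tree for @bs, recursing into bs->file and, for a
 * BlockBackend-level query (@blk_level), into the backing chain while
 * skipping automatically inserted nodes.  A NULL @bs yields an all-zero
 * result.
 */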
static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    while (blk_level && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
        assert(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);
    if (s->driver_specific) {
        s->has_driver_specific = true;
    }

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(bs->file->bs, blk_level);
    }

    if (blk_level && bs->backing) {
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(bs->backing->bs, blk_level);
    }

    return s;
}

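/*
 * QMP handler for query-block: return one BlockInfo for every BlockBackend
 * that is either named or attached to a device.
 */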
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

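/*
 * QMP handler for query-blockstats.  With query-nodes=true, statistics are
 * reported for every named block graph node; otherwise they are reported
 * per BlockBackend (named or attached), including its backing chain.
 */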
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            BlockStatsList *info;
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info = g_malloc0(sizeof(*info));
            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}

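/*
 * Print one snapshot-list line describing @sn, or the column headers when
 * @sn is NULL.  No trailing newline is printed; the caller adds it.
 */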
void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

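/*
 * Recursively pretty-print @obj at @comp_indent levels of indentation:
 * scalars are printed in place, dicts and lists one member per line via
 * dump_qdict() and dump_qlist().
 */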
static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}

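/*
 * Pretty-print @info_spec by converting it back into a QDict with the
 * QObject output visitor and dumping its "data" member.
 */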
void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}

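/*
 * Print a human-readable summary of @info: size and format, encryption and
 * dirty flags, backing file details, the snapshot list and any
 * format-specific information.
 */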
void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}