qemu/monitor/qmp-cmds.c

/*
 * QEMU Management Protocol commands
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/sockets.h"
#include "monitor-internal.h"
#include "monitor/qdev.h"
#include "monitor/qmp-helpers.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/runstate-action.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-init-commands.h"
#include "qapi/qapi-commands-control.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qmp/qerror.h"
#include "qapi/type-helpers.h"
#include "hw/mem/memory-device.h"
#include "hw/intc/intc.h"
#include "hw/rdma/rdma.h"
NameInfo *qmp_query_name(Error **errp)
{
    NameInfo *info = g_malloc0(sizeof(*info));

    info->name = g_strdup(qemu_name);
    return info;
}
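
/*
 * qmp_quit() implements the "quit" command: it requests an orderly shutdown
 * of the QEMU process itself.  Illustrative exchange:
 *
 *   -> { "execute": "quit" }
 *   <- { "return": {} }
 */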
void qmp_quit(Error **errp)
{
    shutdown_action = SHUTDOWN_ACTION_POWEROFF;
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_QMP_QUIT);
}
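
/*
 * qmp_stop() implements "stop", which pauses guest CPU execution (or, while
 * an incoming migration is still in RUN_STATE_INMIGRATE, merely clears
 * autostart so the guest stays paused once migration completes).
 * Illustrative exchange:
 *
 *   -> { "execute": "stop" }
 *   <- { "return": {} }
 */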
void qmp_stop(Error **errp)
{
    /*
     * If there is a dump in progress in the background, we have to wait
     * until it has finished.
     */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        autostart = 0;
    } else {
        vm_stop(RUN_STATE_PAUSED);
    }
}
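
/*
 * qmp_cont() implements "cont", the counterpart of "stop": it resets block
 * device I/O status, re-activates block nodes that were inactivated for an
 * outgoing migration, and resumes the guest (or re-arms autostart during an
 * incoming migration).  Illustrative exchange:
 *
 *   -> { "execute": "cont" }
 *   <- { "return": {} }
 */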
void qmp_cont(Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    Error *local_err = NULL;

    /*
     * If there is a dump in progress in the background, we have to wait
     * until it has finished.
     */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    if (runstate_needs_reset()) {
        error_setg(errp, "Resetting the Virtual Machine is required");
        return;
    } else if (runstate_check(RUN_STATE_SUSPENDED)) {
        return;
    } else if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        error_setg(errp, "Migration is not finalized yet");
        return;
    }

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        blk_iostatus_reset(blk);
    }

    WITH_JOB_LOCK_GUARD() {
        for (job = block_job_next_locked(NULL); job;
             job = block_job_next_locked(job)) {
            block_job_iostatus_reset_locked(job);
        }
    }

    /*
     * Continuing after completed migration. Images have been inactivated to
     * allow the destination to take control. Need to get control back now.
     *
     * If there are no inactive block nodes (e.g. because the VM was just
     * paused rather than completing a migration), bdrv_activate_all() simply
     * doesn't do anything.
     */
    bdrv_activate_all(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        autostart = 1;
    } else {
        vm_start();
    }
}
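
/*
 * qmp_add_client() hands a previously passed-in socket fd to a display or
 * character backend.  The fd is typically transferred first with "getfd".
 * Illustrative exchange (the fd name "fd0" is just an example):
 *
 *   -> { "execute": "add_client",
 *        "arguments": { "protocol": "vnc", "fdname": "fd0" } }
 *   <- { "return": {} }
 */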
void qmp_add_client(const char *protocol, const char *fdname,
                    bool has_skipauth, bool skipauth, bool has_tls, bool tls,
                    Error **errp)
{
    static const struct {
        const char *name;
        bool (*add_client)(int fd, bool has_skipauth, bool skipauth,
                           bool has_tls, bool tls, Error **errp);
    } protocol_table[] = {
        { "spice", qmp_add_client_spice },
#ifdef CONFIG_VNC
        { "vnc", qmp_add_client_vnc },
#endif
#ifdef CONFIG_DBUS_DISPLAY
        { "@dbus-display", qmp_add_client_dbus_display },
#endif
    };
    int fd, i;

    fd = monitor_get_fd(monitor_cur(), fdname, errp);
    if (fd < 0) {
        return;
    }

    if (!fd_is_socket(fd)) {
        error_setg(errp, "parameter @fdname must name a socket");
        close(fd);
        return;
    }

    for (i = 0; i < ARRAY_SIZE(protocol_table); i++) {
        if (!strcmp(protocol, protocol_table[i].name)) {
            if (!protocol_table[i].add_client(fd, has_skipauth, skipauth,
                                              has_tls, tls, errp)) {
                close(fd);
            }
            return;
        }
    }

    if (!qmp_add_client_char(fd, has_skipauth, skipauth, has_tls, tls,
                             protocol, errp)) {
        close(fd);
    }
}
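
/*
 * qmp_human_monitor_command() implements "human-monitor-command": it runs
 * one line of HMP through a throw-away HMP monitor and returns the captured
 * output as a string.  Illustrative exchange (the exact output text depends
 * on the command and the VM state):
 *
 *   -> { "execute": "human-monitor-command",
 *        "arguments": { "command-line": "info status" } }
 *   <- { "return": "VM status: running\r\n" }
 */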
char *qmp_human_monitor_command(const char *command_line, bool has_cpu_index,
                                int64_t cpu_index, Error **errp)
{
    char *output = NULL;
    MonitorHMP hmp = {};

    monitor_data_init(&hmp.common, false, true, false);

    if (has_cpu_index) {
        int ret = monitor_set_cpu(&hmp.common, cpu_index);
        if (ret < 0) {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                       "a CPU number");
            goto out;
        }
    }

    handle_hmp_command(&hmp, command_line);

    WITH_QEMU_LOCK_GUARD(&hmp.common.mon_lock) {
        output = g_strdup(hmp.common.outbuf->str);
    }

out:
    monitor_data_destroy(&hmp.common);
    return output;
}
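
/*
 * Registered as a constructor, so this runs before main(): it installs the
 * generated QMP marshallers and the two commands registered by hand below.
 */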
static void __attribute__((__constructor__)) monitor_init_qmp_commands(void)
{
    /*
     * Two command lists:
     * - qmp_commands contains all QMP commands
     * - qmp_cap_negotiation_commands contains just
     *   "qmp_capabilities", to enforce capability negotiation
     */
    qmp_init_marshal(&qmp_commands);

    qmp_register_command(&qmp_commands, "device_add",
                         qmp_device_add, 0, 0);

    QTAILQ_INIT(&qmp_cap_negotiation_commands);
    qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities",
                         qmp_marshal_qmp_capabilities,
                         QCO_ALLOW_PRECONFIG, 0);
}