Merge remote-tracking branch 'remotes/amit-migration/tags/migration-for-2.6-2' into staging

Migration pull req.

Small fixes, nothing major.

# gpg: Signature made Fri 05 Feb 2016 13:51:30 GMT using RSA key ID 854083B6
# gpg: Good signature from "Amit Shah <amit@amitshah.net>"
# gpg:                 aka "Amit Shah <amit@kernel.org>"
# gpg:                 aka "Amit Shah <amitshah@gmx.net>"

* remotes/amit-migration/tags/migration-for-2.6-2:
  migration: fix bad string passed to error_report()
  static checker: e1000-82540em got aliased to e1000
  migration: remove useless code.
  qmp-commands.hx: Document the missing options for migration capability commands
  qmp-commands.hx: Fix the missing options for migration parameters commands
  migration/ram: Fix some helper functions' parameter to use PageSearchStatus
  savevm: Split load vm state function qemu_loadvm_state
  migration: rename 'file' in MigrationState to 'to_dst_file'
  ram: Split host_from_stream_offset() into two helper functions

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ee8e8f92a7
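Two of the series above reshape the RAM migration helpers: "migration/ram: Fix some helper functions' parameter to use PageSearchStatus" threads a single page-search state through ram_save_page(), ram_save_compressed_page(), ram_save_target_page() and ram_save_host_page() instead of separate block/offset arguments. As rough orientation before the hunks below, here is a simplified sketch of the calling convention; the real PageSearchStatus lives in QEMU's RAM migration code and is not part of this diff, so the field list is an assumption based only on the pss->block and pss->offset uses visible in the hunks:

    /* Hypothetical, trimmed-down sketch -- not QEMU's actual definition. */
    typedef struct PageSearchStatus {
        RAMBlock   *block;   /* RAM block currently being scanned */
        ram_addr_t  offset;  /* offset of the candidate page within that block */
    } PageSearchStatus;

    /* Callers now pass one state object and let the helpers advance it:
     *     pages = ram_save_host_page(ms, f, &pss, last_stage,
     *                                bytes_transferred, dirty_ram_abs);
     * The helpers read pss->block / pss->offset and bump pss->offset page by
     * page, as the ram migration hunks below show.
     */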
@@ -38,10 +38,14 @@ struct RAMBlock {
     int fd;
 };
 
+static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
+{
+    return (b && b->host && offset < b->used_length) ? true : false;
+}
+
 static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
 {
-    assert(offset < block->used_length);
-    assert(block->host);
+    assert(offset_in_ramblock(block, offset));
     return (char *)block->host + offset;
 }
 
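The new offset_in_ramblock() helper above collapses the two assertions into a single bounds-and-host check that can also be used as an ordinary predicate on untrusted offsets. A minimal sketch of the intended pattern, assuming a caller that prefers returning NULL over asserting (this mirrors host_from_ram_block_offset(), added to the RAM load path later in this diff):

    /* Illustrative helper, not part of the patch: resolve (block, offset) to a
     * host pointer, or NULL when the offset does not fall inside the block. */
    static inline void *lookup_host_addr(RAMBlock *block, ram_addr_t offset)
    {
        if (!offset_in_ramblock(block, offset)) {
            return NULL;                        /* let the caller report the error */
        }
        return ramblock_ptr(block, offset);     /* its assertion can no longer fire */
    }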
@@ -133,7 +133,7 @@ struct MigrationState
     size_t xfer_limit;
     QemuThread thread;
     QEMUBH *cleanup_bh;
-    QEMUFile *file;
+    QEMUFile *to_dst_file;
     int parameters[MIGRATION_PARAMETER__MAX];
 
     int state;
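The rename makes the outgoing stream's role explicit and pairs it with the return-path file that the source reads back from the destination. Roughly, and only as far as this diff shows (the real struct carries many more members):

    /* Naming convention after the rename -- a sketch, not the full struct. */
    struct MigrationState {
        QEMUFile *to_dst_file;          /* outgoing stream written by the source */
        struct {
            QEMUFile *from_dst_file;    /* postcopy return path read by the source */
            /* ... */
        } rp_state;
        /* ... */
    };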
@@ -36,8 +36,8 @@
 
 void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp)
 {
-    s->file = qemu_popen_cmd(command, "w");
-    if (s->file == NULL) {
+    s->to_dst_file = qemu_popen_cmd(command, "w");
+    if (s->to_dst_file == NULL) {
         error_setg_errno(errp, errno, "failed to popen the migration target");
         return;
     }
@@ -51,9 +51,9 @@ void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **
     }
 
     if (fd_is_socket(fd)) {
-        s->file = qemu_fopen_socket(fd, "wb");
+        s->to_dst_file = qemu_fopen_socket(fd, "wb");
     } else {
-        s->file = qemu_fdopen(fd, "wb");
+        s->to_dst_file = qemu_fdopen(fd, "wb");
     }
 
     migrate_fd_connect(s);
@@ -809,7 +809,7 @@ static void migrate_fd_cleanup(void *opaque)
 
     flush_page_queue(s);
 
-    if (s->file) {
+    if (s->to_dst_file) {
         trace_migrate_fd_cleanup();
         qemu_mutex_unlock_iothread();
         if (s->migration_thread_running) {
@@ -819,8 +819,8 @@ static void migrate_fd_cleanup(void *opaque)
         qemu_mutex_lock_iothread();
 
         migrate_compress_threads_join();
-        qemu_fclose(s->file);
-        s->file = NULL;
+        qemu_fclose(s->to_dst_file);
+        s->to_dst_file = NULL;
     }
 
     assert((s->state != MIGRATION_STATUS_ACTIVE) &&
@@ -837,7 +837,7 @@ static void migrate_fd_cleanup(void *opaque)
 void migrate_fd_error(MigrationState *s)
 {
     trace_migrate_fd_error();
-    assert(s->file == NULL);
+    assert(s->to_dst_file == NULL);
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_FAILED);
     notifier_list_notify(&migration_state_notifiers, s);
@@ -846,7 +846,7 @@ void migrate_fd_error(MigrationState *s)
 static void migrate_fd_cancel(MigrationState *s)
 {
     int old_state ;
-    QEMUFile *f = migrate_get_current()->file;
+    QEMUFile *f = migrate_get_current()->to_dst_file;
     trace_migrate_fd_cancel();
 
     if (s->rp_state.from_dst_file) {
@@ -917,7 +917,7 @@ MigrationState *migrate_init(const MigrationParams *params)
     s->bytes_xfer = 0;
     s->xfer_limit = 0;
     s->cleanup_bh = 0;
-    s->file = NULL;
+    s->to_dst_file = NULL;
     s->state = MIGRATION_STATUS_NONE;
     s->params = *params;
     s->rp_state.from_dst_file = NULL;
@@ -1007,12 +1007,6 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
         return;
     }
 
-    /* We are starting a new migration, so we want to start in a clean
-       state. This change is only needed if previous migration
-       failed/was cancelled. We don't use migrate_set_state() because
-       we are setting the initial state, not changing it. */
-    s->state = MIGRATION_STATUS_NONE;
-
     s = migrate_init(&params);
 
     if (strstart(uri, "tcp:", &p)) {
@@ -1096,8 +1090,9 @@ void qmp_migrate_set_speed(int64_t value, Error **errp)
 
     s = migrate_get_current();
     s->bandwidth_limit = value;
-    if (s->file) {
-        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
+    if (s->to_dst_file) {
+        qemu_file_set_rate_limit(s->to_dst_file,
+                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
     }
 }
 
@@ -1367,7 +1362,7 @@ out:
 static int open_return_path_on_source(MigrationState *ms)
 {
 
-    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->file);
+    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
     if (!ms->rp_state.from_dst_file) {
         return -1;
     }
@@ -1389,7 +1384,7 @@ static int await_return_path_close_on_source(MigrationState *ms)
      * rp_thread will exit, however if there's an error we need to cause
      * it to exit.
      */
-    if (qemu_file_get_error(ms->file) && ms->rp_state.from_dst_file) {
+    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
         /*
          * shutdown(2), if we have it, will cause it to unblock if it's stuck
          * waiting for the destination.
@@ -1436,7 +1431,7 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * Cause any non-postcopiable, but iterative devices to
      * send out their final data.
      */
-    qemu_savevm_state_complete_precopy(ms->file, true);
+    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
 
     /*
      * in Finish migrate and with the io-lock held everything should
@@ -1454,9 +1449,9 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * will notice we're in POSTCOPY_ACTIVE and not actually
      * wrap their state up here
      */
-    qemu_file_set_rate_limit(ms->file, INT64_MAX);
+    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
     /* Ping just for debugging, helps line traces up */
-    qemu_savevm_send_ping(ms->file, 2);
+    qemu_savevm_send_ping(ms->to_dst_file, 2);
 
     /*
      * While loading the device state we may trigger page transfer
@@ -1490,7 +1485,7 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
     qsb = qemu_buf_get(fb);
 
     /* Now send that blob */
-    if (qemu_savevm_send_packaged(ms->file, qsb)) {
+    if (qemu_savevm_send_packaged(ms->to_dst_file, qsb)) {
         goto fail_closefb;
     }
     qemu_fclose(fb);
@@ -1502,9 +1497,9 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * Although this ping is just for debug, it could potentially be
      * used for getting a better measurement of downtime at the source.
      */
-    qemu_savevm_send_ping(ms->file, 4);
+    qemu_savevm_send_ping(ms->to_dst_file, 4);
 
-    ret = qemu_file_get_error(ms->file);
+    ret = qemu_file_get_error(ms->to_dst_file);
     if (ret) {
         error_report("postcopy_start: Migration stream errored");
         migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
@@ -1550,8 +1545,8 @@ static void migration_completion(MigrationState *s, int current_active_state,
                 ret = bdrv_inactivate_all();
             }
             if (ret >= 0) {
-                qemu_file_set_rate_limit(s->file, INT64_MAX);
-                qemu_savevm_state_complete_precopy(s->file, false);
+                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
+                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
             }
         }
         qemu_mutex_unlock_iothread();
@@ -1562,7 +1557,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
         trace_migration_completion_postcopy_end();
 
-        qemu_savevm_state_complete_postcopy(s->file);
+        qemu_savevm_state_complete_postcopy(s->to_dst_file);
         trace_migration_completion_postcopy_end_after_complete();
     }
 
@@ -1583,7 +1578,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
         }
     }
 
-    if (qemu_file_get_error(s->file)) {
+    if (qemu_file_get_error(s->to_dst_file)) {
         trace_migration_completion_file_err();
         goto fail;
     }
@@ -1618,24 +1613,24 @@ static void *migration_thread(void *opaque)
 
     rcu_register_thread();
 
-    qemu_savevm_state_header(s->file);
+    qemu_savevm_state_header(s->to_dst_file);
 
     if (migrate_postcopy_ram()) {
         /* Now tell the dest that it should open its end so it can reply */
-        qemu_savevm_send_open_return_path(s->file);
+        qemu_savevm_send_open_return_path(s->to_dst_file);
 
         /* And do a ping that will make stuff easier to debug */
-        qemu_savevm_send_ping(s->file, 1);
+        qemu_savevm_send_ping(s->to_dst_file, 1);
 
         /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
-        qemu_savevm_send_postcopy_advise(s->file);
+        qemu_savevm_send_postcopy_advise(s->to_dst_file);
     }
 
-    qemu_savevm_state_begin(s->file, &s->params);
+    qemu_savevm_state_begin(s->to_dst_file, &s->params);
 
     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
     current_active_state = MIGRATION_STATUS_ACTIVE;
@@ -1649,10 +1644,10 @@ static void *migration_thread(void *opaque)
         int64_t current_time;
         uint64_t pending_size;
 
-        if (!qemu_file_rate_limit(s->file)) {
+        if (!qemu_file_rate_limit(s->to_dst_file)) {
             uint64_t pend_post, pend_nonpost;
 
-            qemu_savevm_state_pending(s->file, max_size, &pend_nonpost,
+            qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
                                       &pend_post);
             pending_size = pend_nonpost + pend_post;
             trace_migrate_pending(pending_size, max_size,
@@ -1673,7 +1668,7 @@ static void *migration_thread(void *opaque)
                 continue;
             }
             /* Just another iteration step */
-            qemu_savevm_state_iterate(s->file, entered_postcopy);
+            qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
         } else {
             trace_migration_thread_low_pending(pending_size);
             migration_completion(s, current_active_state,
@@ -1682,7 +1677,7 @@ static void *migration_thread(void *opaque)
             }
         }
 
-        if (qemu_file_get_error(s->file)) {
+        if (qemu_file_get_error(s->to_dst_file)) {
             migrate_set_state(&s->state, current_active_state,
                               MIGRATION_STATUS_FAILED);
             trace_migration_thread_file_err();
@@ -1690,7 +1685,8 @@ static void *migration_thread(void *opaque)
         }
         current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
         if (current_time >= initial_time + BUFFER_DELAY) {
-            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
+            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
+                                         initial_bytes;
             uint64_t time_spent = current_time - initial_time;
             double bandwidth = (double)transferred_bytes / time_spent;
             max_size = bandwidth * migrate_max_downtime() / 1000000;
@@ -1706,11 +1702,11 @@ static void *migration_thread(void *opaque)
                 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
             }
 
-            qemu_file_reset_rate_limit(s->file);
+            qemu_file_reset_rate_limit(s->to_dst_file);
             initial_time = current_time;
-            initial_bytes = qemu_ftell(s->file);
+            initial_bytes = qemu_ftell(s->to_dst_file);
         }
-        if (qemu_file_rate_limit(s->file)) {
+        if (qemu_file_rate_limit(s->to_dst_file)) {
             /* usleep expects microseconds */
             g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
         }
@@ -1724,7 +1720,7 @@ static void *migration_thread(void *opaque)
     qemu_mutex_lock_iothread();
     qemu_savevm_state_cleanup();
     if (s->state == MIGRATION_STATUS_COMPLETED) {
-        uint64_t transferred_bytes = qemu_ftell(s->file);
+        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
         s->total_time = end_time - s->total_time;
         if (!entered_postcopy) {
             s->downtime = end_time - start_time;
@@ -1752,7 +1748,7 @@ void migrate_fd_connect(MigrationState *s)
     s->expected_downtime = max_downtime/1000000;
     s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
 
-    qemu_file_set_rate_limit(s->file,
+    qemu_file_set_rate_limit(s->to_dst_file,
                              s->bandwidth_limit / XFER_LIMIT_RATIO);
 
     /* Notify before starting migration thread */
@@ -725,7 +725,8 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
 
     if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
         /* Full set, ship it! */
-        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
+        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
+                                              pds->ramblock_name,
                                               pds->cur_entry,
                                               pds->start_list,
                                               pds->length_list);
@@ -745,7 +746,8 @@ void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
 {
     /* Anything unsent? */
     if (pds->cur_entry) {
-        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
+        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
+                                              pds->ramblock_name,
                                               pds->cur_entry,
                                               pds->start_list,
                                               pds->length_list);
@@ -729,7 +729,7 @@ static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
  * @last_stage: if we are at the completion stage
  * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
+static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
                          bool last_stage, uint64_t *bytes_transferred)
 {
     int pages = -1;
@@ -738,6 +738,8 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
     uint8_t *p;
     int ret;
     bool send_async = true;
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = pss->offset;
 
     p = block->host + offset;
 
@@ -912,14 +914,16 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
  * @last_stage: if we are at the completion stage
  * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
-                                    ram_addr_t offset, bool last_stage,
+static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
+                                    bool last_stage,
                                     uint64_t *bytes_transferred)
 {
     int pages = -1;
     uint64_t bytes_xmit;
     uint8_t *p;
     int ret;
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = pss->offset;
 
     p = block->host + offset;
 
@@ -1229,7 +1233,7 @@ err:
  * Returns: Number of pages written.
  */
 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
-                                RAMBlock *block, ram_addr_t offset,
+                                PageSearchStatus *pss,
                                 bool last_stage,
                                 uint64_t *bytes_transferred,
                                 ram_addr_t dirty_ram_abs)
@@ -1240,11 +1244,11 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
         unsigned long *unsentmap;
         if (compression_switch && migrate_use_compression()) {
-            res = ram_save_compressed_page(f, block, offset,
+            res = ram_save_compressed_page(f, pss,
                                            last_stage,
                                            bytes_transferred);
         } else {
-            res = ram_save_page(f, block, offset, last_stage,
+            res = ram_save_page(f, pss, last_stage,
                                 bytes_transferred);
         }
 
@@ -1260,7 +1264,7 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
          * to the stream.
          */
         if (res > 0) {
-            last_sent_block = block;
+            last_sent_block = pss->block;
        }
     }
 
@@ -1284,26 +1288,27 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
  * @bytes_transferred: increase it with the number of transferred bytes
  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
-                              ram_addr_t *offset, bool last_stage,
+static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
+                              PageSearchStatus *pss,
+                              bool last_stage,
                               uint64_t *bytes_transferred,
                               ram_addr_t dirty_ram_abs)
 {
     int tmppages, pages = 0;
     do {
-        tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
+        tmppages = ram_save_target_page(ms, f, pss, last_stage,
                                         bytes_transferred, dirty_ram_abs);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
-        *offset += TARGET_PAGE_SIZE;
+        pss->offset += TARGET_PAGE_SIZE;
         dirty_ram_abs += TARGET_PAGE_SIZE;
-    } while (*offset & (qemu_host_page_size - 1));
+    } while (pss->offset & (qemu_host_page_size - 1));
 
     /* The offset we leave with is the last one we looked at */
-    *offset -= TARGET_PAGE_SIZE;
+    pss->offset -= TARGET_PAGE_SIZE;
     return pages;
 }
 
@@ -1351,7 +1356,7 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
         }
 
         if (found) {
-            pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
+            pages = ram_save_host_page(ms, f, &pss,
                                        last_stage, bytes_transferred,
                                        dirty_ram_abs);
         }
@@ -2124,28 +2129,24 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
  * Returns a pointer from within the RCU-protected ram_list.
  */
 /*
- * Read a RAMBlock ID from the stream f, find the host address of the
- * start of that block and add on 'offset'
+ * Read a RAMBlock ID from the stream f.
  *
  * f: Stream to read from
- * offset: Offset within the block
 * flags: Page flags (mostly to see if it's a continuation of previous block)
  */
-static inline void *host_from_stream_offset(QEMUFile *f,
-                                            ram_addr_t offset,
-                                            int flags)
+static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
+                                              int flags)
 {
     static RAMBlock *block = NULL;
     char id[256];
     uint8_t len;
 
     if (flags & RAM_SAVE_FLAG_CONTINUE) {
-        if (!block || block->max_length <= offset) {
+        if (!block) {
             error_report("Ack, bad migration stream!");
             return NULL;
         }
 
-        return block->host + offset;
+        return block;
     }
 
     len = qemu_get_byte(f);
@@ -2153,12 +2154,22 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     id[len] = 0;
 
     block = qemu_ram_block_by_name(id);
-    if (block && block->max_length > offset) {
-        return block->host + offset;
+    if (!block) {
+        error_report("Can't find block %s", id);
+        return NULL;
     }
 
-    error_report("Can't find block %s", id);
-    return NULL;
+    return block;
+}
+
+static inline void *host_from_ram_block_offset(RAMBlock *block,
+                                               ram_addr_t offset)
+{
+    if (!offset_in_ramblock(block, offset)) {
+        return NULL;
+    }
+
+    return block->host + offset;
 }
 
 /*
@@ -2302,7 +2313,9 @@ static int ram_load_postcopy(QEMUFile *f)
         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
-            host = host_from_stream_offset(f, addr, flags);
+            RAMBlock *block = ram_block_from_stream(f, flags);
+
+            host = host_from_ram_block_offset(block, addr);
             if (!host) {
                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
@@ -2433,7 +2446,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 
         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
-            host = host_from_stream_offset(f, addr, flags);
+            RAMBlock *block = ram_block_from_stream(f, flags);
+
+            host = host_from_ram_block_offset(block, addr);
             if (!host) {
                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
@@ -3504,7 +3504,7 @@ void rdma_start_outgoing_migration(void *opaque,
 
     trace_rdma_start_outgoing_migration_after_rdma_connect();
 
-    s->file = qemu_fopen_rdma(rdma, "wb");
+    s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
     migrate_fd_connect(s);
     return;
 err:
@@ -299,8 +299,8 @@ static int configuration_post_load(void *opaque, int version_id)
     const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
 
     if (strncmp(state->name, current_name, state->len) != 0) {
-        error_report("Machine type received is '%s' and local is '%s'",
-                     state->name, current_name);
+        error_report("Machine type received is '%.*s' and local is '%s'",
+                     (int) state->len, state->name, current_name);
         return -EINVAL;
     }
     return 0;
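The error_report() fix above matters because the machine-type name received from the stream carries an explicit length and need not be NUL-terminated, so a plain "%s" could read past the buffer. The "%.*s" conversion takes the length as an argument and prints at most that many characters. A standalone illustration in plain C (not QEMU code):

    #include <stdio.h>

    int main(void)
    {
        /* A name received with an explicit length and no trailing '\0'. */
        const char name[9] = { 'p', 'c', '-', 'i', '4', '4', '0', 'f', 'x' };
        int len = 9;

        /* "%.*s" consumes the length first, then prints exactly that many bytes. */
        printf("Machine type received is '%.*s'\n", len, name);
        return 0;
    }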
@@ -1163,7 +1163,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
         .shared = 0
     };
     MigrationState *ms = migrate_init(&params);
-    ms->file = f;
+    ms->to_dst_file = f;
 
     if (qemu_savevm_state_blocked(errp)) {
         return -EINVAL;
@@ -1718,90 +1718,118 @@ void loadvm_free_handlers(MigrationIncomingState *mis)
     }
 }
 
+static int
+qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
+{
+    uint32_t instance_id, version_id, section_id;
+    SaveStateEntry *se;
+    LoadStateEntry *le;
+    char idstr[256];
+    int ret;
+
+    /* Read section start */
+    section_id = qemu_get_be32(f);
+    if (!qemu_get_counted_string(f, idstr)) {
+        error_report("Unable to read ID string for section %u",
+                     section_id);
+        return -EINVAL;
+    }
+    instance_id = qemu_get_be32(f);
+    version_id = qemu_get_be32(f);
+
+    trace_qemu_loadvm_state_section_startfull(section_id, idstr,
+                                              instance_id, version_id);
+    /* Find savevm section */
+    se = find_se(idstr, instance_id);
+    if (se == NULL) {
+        error_report("Unknown savevm section or instance '%s' %d",
+                     idstr, instance_id);
+        return -EINVAL;
+    }
+
+    /* Validate version */
+    if (version_id > se->version_id) {
+        error_report("savevm: unsupported version %d for '%s' v%d",
+                     version_id, idstr, se->version_id);
+        return -EINVAL;
+    }
+
+    /* Add entry */
+    le = g_malloc0(sizeof(*le));
+
+    le->se = se;
+    le->section_id = section_id;
+    le->version_id = version_id;
+    QLIST_INSERT_HEAD(&mis->loadvm_handlers, le, entry);
+
+    ret = vmstate_load(f, le->se, le->version_id);
+    if (ret < 0) {
+        error_report("error while loading state for instance 0x%x of"
+                     " device '%s'", instance_id, idstr);
+        return ret;
+    }
+    if (!check_section_footer(f, le)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int
+qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
+{
+    uint32_t section_id;
+    LoadStateEntry *le;
+    int ret;
+
+    section_id = qemu_get_be32(f);
+
+    trace_qemu_loadvm_state_section_partend(section_id);
+    QLIST_FOREACH(le, &mis->loadvm_handlers, entry) {
+        if (le->section_id == section_id) {
+            break;
+        }
+    }
+    if (le == NULL) {
+        error_report("Unknown savevm section %d", section_id);
+        return -EINVAL;
+    }
+
+    ret = vmstate_load(f, le->se, le->version_id);
+    if (ret < 0) {
+        error_report("error while loading state section id %d(%s)",
+                     section_id, le->se->idstr);
+        return ret;
+    }
+    if (!check_section_footer(f, le)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
 {
     uint8_t section_type;
     int ret;
 
     while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
-        uint32_t instance_id, version_id, section_id;
-        SaveStateEntry *se;
-        LoadStateEntry *le;
-        char idstr[256];
-
         trace_qemu_loadvm_state_section(section_type);
         switch (section_type) {
         case QEMU_VM_SECTION_START:
         case QEMU_VM_SECTION_FULL:
-            /* Read section start */
-            section_id = qemu_get_be32(f);
-            if (!qemu_get_counted_string(f, idstr)) {
-                error_report("Unable to read ID string for section %u",
-                             section_id);
-                return -EINVAL;
-            }
-            instance_id = qemu_get_be32(f);
-            version_id = qemu_get_be32(f);
-
-            trace_qemu_loadvm_state_section_startfull(section_id, idstr,
-                                                      instance_id, version_id);
-            /* Find savevm section */
-            se = find_se(idstr, instance_id);
-            if (se == NULL) {
-                error_report("Unknown savevm section or instance '%s' %d",
-                             idstr, instance_id);
-                return -EINVAL;
-            }
-
-            /* Validate version */
-            if (version_id > se->version_id) {
-                error_report("savevm: unsupported version %d for '%s' v%d",
-                             version_id, idstr, se->version_id);
-                return -EINVAL;
-            }
-
-            /* Add entry */
-            le = g_malloc0(sizeof(*le));
-
-            le->se = se;
-            le->section_id = section_id;
-            le->version_id = version_id;
-            QLIST_INSERT_HEAD(&mis->loadvm_handlers, le, entry);
-
-            ret = vmstate_load(f, le->se, le->version_id);
+            ret = qemu_loadvm_section_start_full(f, mis);
             if (ret < 0) {
-                error_report("error while loading state for instance 0x%x of"
-                             " device '%s'", instance_id, idstr);
                 return ret;
             }
-            if (!check_section_footer(f, le)) {
-                return -EINVAL;
-            }
             break;
         case QEMU_VM_SECTION_PART:
         case QEMU_VM_SECTION_END:
-            section_id = qemu_get_be32(f);
-
-            trace_qemu_loadvm_state_section_partend(section_id);
-            QLIST_FOREACH(le, &mis->loadvm_handlers, entry) {
-                if (le->section_id == section_id) {
-                    break;
-                }
-            }
-            if (le == NULL) {
-                error_report("Unknown savevm section %d", section_id);
-                return -EINVAL;
-            }
-
-            ret = vmstate_load(f, le->se, le->version_id);
+            ret = qemu_loadvm_section_part_end(f, mis);
             if (ret < 0) {
-                error_report("error while loading state section id %d(%s)",
-                             section_id, le->se->idstr);
                 return ret;
             }
-            if (!check_section_footer(f, le)) {
-                return -EINVAL;
-            }
             break;
         case QEMU_VM_COMMAND:
             ret = loadvm_process_command(f);
@@ -39,11 +39,11 @@ static void tcp_wait_for_connect(int fd, Error *err, void *opaque)
 
     if (fd < 0) {
         DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
-        s->file = NULL;
+        s->to_dst_file = NULL;
         migrate_fd_error(s);
     } else {
         DPRINTF("migrate connect success\n");
-        s->file = qemu_fopen_socket(fd, "wb");
+        s->to_dst_file = qemu_fopen_socket(fd, "wb");
         migrate_fd_connect(s);
     }
 }
@@ -39,11 +39,11 @@ static void unix_wait_for_connect(int fd, Error *err, void *opaque)
 
     if (fd < 0) {
        DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
-        s->file = NULL;
+        s->to_dst_file = NULL;
        migrate_fd_error(s);
     } else {
        DPRINTF("migrate connect success\n");
-        s->file = qemu_fopen_socket(fd, "wb");
+        s->to_dst_file = qemu_fopen_socket(fd, "wb");
        migrate_fd_connect(s);
     }
 }
@@ -3647,7 +3647,9 @@ Enable/Disable migration capabilities
 - "rdma-pin-all": pin all pages when using RDMA during migration
 - "auto-converge": throttle down guest to help convergence of migration
 - "zero-blocks": compress zero blocks during block migration
+- "compress": use multiple compression threads to accelerate live migration
+- "events": generate events for each migration state change
 - "x-postcopy-ram": postcopy mode for live migration
 
 Arguments:
 
@@ -3675,13 +3677,24 @@ Query current migration capabilities
 - "rdma-pin-all" : RDMA Pin Page state (json-bool)
 - "auto-converge" : Auto Converge state (json-bool)
 - "zero-blocks" : Zero Blocks state (json-bool)
+- "compress": Multiple compression threads state (json-bool)
+- "events": Migration state change event state (json-bool)
 - "x-postcopy-ram": postcopy ram state (json-bool)
 
 Arguments:
 
 Example:
 
 -> { "execute": "query-migrate-capabilities" }
-<- { "return": [ { "state": false, "capability": "xbzrle" } ] }
+<- {"return": [
+     {"state": false, "capability": "xbzrle"},
+     {"state": false, "capability": "rdma-pin-all"},
+     {"state": false, "capability": "auto-converge"},
+     {"state": false, "capability": "zero-blocks"},
+     {"state": false, "capability": "compress"},
+     {"state": true, "capability": "events"},
+     {"state": false, "capability": "x-postcopy-ram"}
+   ]}
 
 EQMP
 
@@ -3700,6 +3713,10 @@ Set migration parameters
 - "compress-level": set compression level during migration (json-int)
 - "compress-threads": set compression thread count for migration (json-int)
 - "decompress-threads": set decompression thread count for migration (json-int)
+- "x-cpu-throttle-initial": set initial percentage of time guest cpus are
+                            throttled for auto-converge (json-int)
+- "x-cpu-throttle-increment": set throttle increasing percentage for
+                              auto-converge (json-int)
 
 Arguments:
 
@@ -3713,7 +3730,7 @@ EQMP
     {
         .name       = "migrate-set-parameters",
         .args_type  =
-            "compress-level:i?,compress-threads:i?,decompress-threads:i?",
+            "compress-level:i?,compress-threads:i?,decompress-threads:i?,x-cpu-throttle-initial:i?,x-cpu-throttle-increment:i?",
         .mhandler.cmd_new = qmp_marshal_migrate_set_parameters,
     },
 SQMP
@@ -3726,6 +3743,10 @@ Query current migration parameters
 - "compress-level" : compression level value (json-int)
 - "compress-threads" : compression thread count value (json-int)
 - "decompress-threads" : decompression thread count value (json-int)
+- "x-cpu-throttle-initial" : initial percentage of time guest cpus are
+                             throttled (json-int)
+- "x-cpu-throttle-increment" : throttle increasing percentage for
+                               auto-converge (json-int)
 
 Arguments:
 
@@ -3734,9 +3755,11 @@ Example:
 -> { "execute": "query-migrate-parameters" }
 <- {
       "return": {
-         "decompress-threads", 2,
-         "compress-threads", 8,
-         "compress-level", 1
+         "decompress-threads": 2,
+         "x-cpu-throttle-increment": 10,
+         "compress-threads": 8,
+         "compress-level": 1,
+         "x-cpu-throttle-initial": 20
       }
    }
 
@@ -99,6 +99,7 @@ def get_changed_sec_name(sec):
     # Section names can change -- see commit 292b1634 for an example.
     changes = {
         "ICH9 LPC": "ICH9-LPC",
+        "e1000-82540em": "e1000",
     }
 
     for item in changes: