2010-03-29 23:23:52 +04:00
|
|
|
/*
|
|
|
|
* QEMU System Emulator
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003-2008 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdarg.h>
|
2010-11-22 20:52:34 +03:00
|
|
|
#include <stdlib.h>
|
2015-03-23 11:32:18 +03:00
|
|
|
#include <zlib.h>
|
2010-03-29 23:23:52 +04:00
|
|
|
#ifndef _WIN32
|
2010-03-30 23:27:34 +04:00
|
|
|
#include <sys/types.h>
|
2010-03-29 23:23:52 +04:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
|
|
|
#include "config.h"
|
2012-12-17 21:19:49 +04:00
|
|
|
#include "monitor/monitor.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/sysemu.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/bitops.h"
|
|
|
|
#include "qemu/bitmap.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/arch_init.h"
|
2010-03-29 23:23:52 +04:00
|
|
|
#include "audio/audio.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/i386/pc.h"
|
2012-12-12 16:24:50 +04:00
|
|
|
#include "hw/pci/pci.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/audio/audio.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/kvm.h"
|
2012-12-17 21:19:50 +04:00
|
|
|
#include "migration/migration.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/i386/smbios.h"
|
2012-12-17 21:19:49 +04:00
|
|
|
#include "exec/address-spaces.h"
|
2013-02-05 20:06:20 +04:00
|
|
|
#include "hw/audio/pcspk.h"
|
2012-12-17 21:19:50 +04:00
|
|
|
#include "migration/page_cache.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/config-file.h"
|
2014-03-19 22:32:31 +04:00
|
|
|
#include "qemu/error-report.h"
|
2012-08-20 18:31:38 +04:00
|
|
|
#include "qmp-commands.h"
|
2012-09-04 15:08:57 +04:00
|
|
|
#include "trace.h"
|
2012-11-14 18:45:02 +04:00
|
|
|
#include "exec/cpu-all.h"
|
2013-10-14 19:14:47 +04:00
|
|
|
#include "exec/ram_addr.h"
|
2013-04-15 10:19:22 +04:00
|
|
|
#include "hw/acpi/acpi.h"
|
2013-11-06 14:33:05 +04:00
|
|
|
#include "qemu/host-utils.h"
|
2013-09-05 22:41:35 +04:00
|
|
|
#include "qemu/rcu_queue.h"
|
2010-03-29 23:23:52 +04:00
|
|
|
|
2012-06-19 19:43:15 +04:00
|
|
|
#ifdef DEBUG_ARCH_INIT
|
|
|
|
#define DPRINTF(fmt, ...) \
|
|
|
|
do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
|
|
|
|
#else
|
|
|
|
#define DPRINTF(fmt, ...) \
|
|
|
|
do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2010-03-29 23:23:52 +04:00
|
|
|
#ifdef TARGET_SPARC
|
|
|
|
int graphic_width = 1024;
|
|
|
|
int graphic_height = 768;
|
|
|
|
int graphic_depth = 8;
|
|
|
|
#else
|
|
|
|
int graphic_width = 800;
|
|
|
|
int graphic_height = 600;
|
2013-06-20 16:06:27 +04:00
|
|
|
int graphic_depth = 32;
|
2010-03-29 23:23:52 +04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
#if defined(TARGET_ALPHA)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_ALPHA
|
|
|
|
#elif defined(TARGET_ARM)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_ARM
|
|
|
|
#elif defined(TARGET_CRIS)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_CRIS
|
|
|
|
#elif defined(TARGET_I386)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_I386
|
|
|
|
#elif defined(TARGET_M68K)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_M68K
|
2011-02-18 01:45:02 +03:00
|
|
|
#elif defined(TARGET_LM32)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_LM32
|
2010-03-29 23:23:52 +04:00
|
|
|
#elif defined(TARGET_MICROBLAZE)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
|
|
|
|
#elif defined(TARGET_MIPS)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_MIPS
|
2013-03-18 23:49:25 +04:00
|
|
|
#elif defined(TARGET_MOXIE)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_MOXIE
|
2012-07-20 11:50:39 +04:00
|
|
|
#elif defined(TARGET_OPENRISC)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_OPENRISC
|
2010-03-29 23:23:52 +04:00
|
|
|
#elif defined(TARGET_PPC)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_PPC
|
|
|
|
#elif defined(TARGET_S390X)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_S390X
|
|
|
|
#elif defined(TARGET_SH4)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_SH4
|
|
|
|
#elif defined(TARGET_SPARC)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_SPARC
|
2011-09-06 03:55:25 +04:00
|
|
|
#elif defined(TARGET_XTENSA)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_XTENSA
|
2012-08-10 10:42:21 +04:00
|
|
|
#elif defined(TARGET_UNICORE32)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_UNICORE32
|
2014-09-01 15:59:46 +04:00
|
|
|
#elif defined(TARGET_TRICORE)
|
|
|
|
#define QEMU_ARCH QEMU_ARCH_TRICORE
|
2010-03-29 23:23:52 +04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
const uint32_t arch_type = QEMU_ARCH;
|
2013-06-24 13:47:39 +04:00
|
|
|
static bool mig_throttle_on;
|
|
|
|
static int dirty_rate_high_cnt;
|
|
|
|
static void check_guest_throttling(void);
|
2010-03-29 23:23:52 +04:00
|
|
|
|
2014-04-04 13:57:54 +04:00
|
|
|
static uint64_t bitmap_sync_count;
|
|
|
|
|
2010-03-29 23:23:52 +04:00
|
|
|
/***********************************************************/
|
|
|
|
/* ram save/restore */
|
|
|
|
|
2010-08-18 08:30:12 +04:00
|
|
|
#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
|
|
|
|
#define RAM_SAVE_FLAG_COMPRESS 0x02
|
|
|
|
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
|
|
|
|
#define RAM_SAVE_FLAG_PAGE 0x08
|
|
|
|
#define RAM_SAVE_FLAG_EOS 0x10
|
|
|
|
#define RAM_SAVE_FLAG_CONTINUE 0x20
|
2012-08-06 22:42:53 +04:00
|
|
|
#define RAM_SAVE_FLAG_XBZRLE 0x40
|
2013-07-22 18:01:55 +04:00
|
|
|
/* 0x80 is reserved in migration.h start with 0x100 next */
|
2015-03-23 11:32:18 +03:00
|
|
|
#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
|
2010-03-29 23:23:52 +04:00
|
|
|
|
2012-05-02 20:07:27 +04:00
|
|
|
/* Default configuration files read at startup, in order. */
static struct defconfig_file {
    const char *filename;
    /* Indicates it is an user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
    { NULL }, /* end of list */
};

/* All-zero page used as canonical content when caching zero pages
 * (see xbzrle_cache_zero_page). */
static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
|
2012-05-02 20:07:27 +04:00
|
|
|
|
2012-05-02 20:07:29 +04:00
|
|
|
int qemu_read_default_config_files(bool userconfig)
|
2012-05-02 20:07:25 +04:00
|
|
|
{
|
|
|
|
int ret;
|
2012-05-02 20:07:27 +04:00
|
|
|
struct defconfig_file *f;
|
2012-05-02 20:07:25 +04:00
|
|
|
|
2012-05-02 20:07:27 +04:00
|
|
|
for (f = default_config_files; f->filename; f++) {
|
2012-05-02 20:07:29 +04:00
|
|
|
if (!userconfig && f->userconfig) {
|
|
|
|
continue;
|
|
|
|
}
|
2012-05-02 20:07:27 +04:00
|
|
|
ret = qemu_read_config_file(f->filename);
|
|
|
|
if (ret < 0 && ret != -ENOENT) {
|
|
|
|
return ret;
|
|
|
|
}
|
2012-05-02 20:07:25 +04:00
|
|
|
}
|
2013-03-21 03:23:13 +04:00
|
|
|
|
2012-05-02 20:07:25 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-20 20:23:36 +04:00
|
|
|
/* Return true when the size-byte buffer at p contains only zero bytes. */
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    uint64_t first_nonzero = buffer_find_nonzero_offset(p, size);

    return first_nonzero == size;
}
|
|
|
|
|
2012-08-06 22:42:53 +04:00
|
|
|
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    /* Guards cache against concurrent resize; see xbzrle_cache_resize() */
    QemuMutex lock;
} XBZRLE;
|
|
|
|
|
2014-01-30 22:08:35 +04:00
|
|
|
/* buffer used for XBZRLE decoding */
|
|
|
|
static uint8_t *xbzrle_decoded_buf;
|
2012-08-06 22:42:54 +04:00
|
|
|
|
2014-03-04 17:29:21 +04:00
|
|
|
static void XBZRLE_cache_lock(void)
|
|
|
|
{
|
|
|
|
if (migrate_use_xbzrle())
|
|
|
|
qemu_mutex_lock(&XBZRLE.lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void XBZRLE_cache_unlock(void)
|
|
|
|
{
|
|
|
|
if (migrate_use_xbzrle())
|
|
|
|
qemu_mutex_unlock(&XBZRLE.lock);
|
|
|
|
}
|
|
|
|
|
2014-03-19 22:32:31 +04:00
|
|
|
/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration maybe using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns the effective (pow2-floored) cache size in bytes, or -1 on
 * error (requested size below one target page, or allocation failure).
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        /* Nothing to do when the rounded size matches the current one. */
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        /* Swap in the new cache; previous cached pages are discarded. */
        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
|
|
|
|
|
2012-08-06 22:42:56 +04:00
|
|
|
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

/* Global accumulator for the counters above; cleared by acct_clear(). */
static AccountingInfo acct_info;
|
|
|
|
|
|
|
|
static void acct_clear(void)
|
|
|
|
{
|
|
|
|
memset(&acct_info, 0, sizeof(acct_info));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Bytes accounted to duplicate pages (dup_pages * target page size). */
uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}
|
|
|
|
|
|
|
|
/* Number of duplicate pages accounted so far. */
uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}
|
|
|
|
|
2013-03-26 13:58:37 +04:00
|
|
|
/* Bytes accounted to skipped pages (skipped_pages * target page size). */
uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}
|
|
|
|
|
|
|
|
/* Number of skipped pages accounted so far. */
uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}
|
|
|
|
|
2012-08-06 22:42:56 +04:00
|
|
|
/* Bytes accounted to normal (full) pages (norm_pages * target page size). */
uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}
|
|
|
|
|
|
|
|
/* Number of normal (full) pages accounted so far. */
uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}
|
|
|
|
|
2012-08-06 22:42:57 +04:00
|
|
|
/* Total bytes sent using XBZRLE encoding. */
uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}
|
|
|
|
|
|
|
|
/* Number of pages sent using XBZRLE encoding. */
uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}
|
|
|
|
|
|
|
|
/* Number of XBZRLE cache misses recorded so far. */
uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}
|
|
|
|
|
2014-04-04 13:57:56 +04:00
|
|
|
/* Most recently computed XBZRLE cache-miss rate. */
double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}
|
|
|
|
|
2012-08-06 22:42:57 +04:00
|
|
|
/* Number of pages whose XBZRLE encoding overflowed a full page. */
uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
|
|
|
|
|
2015-02-12 23:41:39 +03:00
|
|
|
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
/* NOTE(review): presumably the offset within last_seen_block where the
 * dirty-page scan resumes — confirm against the scan loop. */
static ram_addr_t last_offset;
/* One bit per target page; a set bit marks a dirty page still to send. */
static unsigned long *migration_bitmap;
/* Count of bits currently set in migration_bitmap (kept in sync by
 * migration_bitmap_set_dirty/find_and_reset_dirty/sync_range). */
static uint64_t migration_dirty_pages;
/* NOTE(review): looks like the ram_list version seen at the last scan —
 * confirm against callers. */
static uint32_t last_version;
/* True during the initial pass when every page is treated as dirty
 * (see migration_bitmap_find_and_reset_dirty). */
static bool ram_bulk_stage;
|
|
|
|
|
2015-03-23 11:32:17 +03:00
|
|
|
/* Per-thread state for a compression worker (see do_data_compress). */
struct CompressParam {
    bool start;        /* set to hand a page to the worker */
    bool done;         /* set by the worker when compression finished */
    QEMUFile *file;    /* per-thread buffer file; uses empty_ops, no real I/O */
    QemuMutex mutex;   /* protects start; pairs with cond */
    QemuCond cond;     /* signalled when work arrives or on termination */
    RAMBlock *block;   /* block containing the page to compress */
    ram_addr_t offset; /* offset of the page within block */
};
typedef struct CompressParam CompressParam;
|
|
|
|
|
2015-03-23 11:32:18 +03:00
|
|
|
struct DecompressParam {
|
2015-03-23 11:32:21 +03:00
|
|
|
bool start;
|
|
|
|
QemuMutex mutex;
|
|
|
|
QemuCond cond;
|
|
|
|
void *des;
|
|
|
|
uint8 *compbuf;
|
|
|
|
int len;
|
2015-03-23 11:32:18 +03:00
|
|
|
};
|
|
|
|
typedef struct DecompressParam DecompressParam;
|
|
|
|
|
2015-03-23 11:32:17 +03:00
|
|
|
/* One CompressParam slot per compression worker thread. */
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex *comp_done_lock;
static QemuCond *comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

/* NOTE(review): set true in migrate_compress_threads_create; other uses
 * are outside this view — presumably toggles compressed vs. normal page
 * sending; confirm against the save path. */
static bool compression_switch;
static bool quit_comp_thread;    /* tells compression workers to exit */
static bool quit_decomp_thread;  /* tells decompression workers to exit */
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static uint8_t *compressed_data_buf;

static int do_compress_ram_page(CompressParam *param);
|
|
|
|
|
2015-03-23 11:32:17 +03:00
|
|
|
/* Compression worker thread body: wait until param->start is raised,
 * compress the handed-off page via do_compress_ram_page(), then mark the
 * slot done and wake the migration thread through comp_done_cond.
 * Exits when quit_comp_thread is set (see terminate_compression_threads).
 */
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;

    while (!quit_comp_thread) {
        qemu_mutex_lock(&param->mutex);
        /* Re-check the quit_comp_thread in case of
         * terminate_compression_threads is called just before
         * qemu_mutex_lock(&param->mutex) and after
         * while(!quit_comp_thread), re-check it here can make
         * sure the compression thread terminate as expected.
         */
        while (!param->start && !quit_comp_thread) {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
        if (!quit_comp_thread) {
            do_compress_ram_page(param);
        }
        param->start = false;
        qemu_mutex_unlock(&param->mutex);

        /* Publish completion under the shared lock so the migration
         * thread waiting on comp_done_cond observes done coherently. */
        qemu_mutex_lock(comp_done_lock);
        param->done = true;
        qemu_cond_signal(comp_done_cond);
        qemu_mutex_unlock(comp_done_lock);
    }

    return NULL;
}
|
|
|
|
|
|
|
|
static inline void terminate_compression_threads(void)
|
|
|
|
{
|
2015-03-23 11:32:23 +03:00
|
|
|
int idx, thread_count;
|
2015-03-23 11:32:17 +03:00
|
|
|
|
2015-03-23 11:32:23 +03:00
|
|
|
thread_count = migrate_compress_threads();
|
|
|
|
quit_comp_thread = true;
|
|
|
|
for (idx = 0; idx < thread_count; idx++) {
|
|
|
|
qemu_mutex_lock(&comp_param[idx].mutex);
|
|
|
|
qemu_cond_signal(&comp_param[idx].cond);
|
|
|
|
qemu_mutex_unlock(&comp_param[idx].mutex);
|
|
|
|
}
|
2015-03-23 11:32:17 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down the compression machinery: ask the workers to stop, join
 * them, then release every per-thread and shared resource.  No-op when
 * compression is not enabled.
 */
void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    /* Stop the workers first; resources are only freed once every
     * thread has been joined. */
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(comp_done_lock);
    qemu_cond_destroy(comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    g_free(comp_done_cond);
    g_free(comp_done_lock);
    /* NULL the pointers so a later create/join cycle starts clean. */
    compress_threads = NULL;
    comp_param = NULL;
    comp_done_cond = NULL;
    comp_done_lock = NULL;
}
|
|
|
|
|
|
|
|
/* Spawn the configured number of compression worker threads along with
 * the shared done-lock/cond used to hand results back to the migration
 * thread.  No-op when compression is not enabled.
 */
void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    quit_comp_thread = false;
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    comp_done_cond = g_new0(QemuCond, 1);
    comp_done_lock = g_new0(QemuMutex, 1);
    qemu_cond_init(comp_done_cond);
    qemu_mutex_init(comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* com_param[i].file is just used as a dummy buffer to save data, set
         * it's ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        /* Start each slot as "done" so it is immediately available. */
        comp_param[i].done = true;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
|
|
|
|
|
2015-02-12 23:46:40 +03:00
|
|
|
/**
|
|
|
|
* save_page_header: Write page header to wire
|
|
|
|
*
|
|
|
|
* If this is the 1st block, it also writes the block identification
|
|
|
|
*
|
|
|
|
* Returns: Number of bytes written
|
|
|
|
*
|
|
|
|
* @f: QEMUFile where to send the data
|
|
|
|
* @block: block that contains the page we want to send
|
|
|
|
* @offset: offset inside the block for the page
|
|
|
|
* in the lower bits, it contains flags
|
|
|
|
*/
|
|
|
|
/* Write the page header (offset + flags, and the block idstr for a new
 * block) to the wire; returns the number of bytes written. */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    /* The offset word (flags in its low bits) always goes first. */
    size_t bytes_written = 8;

    qemu_put_be64(f, offset);

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        /* First page of a block: emit the length-prefixed idstr too. */
        size_t idlen = strlen(block->idstr);

        qemu_put_byte(f, idlen);
        qemu_put_buffer(f, (uint8_t *)block->idstr, idlen);
        bytes_written += 1 + idlen;
    }
    return bytes_written;
}
|
|
|
|
|
2014-02-13 23:44:45 +04:00
|
|
|
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* During the bulk stage or without XBZRLE the cache is irrelevant. */
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}
|
|
|
|
|
2012-08-06 22:42:53 +04:00
|
|
|
#define ENCODING_FLAG_XBZRLE 0x1
|
|
|
|
|
2015-02-12 22:16:33 +03:00
|
|
|
/**
|
|
|
|
* save_xbzrle_page: compress and send current page
|
|
|
|
*
|
|
|
|
* Returns: 1 means that we wrote the page
|
|
|
|
* 0 means that page is identical to the one already sent
|
|
|
|
* -1 means that xbzrle would be longer than normal
|
|
|
|
*
|
|
|
|
* @f: QEMUFile where to send the data
|
|
|
|
* @current_data:
|
|
|
|
* @current_addr:
|
|
|
|
* @block: block that contains the page we want to send
|
|
|
|
* @offset: offset inside the block for the page
|
|
|
|
* @last_stage: if we are at the completion stage
|
|
|
|
* @bytes_transferred: increase it with the number of transferred bytes
|
|
|
|
*/
|
2014-04-04 13:57:53 +04:00
|
|
|
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    /* Cache miss: there is no previous version to diff against, so the
     * page must go out in full (-1).  Unless this is the last stage,
     * seed the cache so the next round can encode against it. */
    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        /* Page identical to the cached copy: nothing to send. */
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        /* Encoding would exceed a full page: send the page normally. */
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    /* +1 for the encoding flag byte, +2 for the be16 length */
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
|
|
|
|
|
2012-10-18 02:00:59 +04:00
|
|
|
/* Find the next dirty page at or after 'start' within region 'mr',
 * clear its dirty bit, and return its byte offset relative to the
 * region start.  An offset >= the region size means no dirty page
 * was found. */
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        /* Bulk stage: every page is dirty, so skip the bitmap scan and
         * take the page immediately after 'start'. */
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
|
|
|
|
|
2013-11-05 19:47:20 +04:00
|
|
|
/* Mark the page containing 'addr' dirty in migration_bitmap, bumping
 * the dirty-page count when the bit was previously clear.  Returns the
 * previous state of the bit. */
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    int page_nr = addr >> TARGET_PAGE_BITS;
    bool was_set = test_and_set_bit(page_nr, migration_bitmap);

    if (!was_set) {
        migration_dirty_pages++;
    }
    return was_set;
}
|
|
|
|
|
2013-11-05 19:47:20 +04:00
|
|
|
/*
 * Fold the DIRTY_MEMORY_MIGRATION bits for [start, start + length) into
 * migration_bitmap, clearing them from ram_list's bitmap as we go, and
 * update migration_dirty_pages for every newly-dirty page.
 */
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        /* Fast path: OR whole bitmap words across, counting only bits
         * that were clear in migration_bitmap before the merge. */
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];   /* bits not yet dirty */
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];                /* freshly dirtied bits */
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;                         /* consume source bits */
            }
        }
    } else {
        /* Slow path: transfer the dirty state one page at a time. */
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}
|
|
|
|
|
|
|
|
|
2014-03-20 16:15:03 +04:00
|
|
|
/* Fix me: there are too many global variables used in migration process. */
/* State carried between calls to migration_bitmap_sync(); reset by
 * migration_bitmap_sync_init() at the start of each migration. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;

/* Zero the dirty-rate / throttle / xbzrle-stat bookkeeping above. */
static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
    xbzrle_cache_miss_prev = 0;
    iterations_prev = 0;
}
|
2013-02-22 20:36:27 +04:00
|
|
|
|
2013-09-05 22:41:35 +04:00
|
|
|
/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
/*
 * Refresh migration_bitmap from the memory API's dirty log, then (at most
 * once per second) recompute the dirty-page rate, drive the auto-converge
 * throttle heuristic, and update the xbzrle cache-miss rate.
 */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;

    bitmap_sync_count++;

    /* First call: seed the rate baselines. */
    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
    }
    rcu_read_unlock();

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 millisecons */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
               (num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
               (dirty_rate_high_cnt++ > 4)) {
                    trace_migration_throttle();
                    mig_throttle_on = true;
                    dirty_rate_high_cnt = 0;
             }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            /* Only update the miss rate when new iterations happened,
             * avoiding a divide-by-zero below. */
            if (iterations_prev != acct_info.iterations) {
                acct_info.xbzrle_cache_miss_rate =
                   (double)(acct_info.xbzrle_cache_miss -
                            xbzrle_cache_miss_prev) /
                   (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = bitmap_sync_count;
}
|
|
|
|
|
2015-03-23 11:32:22 +03:00
|
|
|
/**
|
|
|
|
* save_zero_page: Send the zero page to the stream
|
|
|
|
*
|
|
|
|
* Returns: Number of pages written.
|
|
|
|
*
|
|
|
|
* @f: QEMUFile where to send the data
|
|
|
|
* @block: block that contains the page we want to send
|
|
|
|
* @offset: offset inside the block for the page
|
|
|
|
* @p: pointer to the page
|
|
|
|
* @bytes_transferred: increase it with the number of transferred bytes
|
|
|
|
*/
|
|
|
|
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
|
|
|
|
uint8_t *p, uint64_t *bytes_transferred)
|
|
|
|
{
|
|
|
|
int pages = -1;
|
|
|
|
|
|
|
|
if (is_zero_range(p, TARGET_PAGE_SIZE)) {
|
|
|
|
acct_info.dup_pages++;
|
|
|
|
*bytes_transferred += save_page_header(f, block,
|
|
|
|
offset | RAM_SAVE_FLAG_COMPRESS);
|
|
|
|
qemu_put_byte(f, 0);
|
|
|
|
*bytes_transferred += 1;
|
|
|
|
pages = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return pages;
|
|
|
|
}
|
|
|
|
|
2015-02-12 22:03:45 +03:00
|
|
|
/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* In doubt sent page as normal */
    bytes_xmit = 0;
    /* Give a transport hook (e.g. RDMA) first shot at the page. */
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    /* Compute before ORing flags into offset below. */
    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        /* Transport handled (or will handle) the page; just account it. */
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(current_addr);
        } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}
|
|
|
|
|
2015-03-23 11:32:23 +03:00
|
|
|
/*
 * Compress one guest page described by @param and write the page header
 * plus compressed payload to the worker's private QEMUFile.
 *
 * Returns the total number of bytes produced (header + compressed data).
 */
static int do_compress_ram_page(CompressParam *param)
{
    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;
    uint8_t *page = memory_region_get_ram_ptr(block->mr)
                    + (offset & TARGET_PAGE_MASK);
    int total;

    total = save_page_header(param->file, block,
                             offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
    total += qemu_put_compression_data(param->file, page, TARGET_PAGE_SIZE,
                                       migrate_compress_level());
    return total;
}
|
|
|
|
|
|
|
|
/* Wake the compression worker bound to @param.  done is cleared before
 * taking the mutex so flush_compressed_data() sees the thread as busy. */
static inline void start_compression(CompressParam *param)
{
    param->done = false;
    qemu_mutex_lock(&param->mutex);
    param->start = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);
}
|
|
|
|
|
2015-03-23 11:32:25 +03:00
|
|
|
/* Wake the decompression worker bound to @param; the work to do was
 * stored in @param by the caller before this signal. */
static inline void start_decompression(DecompressParam *param)
{
    qemu_mutex_lock(&param->mutex);
    param->start = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);
}
|
2015-03-23 11:32:23 +03:00
|
|
|
|
|
|
|
/* Running total of bytes written to the migration stream. */
static uint64_t bytes_transferred;

/*
 * Wait for every compression worker to finish its current page, then
 * drain each worker's buffered output into @f.  No-op when compression
 * is not in use.  Workers are skipped once quit_comp_thread is set.
 */
static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();
    for (idx = 0; idx < thread_count; idx++) {
        if (!comp_param[idx].done) {
            qemu_mutex_lock(comp_done_lock);
            while (!comp_param[idx].done && !quit_comp_thread) {
                qemu_cond_wait(comp_done_cond, comp_done_lock);
            }
            qemu_mutex_unlock(comp_done_lock);
        }
        if (!quit_comp_thread) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
    }
}
|
|
|
|
|
|
|
|
/* Record which page (block + offset) a compression worker should send. */
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
|
|
|
|
|
|
|
|
/*
 * Hand one page to an idle compression worker, blocking until a worker
 * becomes free.  The chosen worker's previous output is drained into @f
 * before it is reused.
 *
 * Returns the number of pages queued (1 on exit from the wait loop).
 */
static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                /* Drain the worker's buffered output before reusing it. */
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                set_compress_params(&comp_param[idx], block, offset);
                start_compression(&comp_param[idx]);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            /* All workers busy: wait for one to signal completion. */
            qemu_cond_wait(comp_done_cond, comp_done_lock);
        }
    }
    qemu_mutex_unlock(comp_done_lock);

    return pages;
}
|
|
|
|
|
2015-03-23 11:32:17 +03:00
|
|
|
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
                                    ram_addr_t offset, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;

    p = memory_region_get_ram_ptr(mr) + offset;

    bytes_xmit = 0;
    /* Give a transport hook (e.g. RDMA) first shot at the page. */
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        /* Transport handled (or will handle) the page; just account it. */
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != last_sent_block) {
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                set_compress_params(&comp_param[0], block, offset);
                /* Use the qemu thread to compress the data to make sure the
                 * first page is sent out before other pages
                 */
                bytes_xmit = do_compress_ram_page(&comp_param[0]);
                acct_info.norm_pages++;
                qemu_put_qemu_file(f, comp_param[0].file);
                *bytes_transferred += bytes_xmit;
                pages = 1;
            }
        } else {
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            }
        }
    }

    return pages;
}
|
|
|
|
|
2015-02-12 21:33:05 +03:00
|
|
|
/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns: The number of pages written
 *          0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    /* Resume scanning from where the previous call left off. */
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int pages = 0;
    MemoryRegion *mr;

    if (!block)
        block = QLIST_FIRST_RCU(&ram_list.blocks);

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        /* Wrapped all the way back to the starting point: nothing dirty. */
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->used_length) {
            /* End of this block: advance to the next (wrapping). */
            offset = 0;
            block = QLIST_NEXT_RCU(block, next);
            if (!block) {
                block = QLIST_FIRST_RCU(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
                if (migrate_use_xbzrle()) {
                    /* If xbzrle is on, stop using the data compression at this
                     * point. In theory, xbzrle can do better than compression.
                     */
                    flush_compressed_data(f);
                    compression_switch = false;
                }
            }
        } else {
            if (compression_switch && migrate_use_compression()) {
                pages = ram_save_compressed_page(f, block, offset, last_stage,
                                                 bytes_transferred);
            } else {
                pages = ram_save_page(f, block, offset, last_stage,
                                      bytes_transferred);
            }

            /* if page is unmodified, continue to the next */
            if (pages > 0) {
                last_sent_block = block;
                break;
            }
        }
    }

    last_seen_block = block;
    last_offset = offset;

    return pages;
}
|
|
|
|
|
2013-06-26 05:35:28 +04:00
|
|
|
/*
 * Account @size bytes moved outside the normal save path (e.g. by a
 * transport hook): bump the duplicate- or normal-page counters and, for
 * non-zero data, the transferred-byte total and the file position.
 */
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t page_count = size / TARGET_PAGE_SIZE;

    if (zero) {
        acct_info.dup_pages += page_count;
        return;
    }

    acct_info.norm_pages += page_count;
    bytes_transferred += size;
    qemu_update_position(f, size);
}
|
|
|
|
|
2010-03-29 23:23:52 +04:00
|
|
|
/* Number of target pages still marked dirty in migration_bitmap. */
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

/* Bytes of guest RAM still to be sent (dirty pages * page size). */
uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

/* Total bytes written to the migration stream so far. */
uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}
|
|
|
|
|
|
|
|
uint64_t ram_bytes_total(void)
|
|
|
|
{
|
2010-06-25 21:08:38 +04:00
|
|
|
RAMBlock *block;
|
|
|
|
uint64_t total = 0;
|
|
|
|
|
2013-09-05 22:41:35 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
|
2014-12-15 23:55:32 +03:00
|
|
|
total += block->used_length;
|
2013-09-05 22:41:35 +04:00
|
|
|
rcu_read_unlock();
|
2010-06-25 21:08:38 +04:00
|
|
|
return total;
|
2010-03-29 23:23:52 +04:00
|
|
|
}
|
|
|
|
|
2014-01-30 22:08:35 +04:00
|
|
|
/* Release the lazily-allocated XBZRLE decode scratch buffer; NULLing the
 * pointer lets load_xbzrle() reallocate it on the next incoming page. */
void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
|
|
|
|
|
2012-06-19 19:43:17 +04:00
|
|
|
/*
 * Tear down outgoing-migration state: stop dirty logging, free the
 * migration bitmap, and release the XBZRLE cache and staging buffers
 * (under XBZRLE_cache_lock).  Safe to call when nothing was set up.
 */
static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}
|
|
|
|
|
2012-06-26 21:26:41 +04:00
|
|
|
/* SaveVMHandlers cancel hook: release all RAM-migration resources. */
static void ram_migration_cancel(void *opaque)
{
    migration_end();
}
|
|
|
|
|
2012-07-17 19:02:24 +04:00
|
|
|
/* Reset the block/offset iteration cursors; called at setup and whenever
 * ram_list.version changes (blocks added/removed) during migration. */
static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}
|
|
|
|
|
2012-05-22 18:27:59 +04:00
|
|
|
#define MAX_WAIT 50 /* ms, half buffered_file limit */

/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/*
 * SaveVMHandlers setup hook: allocate the migration bitmap and (if
 * enabled) the XBZRLE cache, start dirty logging, take an initial bitmap
 * sync, then emit the RAM size and the list of block names/lengths.
 * Returns 0 on success, -1 on allocation failure.
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* iothread lock needed for ram_list.dirty_memory[] */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    /* Start with every page dirty: everything must be sent at least once. */
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
|
|
|
|
|
2012-06-28 17:31:37 +04:00
|
|
|
/*
 * SaveVMHandlers iterate hook: send dirty pages until the file's rate
 * limit trips, no dirty pages remain, or MAX_WAIT ms have elapsed.
 * Returns the number of pages sent, or a negative QEMUFile error.
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int pages_sent = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        /* Block list changed since last pass: restart the cursors. */
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to sent */
        if (pages == 0) {
            break;
        }
        pages_sent += pages;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check each some
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(f);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;    /* account for the EOS marker itself */

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return pages_sent;
}
|
|
|
|
|
2013-09-05 22:41:35 +04:00
|
|
|
/* Called with iothread lock */
/*
 * SaveVMHandlers completion hook: with the guest stopped, take a final
 * bitmap sync and flush every remaining dirty page ignoring the rate
 * limit, then tear down migration state and emit EOS.
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, true, &bytes_transferred);
        /* no more blocks to sent */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(f);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    rcu_read_unlock();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
|
|
|
|
|
2012-09-21 13:18:18 +04:00
|
|
|
/*
 * SaveVMHandlers "pending" hook: estimate the bytes left to send.  When
 * the estimate drops below @max_size, resync the dirty bitmap (under
 * iothread lock + RCU) to refresh the answer before reporting it.
 */
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
|
|
|
|
|
2012-08-06 22:42:53 +04:00
|
|
|
/*
 * Read one XBZRLE-encoded page from @f and apply the delta to the page
 * at @host.  Returns 0 on success, -1 on a malformed or oversized
 * encoding.
 */
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int enc_len;
    int enc_flags;

    /* Lazily allocate the shared decode scratch buffer. */
    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    enc_flags = qemu_get_byte(f);
    enc_len = qemu_get_be16(f);

    if (enc_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (enc_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }

    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, enc_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, enc_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
|
|
|
|
|
2013-09-05 22:41:35 +04:00
|
|
|
/* Must be called from within a rcu critical section.
|
|
|
|
* Returns a pointer from within the RCU-protected ram_list.
|
|
|
|
*/
|
2010-06-25 21:10:05 +04:00
|
|
|
static inline void *host_from_stream_offset(QEMUFile *f,
|
|
|
|
ram_addr_t offset,
|
|
|
|
int flags)
|
|
|
|
{
|
|
|
|
static RAMBlock *block = NULL;
|
|
|
|
char id[256];
|
|
|
|
uint8_t len;
|
|
|
|
|
|
|
|
if (flags & RAM_SAVE_FLAG_CONTINUE) {
|
2014-12-15 23:55:32 +03:00
|
|
|
if (!block || block->max_length <= offset) {
|
2014-05-21 04:10:38 +04:00
|
|
|
error_report("Ack, bad migration stream!");
|
2010-06-25 21:10:05 +04:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-12-21 15:54:33 +04:00
|
|
|
return memory_region_get_ram_ptr(block->mr) + offset;
|
2010-06-25 21:10:05 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
len = qemu_get_byte(f);
|
|
|
|
qemu_get_buffer(f, (uint8_t *)id, len);
|
|
|
|
id[len] = 0;
|
|
|
|
|
2013-09-05 22:41:35 +04:00
|
|
|
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
|
2014-12-15 23:55:32 +03:00
|
|
|
if (!strncmp(id, block->idstr, sizeof(id)) &&
|
|
|
|
block->max_length > offset) {
|
2011-12-21 15:54:33 +04:00
|
|
|
return memory_region_get_ram_ptr(block->mr) + offset;
|
2014-11-12 12:44:39 +03:00
|
|
|
}
|
2010-06-25 21:10:05 +04:00
|
|
|
}
|
|
|
|
|
2014-05-21 04:10:38 +04:00
|
|
|
error_report("Can't find block %s!", id);
|
2010-06-25 21:10:05 +04:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-07-22 18:01:53 +04:00
|
|
|
/*
 * Fill a page (or a whole RDMA chunk) with the single byte @ch.
 *
 * The common case is @ch == 0 on memory that is already zeroed; detect
 * that with is_zero_range() and skip the memset entirely.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch == 0 && is_zero_range(host, size)) {
        return;
    }
    memset(host, ch, size);
}
|
|
|
|
|
2015-03-23 11:32:18 +03:00
|
|
|
/*
 * Worker body of one decompression thread.
 *
 * Sleeps on its DecompressParam condition variable until the main thread
 * hands it a compressed page (param->start) or teardown is requested
 * (quit_decomp_thread), then inflates the page into param->des.
 *
 * Fix: the decompression used to run unconditionally right after
 * qemu_cond_wait() inside the wait loop, so a spurious wakeup — or the
 * teardown broadcast — could call uncompress() on stale param->des /
 * param->compbuf / param->len and clear a param->start that was never
 * set.  The wait loop now only waits; the work runs solely when
 * param->start is actually true, and only then is the flag cleared.
 */
static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;

    while (!quit_decomp_thread) {
        qemu_mutex_lock(&param->mutex);
        while (!param->start && !quit_decomp_thread) {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
        if (param->start) {
            pagesize = TARGET_PAGE_SIZE;
            /* uncompress() will return failed in some case, especially
             * when the page is dirted when doing the compression, it's
             * not a problem because the dirty page will be retransferred
             * and uncompress() won't break the data in other pages.
             */
            uncompress((Bytef *)param->des, &pagesize,
                       (const Bytef *)param->compbuf, param->len);
            param->start = false;
        }
        qemu_mutex_unlock(&param->mutex);
    }

    return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Spawn the pool of joinable decompression worker threads, one
 * DecompressParam slot per thread, plus the shared staging buffer used
 * to read compressed pages off the stream.
 */
void migrate_decompress_threads_create(void)
{
    int idx, count;

    count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, count);
    decomp_param = g_new0(DecompressParam, count);
    /* Big enough for any zlib-compressed page. */
    compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
    quit_decomp_thread = false;

    for (idx = 0; idx < count; idx++) {
        qemu_mutex_init(&decomp_param[idx].mutex);
        qemu_cond_init(&decomp_param[idx].cond);
        decomp_param[idx].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_thread_create(decompress_threads + idx, "decompress",
                           do_data_decompress, decomp_param + idx,
                           QEMU_THREAD_JOINABLE);
    }
}
|
|
|
|
|
|
|
|
void migrate_decompress_threads_join(void)
|
|
|
|
{
|
|
|
|
int i, thread_count;
|
|
|
|
|
|
|
|
quit_decomp_thread = true;
|
|
|
|
thread_count = migrate_decompress_threads();
|
2015-03-23 11:32:25 +03:00
|
|
|
for (i = 0; i < thread_count; i++) {
|
|
|
|
qemu_mutex_lock(&decomp_param[i].mutex);
|
|
|
|
qemu_cond_signal(&decomp_param[i].cond);
|
|
|
|
qemu_mutex_unlock(&decomp_param[i].mutex);
|
|
|
|
}
|
2015-03-23 11:32:18 +03:00
|
|
|
for (i = 0; i < thread_count; i++) {
|
|
|
|
qemu_thread_join(decompress_threads + i);
|
2015-03-23 11:32:21 +03:00
|
|
|
qemu_mutex_destroy(&decomp_param[i].mutex);
|
|
|
|
qemu_cond_destroy(&decomp_param[i].cond);
|
|
|
|
g_free(decomp_param[i].compbuf);
|
2015-03-23 11:32:18 +03:00
|
|
|
}
|
|
|
|
g_free(decompress_threads);
|
|
|
|
g_free(decomp_param);
|
|
|
|
g_free(compressed_data_buf);
|
|
|
|
decompress_threads = NULL;
|
|
|
|
decomp_param = NULL;
|
|
|
|
compressed_data_buf = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Hand one compressed page to an idle decompression worker.
 *
 * Busy-waits until a slot whose start flag is clear is found, copies
 * @compbuf into that worker's private buffer, records the destination
 * and length, and wakes the worker via start_decompression().
 *
 * NOTE(review): decomp_param[idx].start is read here without holding
 * decomp_param[idx].mutex, while the worker clears it under that mutex;
 * confirm start_decompression() supplies the locking/ordering this
 * spin-check relies on.
 */
static void *decompress_data_with_multi_threads(uint8_t *compbuf,
                                                void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (!decomp_param[idx].start) {
                /* Idle worker found: load its slot and kick it. */
                memcpy(decomp_param[idx].compbuf, compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                start_decompression(&decomp_param[idx]);
                break;
            }
        }
        /* idx < thread_count means a worker accepted the page. */
        if (idx < thread_count) {
            break;
        }
    }
}
|
|
|
|
|
2012-06-26 20:46:10 +04:00
|
|
|
/*
 * Destination side of RAM migration (SaveVMHandlers.load_state).
 *
 * Consumes records from @f until RAM_SAVE_FLAG_EOS is seen or an error
 * occurs.  Each record starts with a be64 whose page-aligned part is a
 * page offset inside a RAMBlock and whose low (sub-page) bits carry the
 * record's flags.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;   /* invocation counter for the trace below */
    int len = 0;

    seq_iter++;

    /* Only stream format version 4 is accepted. */
    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();
    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host;
        uint8_t ch;

        /* Record header: aligned offset plus flag bits. */
        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;
                char id[256];
                ram_addr_t length;

                /* Each entry: idstr length, idstr bytes, be64 size. */
                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                /* Find the matching local block; resize it if the
                 * source's size differs from ours. */
                QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (length != block->used_length) {
                            Error *local_err = NULL;

                            ret = qemu_ram_resize(block->offset, length, &local_err);
                            if (local_err) {
                                error_report_err(local_err);
                            }
                        }
                        break;
                    }
                }

                /* The iterator is NULL when no block matched. */
                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            /* Page consisting of a single repeated byte. */
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            /* Raw page contents follow in the stream. */
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            /* zlib-compressed page; handed off to the worker pool. */
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Invalid RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            /* Bound the payload by the worst-case compressed size. */
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, compressed_data_buf, len);
            decompress_data_with_multi_threads(compressed_data_buf, host, len);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            /* Delta-encoded page applied on top of current contents. */
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, flags);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        /* Also pick up any error latched on the stream itself. */
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
|
|
|
|
|
2014-03-19 22:32:30 +04:00
|
|
|
/* Live-migration callbacks wiring RAM into the generic savevm machinery
 * (registered under the "ram" section name in ram_mig_init()). */
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};
|
|
|
|
|
2014-03-19 22:32:30 +04:00
|
|
|
/* Register RAM as a live-migration section ("ram", stream version 4). */
void ram_mig_init(void)
{
    /* Initialize the XBZRLE lock before the handlers can be invoked. */
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
|
|
|
|
|
2011-01-21 13:53:45 +03:00
|
|
|
/* Registry entry for one user-selectable sound card. */
struct soundhw {
    const char *name;   /* short name matched against -soundhw values */
    const char *descr;  /* human-readable description for the help list */
    int enabled;        /* set when the user selects this card */
    int isa;            /* nonzero: ISA device, zero: PCI device */
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

/* Registered cards; iteration stops at the first NULL name, so one
 * slot is always kept free as a terminator (see the register asserts). */
static struct soundhw soundhw[9];
static int soundhw_count;
|
2010-03-29 23:23:52 +04:00
|
|
|
|
2013-04-18 20:43:58 +04:00
|
|
|
void isa_register_soundhw(const char *name, const char *descr,
|
|
|
|
int (*init_isa)(ISABus *bus))
|
|
|
|
{
|
|
|
|
assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
|
|
|
|
soundhw[soundhw_count].name = name;
|
|
|
|
soundhw[soundhw_count].descr = descr;
|
|
|
|
soundhw[soundhw_count].isa = 1;
|
|
|
|
soundhw[soundhw_count].init.init_isa = init_isa;
|
|
|
|
soundhw_count++;
|
|
|
|
}
|
2010-03-29 23:23:52 +04:00
|
|
|
|
2013-04-18 20:43:58 +04:00
|
|
|
void pci_register_soundhw(const char *name, const char *descr,
|
|
|
|
int (*init_pci)(PCIBus *bus))
|
|
|
|
{
|
|
|
|
assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
|
|
|
|
soundhw[soundhw_count].name = name;
|
|
|
|
soundhw[soundhw_count].descr = descr;
|
|
|
|
soundhw[soundhw_count].isa = 0;
|
|
|
|
soundhw[soundhw_count].init.init_pci = init_pci;
|
|
|
|
soundhw_count++;
|
|
|
|
}
|
2010-03-29 23:23:52 +04:00
|
|
|
|
|
|
|
/*
 * Parse the -soundhw option value.
 *
 * A help spelling prints the card list and exits 0; "all" enables every
 * registered card; otherwise the value is a comma-separated list of
 * card names.  Any unknown name prints the list and exits 1, reached by
 * a goto into the help branch below.
 */
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:
        /* Entered either for an explicit help request or, via goto,
         * after an unknown card name; the exit status below tells the
         * two cases apart. */

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf ("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        /* exit(0) for a help request, exit(1) for a bad card name. */
        exit(!is_help_option(optarg));
    }
    else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        /* Walk the comma-separated list of names. */
        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                /* Match full name only: same prefix and same length. */
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            /* Loop ran off the NULL terminator: name not registered. */
            if (!c->name) {
                if (l > 80) {
                    error_report("Unknown sound card name (too big to show)");
                }
                else {
                    error_report("Unknown sound card name `%.*s'",
                                 (int) l, p);
                }
                bad_card = 1;
            }
            /* Skip past the name and the comma, if there was one. */
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}
|
2011-01-21 13:53:45 +03:00
|
|
|
|
2013-04-18 20:44:03 +04:00
|
|
|
void audio_init(void)
|
2011-01-21 13:53:45 +03:00
|
|
|
{
|
|
|
|
struct soundhw *c;
|
2013-04-18 20:44:03 +04:00
|
|
|
ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
|
|
|
|
PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);
|
2011-01-21 13:53:45 +03:00
|
|
|
|
|
|
|
for (c = soundhw; c->name; ++c) {
|
|
|
|
if (c->enabled) {
|
|
|
|
if (c->isa) {
|
2013-04-18 20:44:03 +04:00
|
|
|
if (!isa_bus) {
|
2014-05-21 04:10:38 +04:00
|
|
|
error_report("ISA bus not available for %s", c->name);
|
2013-04-18 20:44:03 +04:00
|
|
|
exit(1);
|
2011-01-21 13:53:45 +03:00
|
|
|
}
|
2013-04-18 20:44:03 +04:00
|
|
|
c->init.init_isa(isa_bus);
|
2011-01-21 13:53:45 +03:00
|
|
|
} else {
|
2013-04-18 20:44:03 +04:00
|
|
|
if (!pci_bus) {
|
2014-05-21 04:10:38 +04:00
|
|
|
error_report("PCI bus not available for %s", c->name);
|
2013-04-18 20:44:03 +04:00
|
|
|
exit(1);
|
2011-01-21 13:53:45 +03:00
|
|
|
}
|
2013-04-18 20:44:03 +04:00
|
|
|
c->init.init_pci(pci_bus);
|
2011-01-21 13:53:45 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-03-29 23:23:52 +04:00
|
|
|
|
|
|
|
int qemu_uuid_parse(const char *str, uint8_t *uuid)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (strlen(str) != 36) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
|
|
|
|
&uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
|
|
|
|
&uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
|
|
|
|
&uuid[15]);
|
|
|
|
|
|
|
|
if (ret != 16) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-21 03:23:17 +04:00
|
|
|
/*
 * Handle one -acpitable command line option: forward it to the x86
 * ACPI table builder and abort startup on a malformed table.
 * A no-op on non-x86 targets.
 */
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err == NULL) {
        return;
    }
    error_report("Wrong acpi table provided: %s",
                 error_get_pretty(err));
    error_free(err);
    exit(1);
#endif
}
|
|
|
|
|
2013-08-16 17:18:29 +04:00
|
|
|
/* Forward one -smbios command line option to the x86 SMBIOS table
 * builder; a no-op on other targets. */
void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}
|
|
|
|
|
|
|
|
/* Run the target's optional cpudef_setup() hook, when the target
 * headers define one. */
void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}
|
|
|
|
|
|
|
|
/* Return 1 when this binary was built with KVM support, else 0. */
int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}
|
|
|
|
|
|
|
|
/* Return 1 when this binary was built with Xen support, else 0. */
int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}
|
2012-08-20 18:31:38 +04:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * QMP 'query-target' handler: report the target architecture name.
 * The returned structure (and its strings) is owned by the caller.
 */
TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_new0(TargetInfo, 1);

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
|
2013-06-24 13:47:39 +04:00
|
|
|
|
|
|
|
/* Stub that runs on a vcpu when it is brought out of the VM to run
 * inside qemu via async_run_on_cpu(). */
static void mig_sleep_cpu(void *opq)
{
    /* Drop the iothread lock so other threads can make progress while
     * this vcpu sleeps; the order unlock -> sleep -> lock is essential. */
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}
|
|
|
|
|
|
|
|
/* To reduce the dirty rate explicitly disallow the VCPUs from spending
|
|
|
|
much time in the VM. The migration thread will try to catchup.
|
|
|
|
Workload will experience a performance drop.
|
|
|
|
*/
|
|
|
|
static void mig_throttle_guest_down(void)
|
|
|
|
{
|
2013-07-07 21:50:23 +04:00
|
|
|
CPUState *cpu;
|
|
|
|
|
2013-06-24 13:47:39 +04:00
|
|
|
qemu_mutex_lock_iothread();
|
2013-07-07 21:50:23 +04:00
|
|
|
CPU_FOREACH(cpu) {
|
|
|
|
async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
|
|
|
|
}
|
2013-06-24 13:47:39 +04:00
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void check_guest_throttling(void)
|
|
|
|
{
|
|
|
|
static int64_t t0;
|
|
|
|
int64_t t1;
|
|
|
|
|
|
|
|
if (!mig_throttle_on) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!t0) {
|
2013-08-21 19:03:08 +04:00
|
|
|
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
2013-06-24 13:47:39 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-08-21 19:03:08 +04:00
|
|
|
t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
2013-06-24 13:47:39 +04:00
|
|
|
|
|
|
|
/* If it has been more than 40 ms since the last time the guest
|
|
|
|
* was throttled then do it again.
|
|
|
|
*/
|
|
|
|
if (40 < (t1-t0)/1000000) {
|
|
|
|
mig_throttle_guest_down();
|
|
|
|
t0 = t1;
|
|
|
|
}
|
|
|
|
}
|