migration: do not send zero pages in bulk stage
During the bulk stage of RAM migration, if a page is a zero page, do not send it at all; the memory at the destination reads as zero anyway. Even if there is an madvise with QEMU_MADV_DONTNEED at the target upon receipt of a zero page, I have observed that the target starts swapping if the memory is overcommitted; it seems that the pages are dropped asynchronously. This patch also updates QMP to return the number of skipped pages in MigrationStats.

Signed-off-by: Peter Lieven <pl@kamp.de>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
parent 78d07ae7ac
commit f1c72795af
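The per-page decision the patch adds to ram_save_block() can be looked at in isolation. The sketch below is illustrative only and not QEMU code; every name in it (page_is_zero, save_page, HDR_LEN, mig_stats) is hypothetical. During the bulk stage a zero page is only counted as skipped and nothing goes on the wire; outside the bulk stage a zero page is still sent as a compressed page (header plus one fill byte), because the destination may already hold non-zero data for it.

/* Standalone sketch, not QEMU code: all names below are hypothetical. */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define HDR_LEN   9           /* illustrative block-header size */

struct mig_stats {
    uint64_t dup_pages;       /* zero pages seen (sent or skipped) */
    uint64_t skipped_pages;   /* zero pages not sent during bulk stage */
    uint64_t norm_pages;      /* full pages put on the wire */
};

/* true if every byte of the page is zero */
static bool page_is_zero(const uint8_t *p)
{
    static const uint8_t zeroes[PAGE_SIZE];
    return memcmp(p, zeroes, PAGE_SIZE) == 0;
}

/* Returns the number of bytes that would be transferred for this page. */
static size_t save_page(const uint8_t *p, bool bulk_stage,
                        struct mig_stats *st)
{
    if (page_is_zero(p)) {
        st->dup_pages++;
        if (bulk_stage) {
            /* destination RAM still reads as zero: send nothing at all */
            st->skipped_pages++;
            return 0;
        }
        /* after the bulk stage the page may have held data before, so a
         * compressed page (header + one fill byte) must still be sent */
        return HDR_LEN + 1;
    }
    st->norm_pages++;
    return HDR_LEN + PAGE_SIZE;   /* ordinary full-page transfer */
}

int main(void)
{
    uint8_t zero_page[PAGE_SIZE] = { 0 };
    uint8_t data_page[PAGE_SIZE] = { 0 };
    struct mig_stats st = { 0 };

    data_page[123] = 0xab;

    printf("bulk stage, zero page:  %zu bytes\n",
           save_page(zero_page, true, &st));
    printf("later stage, zero page: %zu bytes\n",
           save_page(zero_page, false, &st));
    printf("bulk stage, data page:  %zu bytes\n",
           save_page(data_page, true, &st));
    printf("duplicate=%" PRIu64 " skipped=%" PRIu64 " normal=%" PRIu64 "\n",
           st.dup_pages, st.skipped_pages, st.norm_pages);
    return 0;
}

In the patch itself, setting bytes_sent = 0 instead of leaving it at -1 is what keeps a skipped page from falling through to the normal full-page transmission path further down in ram_save_block().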
arch_init.c | 24

@@ -183,6 +183,7 @@ int64_t xbzrle_cache_resize(int64_t new_size)
 /* accounting for migration statistics */
 typedef struct AccountingInfo {
     uint64_t dup_pages;
+    uint64_t skipped_pages;
     uint64_t norm_pages;
     uint64_t iterations;
     uint64_t xbzrle_bytes;
@@ -208,6 +209,16 @@ uint64_t dup_mig_pages_transferred(void)
     return acct_info.dup_pages;
 }
 
+uint64_t skipped_mig_bytes_transferred(void)
+{
+    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t skipped_mig_pages_transferred(void)
+{
+    return acct_info.skipped_pages;
+}
+
 uint64_t norm_mig_bytes_transferred(void)
 {
     return acct_info.norm_pages * TARGET_PAGE_SIZE;
@@ -440,10 +451,15 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
             bytes_sent = -1;
             if (is_zero_page(p)) {
                 acct_info.dup_pages++;
-                bytes_sent = save_block_hdr(f, block, offset, cont,
-                                            RAM_SAVE_FLAG_COMPRESS);
-                qemu_put_byte(f, 0);
-                bytes_sent++;
+                if (!ram_bulk_stage) {
+                    bytes_sent = save_block_hdr(f, block, offset, cont,
+                                                RAM_SAVE_FLAG_COMPRESS);
+                    qemu_put_byte(f, 0);
+                    bytes_sent++;
+                } else {
+                    acct_info.skipped_pages++;
+                    bytes_sent = 0;
+                }
             } else if (migrate_use_xbzrle()) {
                 current_addr = block->offset + offset;
                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
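The safety argument in the commit message ("the memory at the destination reads as zero anyway") rests on the behaviour of private anonymous mappings: a page that was never written, or that was dropped with madvise(MADV_DONTNEED), reads back as zero. A small standalone Linux demonstration of this, not QEMU code:

/* Standalone demonstration (plain Linux API, not QEMU code). */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    const size_t len = 4096;
    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* freshly mapped anonymous memory is zero-filled */
    assert(p[0] == 0 && p[len - 1] == 0);

    /* dirty the page, then drop it again */
    memset(p, 0xab, len);
    if (madvise(p, len, MADV_DONTNEED) != 0) {
        perror("madvise");
        return 1;
    }

    /* after MADV_DONTNEED the private anonymous page reads as zero again */
    printf("first byte after MADV_DONTNEED: %u\n", (unsigned)p[0]);

    munmap(p, len);
    return 0;
}

Once the bulk stage is over this assumption no longer holds, since the destination may already have received real content for the page, which is why the patch keeps sending compressed zero pages when ram_bulk_stage is false.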
hmp.c | 2

@@ -173,6 +173,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->ram->total >> 10);
         monitor_printf(mon, "duplicate: %" PRIu64 " pages\n",
                        info->ram->duplicate);
+        monitor_printf(mon, "skipped: %" PRIu64 " pages\n",
+                       info->ram->skipped);
         monitor_printf(mon, "normal: %" PRIu64 " pages\n",
                        info->ram->normal);
         monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
include/migration/migration.h

@@ -96,6 +96,8 @@ extern SaveVMHandlers savevm_ram_handlers;
 
 uint64_t dup_mig_bytes_transferred(void);
 uint64_t dup_mig_pages_transferred(void);
+uint64_t skipped_mig_bytes_transferred(void);
+uint64_t skipped_mig_pages_transferred(void);
 uint64_t norm_mig_bytes_transferred(void);
 uint64_t norm_mig_pages_transferred(void);
 uint64_t xbzrle_mig_bytes_transferred(void);
migration.c

@@ -197,11 +197,11 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->remaining = ram_bytes_remaining();
         info->ram->total = ram_bytes_total();
         info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
         info->ram->normal = norm_mig_pages_transferred();
         info->ram->normal_bytes = norm_mig_bytes_transferred();
         info->ram->dirty_pages_rate = s->dirty_pages_rate;
-
 
         if (blk_mig_active()) {
             info->has_disk = true;
             info->disk = g_malloc0(sizeof(*info->disk));
@@ -227,6 +227,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->remaining = 0;
         info->ram->total = ram_bytes_total();
         info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
         info->ram->normal = norm_mig_pages_transferred();
         info->ram->normal_bytes = norm_mig_bytes_transferred();
         break;
qapi-schema.json

@@ -496,7 +496,9 @@
 #
 # @total: total amount of bytes involved in the migration process
 #
-# @duplicate: number of duplicate pages (since 1.2)
+# @duplicate: number of duplicate (zero) pages (since 1.2)
+#
+# @skipped: number of skipped zero pages (since 1.5)
 #
 # @normal : number of normal pages (since 1.2)
 #
@@ -509,8 +511,8 @@
 ##
 { 'type': 'MigrationStats',
   'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
-           'duplicate': 'int', 'normal': 'int', 'normal-bytes': 'int',
-           'dirty-pages-rate' : 'int' } }
+           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
+           'normal-bytes': 'int', 'dirty-pages-rate' : 'int' } }
 
 ##
 # @XBZRLECacheStats
qmp-commands.hx

@@ -2445,6 +2445,7 @@ The main json-object contains the following:
 - "duplicate": number of pages filled entirely with the same
    byte (json-int)
    These are sent over the wire much more efficiently.
+- "skipped": number of skipped zero pages (json-int)
 - "normal" : number of whole pages transfered. I.e. they
    were not sent as duplicate or xbzrle pages (json-int)
 - "normal-bytes" : number of bytes transferred in whole