Separate migration bitmap
This patch creates a migration bitmap, which is periodically kept in sync with the qemu bitmap. A separate copy of the dirty bitmap for the migration limits the amount of concurrent access to the qemu bitmap from iothread and migration thread (which requires taking the big lock). We use the qemu bitmap type. We have to "undo" the dirty_pages counting optimization on the general dirty bitmap and do the counting optimization with the migration local bitmap. Signed-off-by: Umesh Deshpande <udeshpan@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in:
parent
3c12193d99
commit
c6bf8e0e0c
63
arch_init.c
63
arch_init.c
@ -31,6 +31,8 @@
|
|||||||
#include "config.h"
|
#include "config.h"
|
||||||
#include "monitor.h"
|
#include "monitor.h"
|
||||||
#include "sysemu.h"
|
#include "sysemu.h"
|
||||||
|
#include "bitops.h"
|
||||||
|
#include "bitmap.h"
|
||||||
#include "arch_init.h"
|
#include "arch_init.h"
|
||||||
#include "audio/audio.h"
|
#include "audio/audio.h"
|
||||||
#include "hw/pc.h"
|
#include "hw/pc.h"
|
||||||
@ -331,39 +333,57 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
|
|||||||
|
|
||||||
static RAMBlock *last_block;
|
static RAMBlock *last_block;
|
||||||
static ram_addr_t last_offset;
|
static ram_addr_t last_offset;
|
||||||
|
static unsigned long *migration_bitmap;
|
||||||
|
static uint64_t migration_dirty_pages;
|
||||||
|
|
||||||
static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
|
static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
|
||||||
ram_addr_t offset)
|
ram_addr_t offset)
|
||||||
{
|
{
|
||||||
bool ret = memory_region_get_dirty(mr, offset, TARGET_PAGE_SIZE,
|
bool ret;
|
||||||
DIRTY_MEMORY_MIGRATION);
|
int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
|
||||||
|
|
||||||
|
ret = test_and_clear_bit(nr, migration_bitmap);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
memory_region_reset_dirty(mr, offset, TARGET_PAGE_SIZE,
|
migration_dirty_pages--;
|
||||||
DIRTY_MEMORY_MIGRATION);
|
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void migration_bitmap_set_dirty(MemoryRegion *mr, int length)
|
static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
|
||||||
|
ram_addr_t offset)
|
||||||
{
|
{
|
||||||
ram_addr_t addr;
|
bool ret;
|
||||||
|
int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
|
||||||
|
|
||||||
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
|
ret = test_and_set_bit(nr, migration_bitmap);
|
||||||
if (!memory_region_get_dirty(mr, addr, TARGET_PAGE_SIZE,
|
|
||||||
DIRTY_MEMORY_MIGRATION)) {
|
if (!ret) {
|
||||||
memory_region_set_dirty(mr, addr, TARGET_PAGE_SIZE);
|
migration_dirty_pages++;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void migration_bitmap_sync(void)
|
static void migration_bitmap_sync(void)
|
||||||
{
|
{
|
||||||
uint64_t num_dirty_pages_init = ram_list.dirty_pages;
|
RAMBlock *block;
|
||||||
|
ram_addr_t addr;
|
||||||
|
uint64_t num_dirty_pages_init = migration_dirty_pages;
|
||||||
|
|
||||||
trace_migration_bitmap_sync_start();
|
trace_migration_bitmap_sync_start();
|
||||||
memory_global_sync_dirty_bitmap(get_system_memory());
|
memory_global_sync_dirty_bitmap(get_system_memory());
|
||||||
trace_migration_bitmap_sync_end(ram_list.dirty_pages
|
|
||||||
|
QLIST_FOREACH(block, &ram_list.blocks, next) {
|
||||||
|
for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
|
||||||
|
if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
|
||||||
|
DIRTY_MEMORY_MIGRATION)) {
|
||||||
|
migration_bitmap_set_dirty(block->mr, addr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
memory_region_reset_dirty(block->mr, 0, block->length,
|
||||||
|
DIRTY_MEMORY_MIGRATION);
|
||||||
|
}
|
||||||
|
trace_migration_bitmap_sync_end(migration_dirty_pages
|
||||||
- num_dirty_pages_init);
|
- num_dirty_pages_init);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -442,7 +462,7 @@ static uint64_t bytes_transferred;
|
|||||||
|
|
||||||
static ram_addr_t ram_save_remaining(void)
|
static ram_addr_t ram_save_remaining(void)
|
||||||
{
|
{
|
||||||
return ram_list.dirty_pages;
|
return migration_dirty_pages;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint64_t ram_bytes_remaining(void)
|
uint64_t ram_bytes_remaining(void)
|
||||||
@ -527,6 +547,11 @@ static void reset_ram_globals(void)
|
|||||||
static int ram_save_setup(QEMUFile *f, void *opaque)
|
static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||||
{
|
{
|
||||||
RAMBlock *block;
|
RAMBlock *block;
|
||||||
|
int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
|
||||||
|
|
||||||
|
migration_bitmap = bitmap_new(ram_pages);
|
||||||
|
bitmap_set(migration_bitmap, 1, ram_pages);
|
||||||
|
migration_dirty_pages = ram_pages;
|
||||||
|
|
||||||
bytes_transferred = 0;
|
bytes_transferred = 0;
|
||||||
reset_ram_globals();
|
reset_ram_globals();
|
||||||
@ -544,13 +569,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
|||||||
acct_clear();
|
acct_clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Make sure all dirty bits are set */
|
|
||||||
QLIST_FOREACH(block, &ram_list.blocks, next) {
|
|
||||||
migration_bitmap_set_dirty(block->mr, block->length);
|
|
||||||
}
|
|
||||||
|
|
||||||
memory_global_dirty_log_start();
|
memory_global_dirty_log_start();
|
||||||
memory_global_sync_dirty_bitmap(get_system_memory());
|
migration_bitmap_sync();
|
||||||
|
|
||||||
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
|
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
|
||||||
|
|
||||||
@ -655,6 +675,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
|
|||||||
|
|
||||||
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
|
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
|
||||||
|
|
||||||
|
g_free(migration_bitmap);
|
||||||
|
migration_bitmap = NULL;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -500,7 +500,6 @@ typedef struct RAMBlock {
|
|||||||
typedef struct RAMList {
|
typedef struct RAMList {
|
||||||
uint8_t *phys_dirty;
|
uint8_t *phys_dirty;
|
||||||
QLIST_HEAD(, RAMBlock) blocks;
|
QLIST_HEAD(, RAMBlock) blocks;
|
||||||
uint64_t dirty_pages;
|
|
||||||
} RAMList;
|
} RAMList;
|
||||||
extern RAMList ram_list;
|
extern RAMList ram_list;
|
||||||
|
|
||||||
|
@ -75,11 +75,6 @@ static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
|
|||||||
/*
 * OR @dirty_flags into the qemu dirty-bitmap byte covering @addr and
 * return the resulting flag byte.  No dirty-page counting happens here
 * any more; that bookkeeping lives with the migration bitmap.
 */
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}
||||||
|
|
||||||
@ -93,11 +88,6 @@ static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
|
|||||||
{
|
{
|
||||||
int mask = ~dirty_flags;
|
int mask = ~dirty_flags;
|
||||||
|
|
||||||
if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
|
|
||||||
cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
|
|
||||||
MIGRATION_DIRTY_FLAG)) {
|
|
||||||
ram_list.dirty_pages--;
|
|
||||||
}
|
|
||||||
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
|
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user