ram: Split dirty bitmap by RAMBlock
Both the ram bitmap and the unsent bitmap are split by RAMBlock.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Reviewed-by: Peter Xu <peterx@redhat.com>

--

Fix compilation when DEBUG_POSTCOPY is enabled (thanks Hailiang)
parent e619b14746
commit 6b6712efcc
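The gist of the change: the single global migration bitmap (and the postcopy
unsentmap) indexed by global page numbers is replaced by a bmap/unsentmap pair
hanging off each RAMBlock, so every bitmap index becomes relative to its own
block. A minimal sketch of the indexing change follows; it is illustrative
only, the helper name page_is_dirty is hypothetical and not part of the patch,
and it assumes QEMU's test_bit() from qemu/bitops.h plus the RAMBlock::bmap
field added below.

/* Illustrative sketch only, not code from this patch.
 * Before this patch a page was tested in the global bitmap roughly as
 *   test_bit((block->offset >> TARGET_PAGE_BITS) + page, rs->ram_bitmap->bmap);
 * afterwards the index is block-relative:
 */
static inline bool page_is_dirty(RAMBlock *block, unsigned long page)
{
    /* 'page' ranges over 0 .. (block->used_length >> TARGET_PAGE_BITS) - 1 */
    return test_bit(page, block->bmap);
}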
@@ -39,6 +39,14 @@ struct RAMBlock {
     QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
     int fd;
     size_t page_size;
+    /* dirty bitmap used during migration */
+    unsigned long *bmap;
+    /* bitmap of pages that haven't been sent even once
+     * only maintained and used in postcopy at the moment
+     * where it's used to send the dirtymap at the start
+     * of the postcopy phase
+     */
+    unsigned long *unsentmap;
 };
 
 static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -360,16 +368,15 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 
 
 static inline
-uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
-                                               RAMBlock *rb,
+uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                                ram_addr_t start,
                                                ram_addr_t length,
                                                uint64_t *real_dirty_pages)
 {
     ram_addr_t addr;
-    start = rb->offset + start;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
     uint64_t num_dirty = 0;
+    unsigned long *dest = rb->bmap;
 
     /* start address is aligned at the start of a word? */
     if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
@@ -266,7 +266,8 @@ uint64_t xbzrle_mig_pages_cache_miss(void);
 double xbzrle_mig_cache_miss_rate(void);
 
 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages);
 /* For outgoing discard bitmap */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms);
 /* For incoming postcopy discard */
@@ -43,12 +43,9 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis);
 
 /*
  * Called at the start of each RAMBlock by the bitmap code.
- * 'offset' is the bitmap offset of the named RAMBlock in the migration
- * bitmap.
  * Returns a new PDS
  */
 PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
-                                                 unsigned long offset,
                                                  const char *name);
 
 /*
@@ -33,7 +33,6 @@
 
 struct PostcopyDiscardState {
     const char *ramblock_name;
-    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
     uint16_t cur_entry;
     /*
      * Start and length of a discard range (bytes)
@@ -717,14 +716,12 @@ void *postcopy_get_tmp_page(MigrationIncomingState *mis)
  * returns: a new PDS.
  */
 PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
-                                                 unsigned long offset,
                                                  const char *name)
 {
     PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));
 
     if (res) {
         res->ramblock_name = name;
-        res->offset = offset;
     }
 
     return res;
@@ -745,7 +742,7 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
 {
     size_t tp_size = qemu_target_page_size();
     /* Convert to byte offsets within the RAM block */
-    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
+    pds->start_list[pds->cur_entry] = start * tp_size;
     pds->length_list[pds->cur_entry] = length * tp_size;
     trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
     pds->cur_entry++;
migration/ram.c (267 changed lines)
@@ -138,19 +138,6 @@ out:
     return ret;
 }
 
-struct RAMBitmap {
-    struct rcu_head rcu;
-    /* Main migration bitmap */
-    unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
-};
-typedef struct RAMBitmap RAMBitmap;
-
 /*
  * An outstanding page request, on the source, having been received
  * and queued
@@ -220,8 +207,6 @@ struct RAMState {
     uint64_t postcopy_requests;
     /* protects modification of the bitmap */
     QemuMutex bitmap_mutex;
-    /* Ram Bitmap protected by RCU */
-    RAMBitmap *ram_bitmap;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
     /* Queue of outstanding page requests from the destination */
@@ -614,22 +599,17 @@ static inline
 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                           unsigned long start)
 {
-    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
-    unsigned long nr = base + start;
-    uint64_t rb_size = rb->used_length;
-    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
-    unsigned long *bitmap;
-
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    if (rs->ram_bulk_stage && nr > base) {
-        next = nr + 1;
+    if (rs->ram_bulk_stage && start > 0) {
+        next = start + 1;
     } else {
-        next = find_next_bit(bitmap, size, nr);
+        next = find_next_bit(bitmap, size, start);
     }
 
-    return next - base;
+    return next;
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -637,10 +617,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 unsigned long page)
 {
     bool ret;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
 
-    ret = test_and_clear_bit(nr, bitmap);
+    ret = test_and_clear_bit(page, rb->bmap);
 
     if (ret) {
         rs->migration_dirty_pages--;
@@ -651,10 +629,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                         ram_addr_t start, ram_addr_t length)
 {
-    unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
     rs->migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length,
+        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                               &rs->num_dirty_pages_period);
 }
 
@@ -1153,17 +1129,13 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
      * search already sent it.
      */
     if (block) {
-        unsigned long *bitmap;
         unsigned long page;
 
-        bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-        page = (block->offset + offset) >> TARGET_PAGE_BITS;
-        dirty = test_bit(page, bitmap);
+        page = offset >> TARGET_PAGE_BITS;
+        dirty = test_bit(page, block->bmap);
         if (!dirty) {
             trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
-                                            page,
-                                            test_bit(page,
-                                                     atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
+                                            page, test_bit(page, block->unsentmap));
         } else {
             trace_get_queued_page(block->idstr, (uint64_t)offset, page);
         }
@@ -1301,16 +1273,13 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
 
     /* Check the pages is dirty and if it is send it */
     if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        unsigned long *unsentmap;
         /*
          * If xbzrle is on, stop using the data compression after first
          * round of migration even if compression is enabled. In theory,
          * xbzrle can do better than compression.
         */
-        unsigned long page =
-            (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
-        if (migrate_use_compression()
-            && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        if (migrate_use_compression() &&
+            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
             res = ram_save_compressed_page(rs, pss, last_stage);
         } else {
             res = ram_save_page(rs, pss, last_stage);
@@ -1319,9 +1288,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
         if (res < 0) {
             return res;
         }
-        unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-        if (unsentmap) {
-            clear_bit(page, unsentmap);
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
         }
     }
 
@@ -1451,25 +1419,20 @@ void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }
 
-static void migration_bitmap_free(RAMBitmap *bmap)
-{
-    g_free(bmap->bmap);
-    g_free(bmap->unsentmap);
-    g_free(bmap);
-}
-
 static void ram_migration_cleanup(void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
      */
-    RAMBitmap *bitmap = rs->ram_bitmap;
-    atomic_rcu_set(&rs->ram_bitmap, NULL);
-    if (bitmap) {
-        memory_global_dirty_log_stop();
-        call_rcu(bitmap, migration_bitmap_free, rcu);
+    memory_global_dirty_log_stop();
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+        g_free(block->unsentmap);
+        block->unsentmap = NULL;
     }
 
     XBZRLE_cache_lock();
@@ -1501,27 +1464,22 @@ static void ram_state_reset(RAMState *rs)
  * of; it won't bother printing lines that are all this value.
  * If 'todump' is null the migration bitmap is dumped.
  */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages)
 {
-    unsigned long ram_pages = last_ram_page();
-    RAMState *rs = &ram_state;
     int64_t cur;
     int64_t linelen = 128;
     char linebuf[129];
 
-    if (!todump) {
-        todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    }
-
-    for (cur = 0; cur < ram_pages; cur += linelen) {
+    for (cur = 0; cur < pages; cur += linelen) {
         int64_t curb;
         bool found = false;
         /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
-        if (cur + linelen > ram_pages) {
-            linelen = ram_pages - cur;
+        if (cur + linelen > pages) {
+            linelen = pages - cur;
         }
         for (curb = 0; curb < linelen; curb++) {
             bool thisbit = test_bit(cur + curb, todump);
@@ -1539,14 +1497,12 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 
 void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
     struct RAMBlock *block;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
-        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+        unsigned long *bitmap = block->bmap;
+        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
 
         while (run_start < range) {
             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
@@ -1573,16 +1529,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
-                                        unsigned long start,
-                                        unsigned long length)
+                                        RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    unsigned long end = start + length; /* one after the end */
+    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
     unsigned long current;
-    unsigned long *unsentmap;
+    unsigned long *unsentmap = block->unsentmap;
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    for (current = start; current < end; ) {
+    for (current = 0; current < end; ) {
         unsigned long one = find_next_bit(unsentmap, end, current);
 
         if (one <= end) {
@@ -1625,18 +1578,15 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     int ret;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
-                                                               first,
-                                                               block->idstr);
+        PostcopyDiscardState *pds =
+            postcopy_discard_send_init(ms, block->idstr);
 
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
          * just needs indexes at this point, avoids it having
          * target page specific code.
          */
-        ret = postcopy_send_discard_bm_ram(ms, pds, first,
-                                           block->used_length >> TARGET_PAGE_BITS);
+        ret = postcopy_send_discard_bm_ram(ms, pds, block);
         postcopy_discard_send_finish(ms, pds);
         if (ret) {
             return ret;
@@ -1667,12 +1617,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           PostcopyDiscardState *pds)
 {
     RAMState *rs = &ram_state;
-    unsigned long *bitmap;
-    unsigned long *unsentmap;
+    unsigned long *bitmap = block->bmap;
+    unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
-    unsigned long first = block->offset >> TARGET_PAGE_BITS;
-    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
-    unsigned long last = first + (len - 1);
+    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
     unsigned long run_start;
 
     if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1680,18 +1628,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
         return;
     }
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-
     if (unsent_pass) {
         /* Find a sent page */
-        run_start = find_next_zero_bit(unsentmap, last + 1, first);
+        run_start = find_next_zero_bit(unsentmap, pages, 0);
     } else {
         /* Find a dirty page */
-        run_start = find_next_bit(bitmap, last + 1, first);
+        run_start = find_next_bit(bitmap, pages, 0);
     }
 
-    while (run_start <= last) {
+    while (run_start < pages) {
         bool do_fixup = false;
         unsigned long fixup_start_addr;
         unsigned long host_offset;
@@ -1711,9 +1656,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
             /* Find the end of this run */
             unsigned long run_end;
             if (unsent_pass) {
-                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
+                run_end = find_next_bit(unsentmap, pages, run_start + 1);
             } else {
-                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
+                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
             }
             /*
              * If the end isn't at the start of a host page, then the
@@ -1770,11 +1715,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
 
         if (unsent_pass) {
             /* Find the next sent page for the next iteration */
-            run_start = find_next_zero_bit(unsentmap, last + 1,
-                                           run_start);
+            run_start = find_next_zero_bit(unsentmap, pages, run_start);
         } else {
             /* Find the next dirty page for the next iteration */
-            run_start = find_next_bit(bitmap, last + 1, run_start);
+            run_start = find_next_bit(bitmap, pages, run_start);
        }
     }
 }
@@ -1791,34 +1735,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
  * Returns zero on success
  *
  * @ms: current migration state
+ * @block: block we want to work with
  */
-static int postcopy_chunk_hostpages(MigrationState *ms)
+static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    struct RAMBlock *block;
-
-    /* Easiest way to make sure we don't resume in the middle of a host-page */
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-
-        PostcopyDiscardState *pds =
-            postcopy_discard_send_init(ms, first, block->idstr);
-
-        /* First pass: Discard all partially sent host pages */
-        postcopy_chunk_hostpages_pass(ms, true, block, pds);
-        /*
-         * Second pass: Ensure that all partially dirty host pages are made
-         * fully dirty.
-         */
-        postcopy_chunk_hostpages_pass(ms, false, block, pds);
-
-        postcopy_discard_send_finish(ms, pds);
-    } /* ram_list loop */
-
+    PostcopyDiscardState *pds =
+        postcopy_discard_send_init(ms, block->idstr);
+
+    /* First pass: Discard all partially sent host pages */
+    postcopy_chunk_hostpages_pass(ms, true, block, pds);
+    /*
+     * Second pass: Ensure that all partially dirty host pages are made
+     * fully dirty.
+     */
+    postcopy_chunk_hostpages_pass(ms, false, block, pds);
+
+    postcopy_discard_send_finish(ms, pds);
     return 0;
 }
 
@@ -1840,43 +1772,49 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
     RAMState *rs = &ram_state;
+    RAMBlock *block;
     int ret;
-    unsigned long *bitmap, *unsentmap;
 
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    if (!unsentmap) {
-        /* We don't have a safe way to resize the sentmap, so
-         * if the bitmap was resized it will be NULL at this
-         * point.
-         */
-        error_report("migration ram resized during precopy phase");
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    /* Deal with TPS != HPS and huge pages */
-    ret = postcopy_chunk_hostpages(ms);
-    if (ret) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    /*
-     * Update the unsentmap to be unsentmap = unsentmap | dirty
-     */
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page());
-
-
-    trace_ram_postcopy_send_discard_bitmap();
+    /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->last_seen_block = NULL;
+    rs->last_sent_block = NULL;
+    rs->last_page = 0;
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long *bitmap = block->bmap;
+        unsigned long *unsentmap = block->unsentmap;
+
+        if (!unsentmap) {
+            /* We don't have a safe way to resize the sentmap, so
+             * if the bitmap was resized it will be NULL at this
+             * point.
+             */
+            error_report("migration ram resized during precopy phase");
+            rcu_read_unlock();
+            return -EINVAL;
+        }
+        /* Deal with TPS != HPS and huge pages */
+        ret = postcopy_chunk_hostpages(ms, block);
+        if (ret) {
+            rcu_read_unlock();
+            return ret;
+        }
+
+        /*
+         * Update the unsentmap to be unsentmap = unsentmap | dirty
+         */
+        bitmap_or(unsentmap, unsentmap, bitmap, pages);
 #ifdef DEBUG_POSTCOPY
-    ram_debug_dump_bitmap(unsentmap, true);
+        ram_debug_dump_bitmap(unsentmap, true, pages);
 #endif
+    }
+    trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
     rcu_read_unlock();
@@ -1918,8 +1856,6 @@ err:
 
 static int ram_state_init(RAMState *rs)
 {
-    unsigned long ram_bitmap_pages;
-
     memset(rs, 0, sizeof(*rs));
     qemu_mutex_init(&rs->bitmap_mutex);
     qemu_mutex_init(&rs->src_page_req_mutex);
@@ -1961,16 +1897,19 @@ static int ram_state_init(RAMState *rs)
     rcu_read_lock();
     ram_state_reset(rs);
 
-    rs->ram_bitmap = g_new0(RAMBitmap, 1);
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        ram_bitmap_pages = last_ram_page();
-        rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
-
-        if (migrate_postcopy_ram()) {
-            rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
-            bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
+        RAMBlock *block;
+
+        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+            if (migrate_postcopy_ram()) {
+                block->unsentmap = bitmap_new(pages);
+                bitmap_set(block->unsentmap, 0, pages);
+            }
         }
     }
 
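As a closing note, the per-block pattern that the hunks above repeat can be
summarized by the following sketch; it is not code from the patch (the
walk_dirty_pages helper is hypothetical), but it shows how a block's bitmap is
now sized from the block itself and scanned with block-relative indices using
QEMU's find_next_bit().

/* Hypothetical helper, for illustration only; not part of the patch. */
static void walk_dirty_pages(RAMBlock *block)
{
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long page = find_next_bit(block->bmap, pages, 0);

    while (page < pages) {
        /* 'page' is block-relative; no block->offset adjustment is needed. */
        page = find_next_bit(block->bmap, pages, page + 1);
    }
}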