ram: Move migration_dirty_pages to RAMState

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Author: Juan Quintela <quintela@redhat.com>
Date:   2017-03-13 21:21:41 +01:00
Parent: 180f61f75a
Commit: 0d8ec885ed

@@ -183,6 +183,8 @@ struct RAMState {
     double xbzrle_cache_miss_rate;
     /* xbzrle number of overflows */
     uint64_t xbzrle_overflows;
+    /* number of dirty bits in the bitmap */
+    uint64_t migration_dirty_pages;
 };
 typedef struct RAMState RAMState;
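
Note: every hunk below applies the same pattern: a file-scope global in migration/ram.c becomes a field of RAMState, and each user either takes a RAMState * parameter or reaches the single global ram_state instance. A minimal stand-alone sketch of that refactor, with purely illustrative names (DemoState, demo_page_clear), not QEMU's:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Was: static uint64_t dirty_pages;  -- now carried in the state. */
    struct DemoState {
        uint64_t dirty_pages;
    };

    /* The dependency is now explicit in the signature instead of
     * hidden behind a file-scope variable. */
    static void demo_page_clear(struct DemoState *ds)
    {
        if (ds->dirty_pages > 0) {
            ds->dirty_pages--;
        }
    }

    int main(void)
    {
        struct DemoState ds = { .dirty_pages = 2 };
        demo_page_clear(&ds);
        printf("dirty_pages = %" PRIu64 "\n", ds.dirty_pages); /* prints 1 */
        return 0;
    }
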
@@ -223,8 +225,12 @@ uint64_t xbzrle_mig_pages_overflow(void)
     return ram_state.xbzrle_overflows;
 }
 
+static ram_addr_t ram_save_remaining(void)
+{
+    return ram_state.migration_dirty_pages;
+}
+
 static QemuMutex migration_bitmap_mutex;
-static uint64_t migration_dirty_pages;
 
 /* used by the search for pages to send */
 struct PageSearchStatus {
@@ -582,7 +588,7 @@ ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     return (next - base) << TARGET_PAGE_BITS;
 }
 
-static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
+static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr)
 {
     bool ret;
     int nr = addr >> TARGET_PAGE_BITS;
@@ -591,7 +597,7 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
     ret = test_and_clear_bit(nr, bitmap);
 
     if (ret) {
-        migration_dirty_pages--;
+        rs->migration_dirty_pages--;
     }
     return ret;
 }
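
For readers unfamiliar with the bitops here: test_and_clear_bit() returns the previous value of the bit, which is why the counter is decremented only when the page really was dirty. A self-contained illustration using a simplified, non-atomic stand-in (demo_test_and_clear_bit is not QEMU's implementation, which lives in qemu/bitops.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_BITS_PER_LONG (sizeof(unsigned long) * 8)

    static bool demo_test_and_clear_bit(long nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % DEMO_BITS_PER_LONG);
        unsigned long *p = addr + (nr / DEMO_BITS_PER_LONG);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;           /* previous value of the bit */
    }

    int main(void)
    {
        unsigned long bitmap[1] = { 0x5 };  /* pages 0 and 2 dirty */
        unsigned long dirty_pages = 2;

        /* Clearing a set bit returns true, so the counter drops. */
        if (demo_test_and_clear_bit(2, bitmap)) {
            dirty_pages--;
        }
        /* Clearing it again returns false; the counter is untouched. */
        if (demo_test_and_clear_bit(2, bitmap)) {
            dirty_pages--;
        }
        printf("dirty_pages = %lu\n", dirty_pages);   /* prints 1 */
        return 0;
    }
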
@@ -601,8 +607,9 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start,
 {
     unsigned long *bitmap;
     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
-                                  start, length, &rs->num_dirty_pages_period);
+    rs->migration_dirty_pages +=
+        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length,
+                                              &rs->num_dirty_pages_period);
 }
 
 static void migration_bitmap_sync_init(RAMState *rs)
@@ -1304,7 +1311,7 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
     int res = 0;
 
     /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
+    if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
         unsigned long *unsentmap;
         if (compression_switch && migrate_use_compression()) {
             res = ram_save_compressed_page(rs, ms, f, pss,
@@ -1454,11 +1461,6 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
     }
 }
 
-static ram_addr_t ram_save_remaining(void)
-{
-    return migration_dirty_pages;
-}
-
 uint64_t ram_bytes_remaining(void)
 {
     return ram_save_remaining() * TARGET_PAGE_SIZE;
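
ram_bytes_remaining() itself is untouched; the estimate is still the dirty-page count times TARGET_PAGE_SIZE. A worked example with hypothetical numbers, assuming the common 4 KiB target page size (the real value is target-dependent):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_TARGET_PAGE_BITS 12                   /* 4 KiB pages */
    #define DEMO_TARGET_PAGE_SIZE (1 << DEMO_TARGET_PAGE_BITS)

    int main(void)
    {
        uint64_t dirty_pages = 1000;           /* pages still to send */
        uint64_t bytes = dirty_pages * DEMO_TARGET_PAGE_SIZE;

        printf("%llu bytes remaining\n", (unsigned long long)bytes);
        return 0;                              /* prints 4096000 */
    }
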
@@ -1532,6 +1534,7 @@ static void ram_state_reset(RAMState *rs)
 
 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
 {
+    RAMState *rs = &ram_state;
     /* called in qemu main thread, so there is
      * no writing race against this migration_bitmap
      */
@@ -1557,7 +1560,7 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
         atomic_rcu_set(&migration_bitmap_rcu, bitmap);
         qemu_mutex_unlock(&migration_bitmap_mutex);
-        migration_dirty_pages += new - old;
+        rs->migration_dirty_pages += new - old;
         call_rcu(old_bitmap, migration_bitmap_free, rcu);
     }
 }
@@ -1730,6 +1733,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           RAMBlock *block,
                                           PostcopyDiscardState *pds)
 {
+    RAMState *rs = &ram_state;
     unsigned long *bitmap;
     unsigned long *unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
@@ -1827,7 +1831,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
              * Remark them as dirty, updating the count for any pages
              * that weren't previously dirty.
              */
-            migration_dirty_pages += !test_and_set_bit(page, bitmap);
+            rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
         }
     }
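
The += !test_and_set_bit(...) line is a branchless idiom worth spelling out: test_and_set_bit() marks the page dirty and returns its previous value, so adding the negation bumps the counter only for pages that were not already dirty, keeping the count exact without an if. A simplified, non-atomic illustration (the demo_* names are stand-ins, not QEMU's bitops):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_BITS_PER_LONG (sizeof(unsigned long) * 8)

    static bool demo_test_and_set_bit(long nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % DEMO_BITS_PER_LONG);
        unsigned long *p = addr + (nr / DEMO_BITS_PER_LONG);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;           /* previous value of the bit */
    }

    int main(void)
    {
        unsigned long bitmap[1] = { 0x1 };  /* page 0 already dirty */
        unsigned long dirty_pages = 1;

        dirty_pages += !demo_test_and_set_bit(0, bitmap);  /* no change */
        dirty_pages += !demo_test_and_set_bit(1, bitmap);  /* +1 */
        printf("dirty_pages = %lu\n", dirty_pages);        /* prints 2 */
        return 0;
    }
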
@@ -2053,7 +2057,7 @@ static int ram_save_init_globals(RAMState *rs)
      * Count the total number of pages used by ram blocks not including any
      * gaps due to alignment or unplugs.
      */
-    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+    rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
     migration_bitmap_sync(rs);
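
At setup time every page is treated as dirty, so the counter simply starts at the total RAM size in pages; the migration_bitmap_sync() that follows then keeps the per-page bitmap current. A worked example of that initialization with hypothetical numbers (a 4 GiB guest, 4 KiB target pages):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_TARGET_PAGE_BITS 12           /* 4 KiB pages */

    int main(void)
    {
        uint64_t ram_bytes = 4ULL << 30;       /* hypothetical 4 GiB guest */
        uint64_t dirty_pages = ram_bytes >> DEMO_TARGET_PAGE_BITS;

        printf("%llu pages initially dirty\n",
               (unsigned long long)dirty_pages);
        return 0;                              /* prints 1048576 */
    }
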