ram: Move QEMUFile into RAMState
We receive the file from save_live operations and we don't use it
until 3 or 4 levels of calls down.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
commit ce25d33781
parent 204b88b869
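Before the diff, a minimal sketch of what the refactor amounts to (a hypothetical, trimmed struct: the real RAMState in migration/ram.c carries many more fields; only members that actually appear in this diff are shown). The QEMUFile is stored once in the RAMState that already travels through the whole call chain, so helpers three or four levels down reach the stream as rs->f instead of taking an extra QEMUFile *f parameter.

/* Sketch only: the real RAMState has many more fields than shown here. */
typedef struct RAMState {
    QEMUFile *f;                /* stream that all RAM pages are written to */
    RAMBlock *last_sent_block;  /* last block whose name was sent           */
    uint64_t zero_pages;        /* pages sent as a single zero byte         */
    uint64_t norm_pages;        /* pages sent as full TARGET_PAGE_SIZE data */
    uint64_t bytes_transferred; /* bytes written to f so far                */
} RAMState;

/* Deep helpers now take only rs and use rs->f internally, e.g.: */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p);

Threading a single state pointer keeps these signatures stable as more migration state is moved into RAMState.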
@@ -757,21 +757,20 @@ static void migration_bitmap_sync(RAMState *rs)
  * Returns the number of pages written.
  *
  * @rs: current RAM state
- * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @p: pointer to the page
  */
-static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block,
-                          ram_addr_t offset, uint8_t *p)
+static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
+                          uint8_t *p)
 {
     int pages = -1;
 
     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
         rs->zero_pages++;
         rs->bytes_transferred +=
-            save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS);
-        qemu_put_byte(f, 0);
+            save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS);
+        qemu_put_byte(rs->f, 0);
         rs->bytes_transferred += 1;
         pages = 1;
     }
@@ -799,12 +798,11 @@ static void ram_release_pages(MigrationState *ms, const char *rbname,
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  */
-static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
+static int ram_save_page(RAMState *rs, MigrationState *ms,
                          PageSearchStatus *pss, bool last_stage)
 {
     int pages = -1;
@@ -820,7 +818,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
 
     /* In doubt sent page as normal */
     bytes_xmit = 0;
-    ret = ram_control_save_page(f, block->offset,
+    ret = ram_control_save_page(rs->f, block->offset,
                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
         rs->bytes_transferred += bytes_xmit;
@@ -843,7 +841,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
             }
         }
     } else {
-        pages = save_zero_page(rs, f, block, offset, p);
+        pages = save_zero_page(rs, block, offset, p);
         if (pages > 0) {
             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
              * page would be stale
@@ -865,14 +863,14 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
 
     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        rs->bytes_transferred += save_page_header(f, block,
+        rs->bytes_transferred += save_page_header(rs->f, block,
                                                   offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
-            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
+            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                   migrate_release_ram() &
                                   migration_in_postcopy(ms));
         } else {
-            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+            qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
         }
         rs->bytes_transferred += TARGET_PAGE_SIZE;
         pages = 1;
@@ -907,7 +905,7 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
     return bytes_sent;
 }
 
-static void flush_compressed_data(RAMState *rs, QEMUFile *f)
+static void flush_compressed_data(RAMState *rs)
 {
     int idx, len, thread_count;
 
@@ -927,7 +925,7 @@ static void flush_compressed_data(RAMState *rs, QEMUFile *f)
     for (idx = 0; idx < thread_count; idx++) {
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
-            len = qemu_put_qemu_file(f, comp_param[idx].file);
+            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
             rs->bytes_transferred += len;
         }
         qemu_mutex_unlock(&comp_param[idx].mutex);
@@ -941,8 +939,8 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
     param->offset = offset;
 }
 
-static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
-                                           RAMBlock *block, ram_addr_t offset)
+static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
+                                           ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
 
@@ -952,7 +950,7 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
     for (idx = 0; idx < thread_count; idx++) {
         if (comp_param[idx].done) {
             comp_param[idx].done = false;
-            bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
+            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
             qemu_mutex_lock(&comp_param[idx].mutex);
             set_compress_params(&comp_param[idx], block, offset);
             qemu_cond_signal(&comp_param[idx].cond);
@@ -981,13 +979,11 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  */
 static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
-                                    QEMUFile *f,
                                     PageSearchStatus *pss, bool last_stage)
 {
     int pages = -1;
@@ -999,7 +995,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
 
     p = block->host + offset;
 
-    ret = ram_control_save_page(f, block->offset,
+    ret = ram_control_save_page(rs->f, block->offset,
                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
         rs->bytes_transferred += bytes_xmit;
@@ -1021,20 +1017,20 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
          * is used to avoid resending the block name.
          */
         if (block != rs->last_sent_block) {
-            flush_compressed_data(rs, f);
-            pages = save_zero_page(rs, f, block, offset, p);
+            flush_compressed_data(rs);
+            pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
                 /* Make sure the first page is sent out before other pages */
-                bytes_xmit = save_page_header(f, block, offset |
+                bytes_xmit = save_page_header(rs->f, block, offset |
                                               RAM_SAVE_FLAG_COMPRESS_PAGE);
-                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
+                blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                  migrate_compress_level());
                 if (blen > 0) {
                     rs->bytes_transferred += bytes_xmit + blen;
                     rs->norm_pages++;
                     pages = 1;
                 } else {
-                    qemu_file_set_error(f, blen);
+                    qemu_file_set_error(rs->f, blen);
                     error_report("compressed data failed!");
                 }
             }
@@ -1043,9 +1039,9 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
             }
         } else {
             offset |= RAM_SAVE_FLAG_CONTINUE;
-            pages = save_zero_page(rs, f, block, offset, p);
+            pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
-                pages = compress_page_with_multi_thread(rs, f, block, offset);
+                pages = compress_page_with_multi_thread(rs, block, offset);
             } else {
                 ram_release_pages(ms, block->idstr, pss->offset, pages);
             }
@@ -1062,13 +1058,12 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
  * Returns if a page is found
  *
  * @rs: current RAM state
- * @f: QEMUFile where to send the data
  * @pss: data about the state of the current dirty page scan
  * @again: set to false if the search has scanned the whole of RAM
  * @ram_addr_abs: pointer into which to store the address of the dirty page
  *                within the global ram_addr space
  */
-static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
+static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
                              bool *again, ram_addr_t *ram_addr_abs)
 {
     pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset,
@@ -1096,7 +1091,7 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
                 /* If xbzrle is on, stop using the data compression at this
                  * point. In theory, xbzrle can do better than compression.
                  */
-                flush_compressed_data(rs, f);
+                flush_compressed_data(rs);
                 compression_switch = false;
             }
         }
@@ -1317,12 +1312,11 @@ err:
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
+static int ram_save_target_page(RAMState *rs, MigrationState *ms,
                                 PageSearchStatus *pss,
                                 bool last_stage,
                                 ram_addr_t dirty_ram_abs)
@@ -1333,9 +1327,9 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
     if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
         unsigned long *unsentmap;
         if (compression_switch && migrate_use_compression()) {
-            res = ram_save_compressed_page(rs, ms, f, pss, last_stage);
+            res = ram_save_compressed_page(rs, ms, pss, last_stage);
         } else {
-            res = ram_save_page(rs, ms, f, pss, last_stage);
+            res = ram_save_page(rs, ms, pss, last_stage);
         }
 
         if (res < 0) {
@@ -1370,12 +1364,11 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
+static int ram_save_host_page(RAMState *rs, MigrationState *ms,
                               PageSearchStatus *pss,
                               bool last_stage,
                               ram_addr_t dirty_ram_abs)
@@ -1384,8 +1377,7 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
     size_t pagesize = qemu_ram_pagesize(pss->block);
 
     do {
-        tmppages = ram_save_target_page(rs, ms, f, pss, last_stage,
-                                        dirty_ram_abs);
+        tmppages = ram_save_target_page(rs, ms, pss, last_stage, dirty_ram_abs);
         if (tmppages < 0) {
             return tmppages;
         }
@@ -1408,14 +1400,13 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
  * Returns the number of pages written where zero means no dirty pages
  *
  * @rs: current RAM state
- * @f: QEMUFile where to send the data
  * @last_stage: if we are at the completion stage
  *
  * On systems where host-page-size > target-page-size it will send all the
  * pages in a host page that are dirty.
  */
 
-static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage)
+static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 {
     PageSearchStatus pss;
     MigrationState *ms = migrate_get_current();
@@ -1443,12 +1434,11 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage)
 
         if (!found) {
             /* priority queue empty, so just search for something dirty */
-            found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs);
+            found = find_dirty_block(rs, &pss, &again, &dirty_ram_abs);
         }
 
         if (found) {
-            pages = ram_save_host_page(rs, ms, f, &pss, last_stage,
-                                       dirty_ram_abs);
+            pages = ram_save_host_page(rs, ms, &pss, last_stage, dirty_ram_abs);
         }
     } while (!pages && again);
 
@@ -2148,7 +2138,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     while ((ret = qemu_file_rate_limit(f)) == 0) {
         int pages;
 
-        pages = ram_find_and_save_block(rs, f, false);
+        pages = ram_find_and_save_block(rs, false);
         /* no more pages to sent */
         if (pages == 0) {
             done = 1;
@@ -2170,7 +2160,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         }
         i++;
     }
-    flush_compressed_data(rs, f);
+    flush_compressed_data(rs);
     rcu_read_unlock();
 
     /*
@@ -2218,14 +2208,14 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     while (true) {
         int pages;
 
-        pages = ram_find_and_save_block(rs, f, !migration_in_colo_state());
+        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
         /* no more blocks to sent */
        if (pages == 0) {
             break;
         }
     }
 
-    flush_compressed_data(rs, f);
+    flush_compressed_data(rs);
     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
 
     rcu_read_unlock();