migration/dirtyrate: Fix precision losses and g_usleep overshoot
Signed-off-by: Andrei Gudkov <gudkov.andrei@huawei.com>
Reviewed-by: Hyman Huang <yong.huang@smartx.com>
Message-Id: <8ddb0d40d143f77aab8f602bd494e01e5fa01614.1691161009.git.gudkov.andrei@huawei.com>
Signed-off-by: Hyman Huang <yong.huang@smartx.com>
commit 3eb82637fb
parent 19b14cea45
@@ -57,6 +57,8 @@ static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
         msec = current_time - initial_time;
     } else {
         g_usleep((msec + initial_time - current_time) * 1000);
+        /* g_usleep may overshoot */
+        msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
     }
 
     return msec;
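For context, the overshoot that the first hunk guards against can be observed with plain glib, outside QEMU. Below is a minimal sketch (not QEMU code; the compile command is only illustrative, and g_get_monotonic_time() stands in for the patch's qemu_clock_get_ms()) that sleeps for a requested interval and then re-reads a monotonic clock, mirroring what dirty_stat_wait() now does so that the reported measurement window matches the time actually slept.

/*
 * Minimal glib-only sketch (not QEMU code): g_usleep() may return later
 * than requested, so the caller re-reads the clock to learn how long it
 * actually slept.  Illustrative build command:
 *   gcc sleep_overshoot.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>

int main(void)
{
    const gint64 requested_ms = 100;

    gint64 start_us = g_get_monotonic_time();      /* microseconds */
    g_usleep(requested_ms * 1000);                 /* may overshoot */
    gint64 elapsed_ms = (g_get_monotonic_time() - start_us) / 1000;

    /*
     * elapsed_ms is typically >= requested_ms; reporting requested_ms as
     * the measurement window would understate the real interval and thus
     * overstate the dirty rate computed from it.
     */
    printf("requested %" G_GINT64_FORMAT " ms, slept %" G_GINT64_FORMAT " ms\n",
           requested_ms, elapsed_ms);
    return 0;
}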
@@ -77,9 +79,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
 {
     uint64_t increased_dirty_pages =
         dirty_pages.end_pages - dirty_pages.start_pages;
-    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
 
-    return memory_size_MiB * 1000 / calc_time_ms;
+    /*
+     * multiply by 1000ms/s _before_ converting down to megabytes
+     * to avoid losing precision
+     */
+    return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
+        calc_time_ms;
 }
 
 void global_dirty_log_change(unsigned int flag, bool start)
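To make the precision argument concrete, here is a small standalone sketch (not QEMU code; pages_to_mib() is a hypothetical stand-in for qemu_target_pages_to_MiB() and a 4 KiB target page size is assumed). For roughly 1.9 MiB dirtied over a 100 ms window the true rate is about 19 MiB/s: the old ordering truncates the intermediate value to 1 MiB before scaling and reports 10 MiB/s, while the patched ordering scales first and reports 19 MiB/s.

/*
 * Standalone sketch of the precision issue fixed in the second hunk.
 * pages_to_mib() is only a stand-in for qemu_target_pages_to_MiB() and
 * assumes a 4 KiB target page size; the numbers are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define TARGET_PAGE_SIZE 4096u

static uint64_t pages_to_mib(uint64_t pages)
{
    return pages * TARGET_PAGE_SIZE >> 20;   /* integer MiB, truncating */
}

int main(void)
{
    uint64_t dirty_pages = 487;    /* ~1.9 MiB dirtied ...                 */
    uint64_t calc_time_ms = 100;   /* ... in a 100 ms window -> ~19 MiB/s  */

    /* Old order: truncate to whole MiB first, then scale to MiB/s. */
    uint64_t old_rate = pages_to_mib(dirty_pages) * 1000 / calc_time_ms;

    /*
     * New order: multiply by 1000 while still counting pages, convert to
     * MiB afterwards, so far less precision is lost to truncation.
     */
    uint64_t new_rate = pages_to_mib(dirty_pages * 1000) / calc_time_ms;

    printf("old order: %" PRIu64 " MiB/s, new order: %" PRIu64 " MiB/s\n",
           old_rate, new_rate);    /* prints 10 vs 19 */
    return 0;
}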