/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef QEMU_MIGRATION_RAM_H
#define QEMU_MIGRATION_RAM_H

#include "qapi/qapi-types-migration.h"
#include "exec/cpu-common.h"
#include "io/channel.h"

extern XBZRLECacheStats xbzrle_counters;
extern CompressionStats compression_counters;

bool ramblock_is_ignored(RAMBlock *block);
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (ramblock_is_ignored(block)) {} else

#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

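/*
 * Illustrative sketch: walking the not-ignored RAMBlocks under the RCU
 * read lock, e.g. to sum up their used size (RCU_READ_LOCK_GUARD() and
 * RAMBlock::used_length are assumed from the wider QEMU tree):
 *
 *     RAMBlock *block;
 *     uint64_t total = 0;
 *
 *     RCU_READ_LOCK_GUARD();
 *     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
 *         total += block->used_length;
 *     }
 */
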
int xbzrle_cache_resize(uint64_t new_size, Error **errp);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
void mig_throttle_counter_reset(void);

uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
void ram_postcopy_migrated_memory_release(MigrationState *ms);
/* For outgoing discard bitmap */
void ram_postcopy_send_discard_bitmap(MigrationState *ms);
/* For incoming postcopy discard */
int ram_discard_range(const char *block_name, uint64_t start, size_t length);
int ram_postcopy_incoming_init(MigrationIncomingState *mis);
int ram_load_postcopy(QEMUFile *f, int channel);

void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);

void ram_transferred_add(uint64_t bytes);
void ram_release_page(const char *rbname, uint64_t offset);

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr);
bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset);
void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr);
void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr);
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name);
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
/*
 * Check whether the page at @start of @rb is discarded ("logically
 * unplugged" via a RamDiscardManager). Postcopy must not request such
 * pages from the source; a zero page is placed instead.
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start);
void postcopy_preempt_shutdown_file(MigrationState *s);
void *postcopy_preempt_thread(void *opaque);

/* ram cache */
int colo_init_ram_cache(void);
void colo_flush_ram_cache(void);
void colo_release_ram_cache(void);
void colo_incoming_start_dirty_log(void);

/* Background snapshot */
bool ram_write_tracking_available(void);
bool ram_write_tracking_compatible(void);
void ram_write_tracking_prepare(void);
int ram_write_tracking_start(void);
void ram_write_tracking_stop(void);
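/*
 * Illustrative sketch of a plausible call order (an assumption, not taken
 * from this header; the real sequencing lives in the migration core):
 *
 *     if (ram_write_tracking_available() &&
 *         ram_write_tracking_compatible()) {
 *         ram_write_tracking_prepare();
 *         if (ram_write_tracking_start() == 0) {
 *             ... take the background snapshot ...
 *             ram_write_tracking_stop();
 *         }
 *     }
 */
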
#endif