/*
* Postcopy migration for RAM
*
* Copyright 2013 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Dave Gilbert <dgilbert@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef QEMU_POSTCOPY_RAM_H
#define QEMU_POSTCOPY_RAM_H
#include "qapi/qapi-types-migration.h"
/* Return true if the host supports everything we need to do postcopy-ram */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis,
Error **errp);
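
/*
 * Usage sketch (illustrative, not part of this header): a caller would
 * typically probe host support early and surface the populated Error,
 * e.g.:
 *
 *   Error *local_err = NULL;
 *
 *   if (!postcopy_ram_supported_by_host(mis, &local_err)) {
 *       error_report_err(local_err);
 *       return false;
 *   }
 */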
/*
* Make all of RAM sensitive to accesses to areas that haven't yet been written
* and wire up anything necessary to deal with it.
*/
int postcopy_ram_incoming_setup(MigrationIncomingState *mis);
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * Called from ram.c's similarly named ram_postcopy_incoming_init.
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis);
/*
 * Called at the end of a migration where postcopy_ram_incoming_init
 * was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis);
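
/*
 * Lifecycle sketch (illustrative only; the real call sites live in ram.c
 * and the incoming migration code):
 *
 *   postcopy_ram_incoming_init(mis);     // before any precopy data arrives
 *   ...                                  // precopy stream is received
 *   postcopy_ram_incoming_setup(mis);    // arm userfault handling
 *   ...                                  // pages are served on demand
 *   postcopy_ram_incoming_cleanup(mis);  // once at the end of the migration
 */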
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis);
/*
* Called at the start of each RAMBlock by the bitmap code.
*/
void postcopy_discard_send_init(MigrationState *ms, const char *name);
/*
 * Called by the bitmap code for each chunk to discard.
 * May send a discard message, or may just leave it queued to
 * be sent later.
 * @start,@length: a range of pages in the migration bitmap in the
 * RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
unsigned long length);
/*
* Called at the end of each RAMBlock by the bitmap code.
* Sends any outstanding discard messages.
*/
void postcopy_discard_send_finish(MigrationState *ms);
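
/*
 * Call-pattern sketch (illustrative; the real driver is the dirty-bitmap
 * walk on the source side):
 *
 *   postcopy_discard_send_init(ms, block->idstr);
 *   for (each run of discardable pages in this block) {
 *       postcopy_discard_send_range(ms, run_start, run_length);
 *   }
 *   postcopy_discard_send_finish(ms);  // flush any still-queued message
 */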
/*
 * Place a page (from) at (host) efficiently.
 * There are restrictions on how 'from' must be mapped; in general it is
 * best to use other postcopy_ routines to allocate it.
 * Returns 0 on success.
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
RAMBlock *rb);
/*
 * Place a zero page at (host) atomically.
 * Returns 0 on success.
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
RAMBlock *rb);
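
/*
 * Placement sketch (illustrative; 'tmp_page' stands for a suitably mapped
 * staging buffer and 'page_is_zero' for the sender's hint):
 *
 *   if (page_is_zero) {
 *       ret = postcopy_place_page_zero(mis, host_addr, rb);
 *   } else {
 *       memcpy(tmp_page, incoming_data, page_size);
 *       ret = postcopy_place_page(mis, host_addr, tmp_page, rb);
 *   }
 *   // ret != 0 means the faulting vCPU cannot be unblocked for this page
 */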
/* The current postcopy state is read/set by postcopy_state_get/set,
 * which update it atomically.
 * The state is updated as postcopy messages are received, and in general
 * only one thread should be writing to the state at any one time:
 * initially the main thread, and then the listen thread; corner cases
 * arise where either thread finishes early and/or errors.
 * The state is checked as messages are received to ensure that
 * the source is sending us messages in the correct order.
 * The state is also used by the RAM reception code to know if it
 * has to place pages atomically, and by the cleanup code at the end of
 * the main thread to know if it has to delay cleanup until the end
 * of postcopy.
 */
typedef enum {
POSTCOPY_INCOMING_NONE = 0, /* Initial state - no postcopy */
POSTCOPY_INCOMING_ADVISE,
POSTCOPY_INCOMING_DISCARD,
POSTCOPY_INCOMING_LISTENING,
POSTCOPY_INCOMING_RUNNING,
POSTCOPY_INCOMING_END
} PostcopyState;
PostcopyState postcopy_state_get(void);
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state);
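
/*
 * Ordering-check sketch (illustrative): an incoming-message handler can
 * validate the source's sequencing from the state it replaces, e.g. a
 * LISTEN command only makes sense after ADVISE or DISCARD:
 *
 *   PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
 *
 *   if (old != POSTCOPY_INCOMING_ADVISE && old != POSTCOPY_INCOMING_DISCARD) {
 *       // out-of-order message from the source: fail the migration
 *   }
 */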
void postcopy_fault_thread_notify(MigrationIncomingState *mis);
/*
* To be called once at the start before any device initialisation
*/
void postcopy_infrastructure_init(void);
/* Add a notifier to a list to be called when checking whether the devices
 * can support postcopy.
 * Its data is a PostcopyNotifyData *.
 * It should return 0 if OK, or a negative value on failure.
 * On failure it must set an error in the errp passed to the notifier.
 */
enum PostcopyNotifyReason {
POSTCOPY_NOTIFY_PROBE = 0,
POSTCOPY_NOTIFY_INBOUND_ADVISE,
POSTCOPY_NOTIFY_INBOUND_LISTEN,
POSTCOPY_NOTIFY_INBOUND_END,
};
struct PostcopyNotifyData {
enum PostcopyNotifyReason reason;
};
void postcopy_add_notifier(NotifierWithReturn *nn);
void postcopy_remove_notifier(NotifierWithReturn *n);
/* Call the notifier list registered via postcopy_add_notifier */
int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp);
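
/*
 * Registration sketch (illustrative; the 'mydev_*' names are hypothetical,
 * and the exact NotifierWithReturn callback signature comes from
 * qemu/notify.h, assumed here to receive the PostcopyNotifyData as 'data'
 * plus an Error ** to fill on failure):
 *
 *   static int mydev_postcopy_notify(NotifierWithReturn *n, void *data,
 *                                    Error **errp)
 *   {
 *       PostcopyNotifyData *pnd = data;
 *
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE && !mydev_supports_it()) {
 *           error_setg(errp, "mydev: postcopy not supported");
 *           return -EINVAL;
 *       }
 *       return 0;
 *   }
 *
 *   static NotifierWithReturn mydev_notifier = {
 *       .notify = mydev_postcopy_notify,
 *   };
 *   postcopy_add_notifier(&mydev_notifier);
 */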
void postcopy_thread_create(MigrationIncomingState *mis,
QemuThread *thread, const char *name,
void *(*fn)(void *), int joinable);
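
/*
 * Creation sketch (illustrative; 'fault_thread_fn' is a hypothetical
 * thread function; 'joinable' takes QEMU_THREAD_JOINABLE/DETACHED):
 *
 *   postcopy_thread_create(mis, &mis->fault_thread, "mig/dst/fault",
 *                          fault_thread_fn, QEMU_THREAD_JOINABLE);
 */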
struct PostCopyFD;
/* ufd is a pointer to the struct uffd_msg. TODO: make this more portable! */
typedef int (*pcfdhandler)(struct PostCopyFD *pcfd, void *ufd);
/* Notification to wake, either on place or on reception of
* a fault on something that's already arrived (race)
*/
typedef int (*pcfdwake)(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t offset);
struct PostCopyFD {
int fd;
/* Data to pass to handler */
void *data;
/* Handler to be called whenever we get a poll event */
pcfdhandler handler;
/* Notification to wake shared client */
pcfdwake waker;
/* A string to use in error messages */
const char *idstr;
};
/* Register a userfaultfd owned by an external process for
* shared memory.
*/
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd);
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd);
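
/*
 * Registration sketch for an external client such as a vhost-user backend
 * (illustrative; the handler/waker names are hypothetical):
 *
 *   struct PostCopyFD pcfd = {
 *       .fd      = client_uffd,
 *       .data    = my_client_state,
 *       .handler = my_fault_handler,
 *       .waker   = my_waker,
 *       .idstr   = "vhost-user-0",
 *   };
 *   postcopy_register_shared_ufd(&pcfd);
 */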
/* Call each of the registered shared 'waker's, telling them of the
 * availability of a block.
 */
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset);
/* postcopy_wake_shared: Notify a client ufd that a page is available
*
* Returns 0 on success
*
* @pcfd: Structure with fd, handler and name as above
* @client_addr: Address in the client program, not QEMU
* @rb: The RAMBlock the page is in
*/
int postcopy_wake_shared(struct PostCopyFD *pcfd, uint64_t client_addr,
RAMBlock *rb);
/* Callback from shared fault handlers to ask for a page */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
uint64_t client_addr, uint64_t offset);
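
/*
 * Handler sketch (illustrative; lookup_block()/block_offset() are
 * hypothetical helpers): a pcfdhandler resolves the faulting client
 * address to a RAMBlock and offset, then asks the source for the page:
 *
 *   static int my_fault_handler(struct PostCopyFD *pcfd, void *ufd)
 *   {
 *       struct uffd_msg *msg = ufd;
 *       uint64_t client_addr = msg->arg.pagefault.address;
 *       RAMBlock *rb = lookup_block(pcfd->data, client_addr);
 *       uint64_t offset = block_offset(pcfd->data, rb, client_addr);
 *
 *       return postcopy_request_shared_page(pcfd, rb, client_addr, offset);
 *   }
 */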
/* Hard-code channels for now for postcopy preemption */
enum PostcopyChannels {
RAM_CHANNEL_PRECOPY = 0,
RAM_CHANNEL_POSTCOPY = 1,
RAM_CHANNEL_MAX,
};
void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file);
void postcopy_preempt_setup(MigrationState *s);
int postcopy_preempt_establish_channel(MigrationState *s);
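
/*
 * Source-side sketch (illustrative call order): setup kicks off the
 * asynchronous connect of the preempt channel; establish waits until
 * RAM_CHANNEL_POSTCOPY is actually usable:
 *
 *   postcopy_preempt_setup(s);
 *   ...
 *   if (postcopy_preempt_establish_channel(s)) {
 *       // preempt channel could not be established
 *   }
 */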
bool postcopy_is_paused(MigrationStatus status);
#endif