2020-01-22 18:16:07 +03:00
|
|
|
/*
|
|
|
|
* Multifd common code
|
|
|
|
*
|
|
|
|
* Copyright (c) 2019-2020 Red Hat Inc
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Juan Quintela <quintela@redhat.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "qemu/rcu.h"
|
|
|
|
#include "exec/target_page.h"
|
|
|
|
#include "sysemu/sysemu.h"
|
|
|
|
#include "exec/ramblock.h"
|
|
|
|
#include "qemu/error-report.h"
|
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "ram.h"
|
|
|
|
#include "migration.h"
|
|
|
|
#include "socket.h"
|
2020-09-15 06:04:01 +03:00
|
|
|
#include "tls.h"
|
2020-01-22 18:16:07 +03:00
|
|
|
#include "qemu-file.h"
|
|
|
|
#include "trace.h"
|
|
|
|
#include "multifd.h"
|
2023-02-03 10:35:19 +03:00
|
|
|
#include "threadinfo.h"
|
2020-01-22 18:16:07 +03:00
|
|
|
|
2020-12-28 18:08:52 +03:00
|
|
|
#include "qemu/yank.h"
|
|
|
|
#include "io/channel-socket.h"
|
2021-03-23 20:52:42 +03:00
|
|
|
#include "yank_functions.h"
|
2020-12-28 18:08:52 +03:00
|
|
|
|
2020-01-22 18:16:07 +03:00
|
|
|
/* Multiple fd's */
|
|
|
|
|
|
|
|
#define MULTIFD_MAGIC 0x11223344U
|
|
|
|
#define MULTIFD_VERSION 1
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint32_t magic;
|
|
|
|
uint32_t version;
|
|
|
|
unsigned char uuid[16]; /* QemuUUID */
|
|
|
|
uint8_t id;
|
|
|
|
uint8_t unused1[7]; /* Reserved for future use */
|
|
|
|
uint64_t unused2[4]; /* Reserved for future use */
|
|
|
|
} __attribute__((packed)) MultiFDInit_t;
|
|
|
|
|
2019-05-15 14:37:46 +03:00
|
|
|
/* Multifd without compression */
|
|
|
|
|
|
|
|
/**
 * nocomp_send_setup: setup send side
 *
 * For no compression this function does nothing.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_send_setup(MultiFDSendParams *p, Error **errp)
{
    /* Nothing to allocate or initialize for plain page sends. */
    return 0;
}
|
|
|
|
|
|
|
|
/**
 * nocomp_send_cleanup: cleanup send side
 *
 * For no compression this function does nothing.
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    /* No per-channel compression state to release. */
    return;
}
|
|
|
|
|
|
|
|
/**
 * nocomp_send_prepare: prepare data to be able to send
 *
 * For no compression we just have to calculate the size of the
 * packet.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
{
    MultiFDPages_t *pages = p->pages;

    /*
     * Map each queued page straight into the next free iovec slot;
     * p->iovs_num may already be non-zero (the caller can reserve
     * slot 0 for the packet header), so append rather than overwrite.
     */
    for (int i = 0; i < p->normal_num; i++) {
        p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i];
        p->iov[p->iovs_num].iov_len = p->page_size;
        p->iovs_num++;
    }

    /* Pages go out uncompressed, so the payload is exactly page-sized. */
    p->next_packet_size = p->normal_num * p->page_size;
    p->flags |= MULTIFD_FLAG_NOCOMP;
    return 0;
}
|
|
|
|
|
|
|
|
/**
 * nocomp_recv_setup: setup receive side
 *
 * For no compression this function does nothing.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_recv_setup(MultiFDRecvParams *p, Error **errp)
{
    /* Nothing to allocate or initialize for plain page receives. */
    return 0;
}
|
|
|
|
|
|
|
|
/**
 * nocomp_recv_cleanup: cleanup receive side
 *
 * For no compression this function does nothing.
 *
 * @p: Params for the channel that we are using
 */
static void nocomp_recv_cleanup(MultiFDRecvParams *p)
{
    /* No per-channel compression state to release. */
}
|
|
|
|
|
|
|
|
/**
 * nocomp_recv_pages: read the data from the channel into actual pages
 *
 * For no compression we just need to read things into the correct place.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
{
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;

    /* The sender must have used the same (no-)compression method. */
    if (flags != MULTIFD_FLAG_NOCOMP) {
        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                   p->id, flags, MULTIFD_FLAG_NOCOMP);
        return -1;
    }
    /*
     * Point each iovec directly at its destination inside the guest
     * RAM block, so the channel read lands in place with no copy.
     */
    for (int i = 0; i < p->normal_num; i++) {
        p->iov[i].iov_base = p->host + p->normal[i];
        p->iov[i].iov_len = p->page_size;
    }
    return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}
|
|
|
|
|
|
|
|
/* Built-in method table for the no-compression case. */
static MultiFDMethods multifd_nocomp_ops = {
    .send_setup = nocomp_send_setup,
    .send_cleanup = nocomp_send_cleanup,
    .send_prepare = nocomp_send_prepare,
    .recv_setup = nocomp_recv_setup,
    .recv_cleanup = nocomp_recv_cleanup,
    .recv_pages = nocomp_recv_pages
};

/*
 * Method table indexed by MultiFDCompression.  Slot NONE is built in;
 * other slots are filled by compressors via multifd_register_ops().
 */
static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
    [MULTIFD_COMPRESSION_NONE] = &multifd_nocomp_ops,
};
|
|
|
|
|
2019-01-04 17:30:06 +03:00
|
|
|
/*
 * Register a compression method table.  Slot 0 (no compression) is
 * built in and must not be overridden, hence the lower bound check.
 */
void multifd_register_ops(int method, MultiFDMethods *ops)
{
    assert(0 < method && method < MULTIFD_COMPRESSION__MAX);
    multifd_ops[method] = ops;
}
|
|
|
|
|
2020-01-22 18:16:07 +03:00
|
|
|
/*
 * Send the per-channel handshake packet (magic, version, this QEMU's
 * UUID and the channel id) so the receiver can validate the channel.
 *
 * Returns 0 on success, -1 on error (with @errp set by the channel).
 */
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg = {
        .magic = cpu_to_be32(MULTIFD_MAGIC),
        .version = cpu_to_be32(MULTIFD_VERSION),
        .id = p->id,
    };

    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    if (qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp) != 0) {
        return -1;
    }
    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Read and validate a channel's handshake packet.
 *
 * Returns the (0-based) channel id on success, or -1 with @errp set if
 * the read fails or the magic, version, UUID or channel id is invalid.
 */
static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %u "
                   "expected %u", msg.version, MULTIFD_VERSION);
        return -1;
    }

    /* The UUID ties the channel to this exact source VM instance. */
    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    /*
     * Channel ids are 0-based, so any id >= the channel count is out of
     * range.  The old check used '>' (letting id == nr_channels slip
     * through) and reported a copy-pasted "version" message with the
     * wrong values; both are fixed here.
     */
    if (msg.id >= migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %u is greater than "
                   "number of channels %u", msg.id, migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}
|
|
|
|
|
|
|
|
/*
 * Allocate a MultiFDPages_t able to queue up to @size page offsets.
 * pages->num starts at 0 (g_new0); the caller owns the result and
 * releases it with multifd_pages_clear().
 */
static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}
|
|
|
|
|
|
|
|
/*
 * Free a MultiFDPages_t and its offset array.  The field resets before
 * the final g_free() are strictly redundant for the freed object but
 * are kept as written (harmless defensive clearing).
 */
static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->num = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}
|
|
|
|
|
|
|
|
/*
 * Serialize the channel's current state into the on-wire packet header
 * (all multi-byte fields in big-endian): flags, page counts, payload
 * size, packet sequence number, ramblock name and page offsets.
 */
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->flags = cpu_to_be32(p->flags);
    packet->pages_alloc = cpu_to_be32(p->pages->allocated);
    packet->normal_pages = cpu_to_be32(p->normal_num);
    packet->next_packet_size = cpu_to_be32(p->next_packet_size);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        /*
         * NOTE(review): strncpy with the full 256 may leave the name
         * unterminated; the receiver forces ramblock[255] = 0 before
         * use, so this is presumably relied upon — confirm.
         */
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->normal_num; i++) {
        /* there are architectures where ram_addr_t is 32 bit */
        uint64_t temp = p->normal[i];

        packet->offset[i] = cpu_to_be64(temp);
    }
}
|
|
|
|
|
|
|
|
/*
 * Parse and validate a received multifd packet header, converting all
 * fields from big-endian and resolving the ramblock and page offsets
 * into p->host / p->normal[].
 *
 * Returns 0 on success, -1 with @errp set on any validation failure.
 */
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    RAMBlock *block;
    int i;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %u and expected version %u",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
    /* Reject a claimed allocation larger than our per-packet capacity. */
    if (packet->pages_alloc > p->page_count) {
        error_setg(errp, "multifd: received packet "
                   "with size %u and expected a size of %u",
                   packet->pages_alloc, p->page_count) ;
        return -1;
    }

    p->normal_num = be32_to_cpu(packet->normal_pages);
    /* The used-page count can never exceed the claimed allocation. */
    if (p->normal_num > packet->pages_alloc) {
        error_setg(errp, "multifd: received packet "
                   "with %u pages and expected maximum pages are %u",
                   p->normal_num, packet->pages_alloc) ;
        return -1;
    }

    p->next_packet_size = be32_to_cpu(packet->next_packet_size);
    p->packet_num = be64_to_cpu(packet->packet_num);

    /* A pageless packet (e.g. pure sync) needs no ramblock lookup. */
    if (p->normal_num == 0) {
        return 0;
    }

    /* make sure that ramblock is 0 terminated */
    packet->ramblock[255] = 0;
    block = qemu_ram_block_by_name(packet->ramblock);
    if (!block) {
        error_setg(errp, "multifd: unknown ram block %s",
                   packet->ramblock);
        return -1;
    }

    p->host = block->host;
    for (i = 0; i < p->normal_num; i++) {
        uint64_t offset = be64_to_cpu(packet->offset[i]);

        /* Each page must fit entirely inside the block's used length. */
        if (offset > (block->used_length - p->page_size)) {
            error_setg(errp, "multifd: offset too long %" PRIu64
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->used_length);
            return -1;
        }
        p->normal[i] = offset;
    }

    return 0;
}
|
|
|
|
|
|
|
|
/* Global send-side state shared by the migration thread and channels. */
struct {
    /* one entry per multifd channel */
    MultiFDSendParams *params;
    /* array of pages queued to be sent */
    MultiFDPages_t *pages;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
    /*
     * Have we already run terminate threads.  There is a race when it
     * happens that we got one error while we are exiting.
     * We will use atomic operations.  Only valid values are 0 and 1.
     */
    int exiting;
    /* multifd ops */
    MultiFDMethods *ops;
} *multifd_send_state;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* How we use multifd_send_state->pages and channel->pages?
|
|
|
|
*
|
|
|
|
* We create a pages for each channel, and a main one. Each time that
|
|
|
|
* we need to send a batch of pages we interchange the ones between
|
|
|
|
* multifd_send_state and the channel that is sending it. There are
|
|
|
|
* two reasons for that:
|
|
|
|
* - to not have to do so many mallocs during migration
|
|
|
|
* - to make easier to know what to free at the end of migration
|
|
|
|
*
|
|
|
|
* This way we always know who is the owner of each "pages" struct,
|
|
|
|
* and we don't need any locking. It belongs to the migration thread
|
|
|
|
* or to the channel thread. Switching is safe because the migration
|
|
|
|
* thread is using the channel mutex when changing it, and the channel
|
|
|
|
 * has to have finished with its own, otherwise pending_job can't be
|
|
|
|
* false.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Hand the queued pages to an idle channel thread: swap the global
 * pages struct with the chosen channel's empty one and wake the thread.
 *
 * Returns 1 on success, -1 if we are exiting or the channel has quit.
 */
static int multifd_send_pages(QEMUFile *f)
{
    int i;
    static int next_channel;            /* round-robin cursor, persists across calls */
    MultiFDSendParams *p = NULL; /* make happy gcc */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    if (qatomic_read(&multifd_send_state->exiting)) {
        return -1;
    }

    /* Blocks until at least one channel has finished its previous job. */
    qemu_sem_wait(&multifd_send_state->channels_ready);
    /*
     * next_channel can remain from a previous migration that was
     * using more channels, so ensure it doesn't overflow if the
     * limit is lower now.
     */
    next_channel %= migrate_multifd_channels();
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            error_report("%s: channel %d has already quit!", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return -1;
        }
        if (!p->pending_job) {
            /* Claim this channel; its mutex stays held until the swap below. */
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    /* The channel's pages struct must be empty before we swap into it. */
    assert(!p->pages->num);
    assert(!p->pages->block);

    p->packet_num = multifd_send_state->packet_num++;
    /* Exchange: channel takes the full struct, we keep its empty one. */
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->num) * p->page_size + p->packet_len;
    qemu_file_acct_rate_limit(f, transferred);
    ram_counters.multifd_bytes += transferred;
    stat64_add(&ram_atomic_counters.transferred, transferred);
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);

    return 1;
}
|
|
|
|
|
|
|
|
/*
 * Queue one page for multifd sending.  Pages are batched per ramblock;
 * a full batch, or a page from a different block, triggers a flush via
 * multifd_send_pages().
 *
 * Returns 1 on success, -1 on error.
 */
int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;
    bool changed = false;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->num] = offset;
        pages->num++;

        /* Batch not full yet: just accumulate. */
        if (pages->num < pages->allocated) {
            return 1;
        }
    } else {
        /* Different block: flush current batch, then requeue this page. */
        changed = true;
    }

    if (multifd_send_pages(f) < 0) {
        return -1;
    }

    if (changed) {
        /* Recurses at most once: the flushed struct is now empty. */
        return multifd_queue_page(f, block, offset);
    }

    return 1;
}
|
|
|
|
|
|
|
|
/*
 * Ask every send thread to exit.  If @err is non-NULL the migration is
 * marked failed.  Idempotent: the atomic exchange on 'exiting' makes a
 * second (possibly concurrent) call a no-op.
 */
static void multifd_send_terminate_threads(Error *err)
{
    int i;

    trace_multifd_send_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        /* Only transition states from which FAILED is a valid move. */
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    /*
     * We don't want to exit each threads twice.  Depending on where
     * we get the error, or if there are two independent errors in two
     * threads at the same time, we can end calling this function
     * twice.
     */
    if (qatomic_xchg(&multifd_send_state->exiting, 1)) {
        return;
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        /* Wake the thread so it notices 'quit' / 'exiting'. */
        qemu_sem_post(&p->sem);
        /* Abort any blocking channel I/O the thread may be stuck in. */
        if (p->c) {
            qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
        qemu_mutex_unlock(&p->mutex);
    }
}
|
|
|
|
|
|
|
|
/*
 * Tear down all send-side multifd state: stop and join every channel
 * thread, then release each channel's resources and finally the global
 * multifd_send_state itself.  No-op when multifd is disabled.
 */
void multifd_save_cleanup(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    multifd_send_terminate_threads(NULL);
    /* First pass: join every running thread before freeing anything. */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
    }
    /* Second pass: per-channel resource teardown (threads are gone). */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];
        Error *local_err = NULL;

        if (p->registered_yank) {
            migration_ioc_unregister_yank(p->c);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
        g_free(p->iov);
        p->iov = NULL;
        g_free(p->normal);
        p->normal = NULL;
        /* Let the compression method release its own per-channel state. */
        multifd_send_state->ops->send_cleanup(p, &local_err);
        if (local_err) {
            migrate_set_error(migrate_get_current(), local_err);
            error_free(local_err);
        }
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
}
|
|
|
|
|
2022-10-25 07:47:28 +03:00
|
|
|
/*
 * Flush pending zero-copy writes on @c.  Returns the flush result
 * (>= 0) on success or -1 on error; a result of 1 means the kernel
 * fell back to copying, which is recorded in the dirty-sync stats.
 */
static int multifd_zero_copy_flush(QIOChannel *c)
{
    Error *local_err = NULL;
    int rc = qio_channel_flush(c, &local_err);

    if (rc < 0) {
        error_report_err(local_err);
        return -1;
    }
    if (rc == 1) {
        dirty_sync_missed_zero_copy();
    }
    return rc;
}
|
|
|
|
|
2022-05-13 09:28:35 +03:00
|
|
|
/*
 * Synchronize with all send channels: flush any pending batch, make
 * every channel emit a SYNC packet, then wait until each has done so
 * (and flush zero-copy writes if enabled).
 *
 * Returns 0 on success, -1 on error.
 */
int multifd_send_sync_main(QEMUFile *f)
{
    int i;
    bool flush_zero_copy;

    if (!migrate_use_multifd()) {
        return 0;
    }
    /* Push out any partially filled batch before signalling sync. */
    if (multifd_send_state->pages->num) {
        if (multifd_send_pages(f) < 0) {
            error_report("%s: multifd_send_pages fail", __func__);
            return -1;
        }
    }

    /*
     * When using zero-copy, it's necessary to flush the pages before any of
     * the pages can be sent again, so we'll make sure the new version of the
     * pages will always arrive _later_ than the old pages.
     *
     * Currently we achieve this by flushing the zero-page requested writes
     * per ram iteration, but in the future we could potentially optimize it
     * to be less frequent, e.g. only after we finished one whole scanning of
     * all the dirty bitmaps.
     */

    flush_zero_copy = migrate_use_zero_copy_send();

    /* Phase 1: ask every channel to send a SYNC packet. */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        if (p->quit) {
            error_report("%s: channel %d has already quit", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return -1;
        }

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_file_acct_rate_limit(f, p->packet_len);
        ram_counters.multifd_bytes += p->packet_len;
        stat64_add(&ram_atomic_counters.transferred, p->packet_len);
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    /* Phase 2: wait until every channel has sent its SYNC packet. */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&p->sem_sync);

        if (flush_zero_copy && p->c && (multifd_zero_copy_flush(p->c) < 0)) {
            return -1;
        }
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Per-channel send thread: after the initial handshake packet, loop
 * waiting on p->sem for jobs, serialize and transmit each batch, and
 * signal sync/channels_ready semaphores.  Exits on 'quit', on global
 * 'exiting', or on any I/O error (which terminates all threads).
 */
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    MigrationThread *thread = NULL;
    Error *local_err = NULL;
    int ret = 0;
    /*
     * With zero-copy the header must go out in a separate plain write:
     * its buffer is reused per packet and is unsafe for MSG_ZEROCOPY.
     */
    bool use_zero_copy_send = migrate_use_zero_copy_send();

    thread = MigrationThreadAdd(p->name, qemu_get_thread_id());

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        ret = -1;
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);

        if (qatomic_read(&multifd_send_state->exiting)) {
            break;
        }
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint64_t packet_num = p->packet_num;
            uint32_t flags;
            p->normal_num = 0;

            /* Slot 0 is reserved for the header unless it is sent separately. */
            if (use_zero_copy_send) {
                p->iovs_num = 0;
            } else {
                p->iovs_num = 1;
            }

            for (int i = 0; i < p->pages->num; i++) {
                p->normal[p->normal_num] = p->pages->offset[i];
                p->normal_num++;
            }

            if (p->normal_num) {
                ret = multifd_send_state->ops->send_prepare(p, &local_err);
                if (ret != 0) {
                    qemu_mutex_unlock(&p->mutex);
                    break;
                }
            }
            multifd_send_fill_packet(p);
            /* Snapshot flags before clearing; SYNC is checked after the send. */
            flags = p->flags;
            p->flags = 0;
            p->num_packets++;
            p->total_normal_pages += p->normal_num;
            p->pages->num = 0;
            p->pages->block = NULL;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, p->normal_num, flags,
                               p->next_packet_size);

            if (use_zero_copy_send) {
                /* Send header first, without zerocopy */
                ret = qio_channel_write_all(p->c, (void *)p->packet,
                                            p->packet_len, &local_err);
                if (ret != 0) {
                    break;
                }
            } else {
                /* Send header using the same writev call */
                p->iov[0].iov_len = p->packet_len;
                p->iov[0].iov_base = p->packet;
            }

            ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
                                              0, p->write_flags, &local_err);
            if (ret != 0) {
                break;
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&p->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        trace_multifd_send_error(p->id);
        multifd_send_terminate_threads(local_err);
        error_free(local_err);
    }

    /*
     * On error we can't just exit silently: post both semaphores so
     * anyone blocked waiting on this channel can make progress.
     */
    if (ret != 0) {
        qemu_sem_post(&p->sem_sync);
        qemu_sem_post(&multifd_send_state->channels_ready);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    MigrationThreadDel(thread);
    trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages);

    return NULL;
}
|
|
|
|
|
2020-09-15 06:04:01 +03:00
|
|
|
static bool multifd_channel_connect(MultiFDSendParams *p,
|
|
|
|
QIOChannel *ioc,
|
|
|
|
Error *error);
|
|
|
|
|
|
|
|
static void multifd_tls_outgoing_handshake(QIOTask *task,
|
|
|
|
gpointer opaque)
|
|
|
|
{
|
|
|
|
MultiFDSendParams *p = opaque;
|
|
|
|
QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));
|
|
|
|
Error *err = NULL;
|
|
|
|
|
2020-09-15 06:04:02 +03:00
|
|
|
if (qio_task_propagate_error(task, &err)) {
|
|
|
|
trace_multifd_tls_outgoing_handshake_error(ioc, error_get_pretty(err));
|
|
|
|
} else {
|
|
|
|
trace_multifd_tls_outgoing_handshake_complete(ioc);
|
|
|
|
}
|
2021-02-09 13:42:37 +03:00
|
|
|
|
|
|
|
if (!multifd_channel_connect(p, ioc, err)) {
|
|
|
|
/*
|
|
|
|
* Error happen, mark multifd_send_thread status as 'quit' although it
|
|
|
|
* is not created, and then tell who pay attention to me.
|
|
|
|
*/
|
|
|
|
p->quit = true;
|
|
|
|
qemu_sem_post(&multifd_send_state->channels_ready);
|
|
|
|
qemu_sem_post(&p->sem_sync);
|
|
|
|
}
|
2020-09-15 06:04:01 +03:00
|
|
|
}
|
|
|
|
|
2020-11-06 09:24:53 +03:00
|
|
|
static void *multifd_tls_handshake_thread(void *opaque)
|
|
|
|
{
|
|
|
|
MultiFDSendParams *p = opaque;
|
|
|
|
QIOChannelTLS *tioc = QIO_CHANNEL_TLS(p->c);
|
|
|
|
|
|
|
|
qio_channel_tls_handshake(tioc,
|
|
|
|
multifd_tls_outgoing_handshake,
|
|
|
|
p,
|
|
|
|
NULL,
|
|
|
|
NULL);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-09-15 06:04:01 +03:00
|
|
|
static void multifd_tls_channel_connect(MultiFDSendParams *p,
|
|
|
|
QIOChannel *ioc,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
2022-03-31 18:08:40 +03:00
|
|
|
const char *hostname = s->hostname;
|
2020-09-15 06:04:01 +03:00
|
|
|
QIOChannelTLS *tioc;
|
|
|
|
|
|
|
|
tioc = migration_tls_client_create(s, ioc, hostname, errp);
|
|
|
|
if (!tioc) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-11-11 17:26:03 +03:00
|
|
|
object_unref(OBJECT(ioc));
|
2020-09-15 06:04:02 +03:00
|
|
|
trace_multifd_tls_outgoing_handshake_start(ioc, tioc, hostname);
|
2020-09-15 06:04:01 +03:00
|
|
|
qio_channel_set_name(QIO_CHANNEL(tioc), "multifd-tls-outgoing");
|
2020-11-06 09:24:53 +03:00
|
|
|
p->c = QIO_CHANNEL(tioc);
|
|
|
|
qemu_thread_create(&p->thread, "multifd-tls-handshake-worker",
|
|
|
|
multifd_tls_handshake_thread, p,
|
|
|
|
QEMU_THREAD_JOINABLE);
|
2020-09-15 06:04:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool multifd_channel_connect(MultiFDSendParams *p,
|
|
|
|
QIOChannel *ioc,
|
|
|
|
Error *error)
|
|
|
|
{
|
2020-09-15 06:04:02 +03:00
|
|
|
trace_multifd_set_outgoing_channel(
|
2022-03-31 18:08:40 +03:00
|
|
|
ioc, object_get_typename(OBJECT(ioc)),
|
|
|
|
migrate_get_current()->hostname, error);
|
2020-09-15 06:04:02 +03:00
|
|
|
|
2021-12-22 14:30:48 +03:00
|
|
|
if (error) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (migrate_channel_requires_tls_upgrade(ioc)) {
|
|
|
|
multifd_tls_channel_connect(p, ioc, &error);
|
|
|
|
if (!error) {
|
|
|
|
/*
|
|
|
|
* tls_channel_connect will call back to this
|
|
|
|
* function after the TLS handshake,
|
|
|
|
* so we mustn't call multifd_send_thread until then
|
|
|
|
*/
|
|
|
|
return true;
|
2020-09-15 06:04:01 +03:00
|
|
|
} else {
|
2021-12-22 14:30:48 +03:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
migration_ioc_register_yank(ioc);
|
|
|
|
p->registered_yank = true;
|
|
|
|
p->c = ioc;
|
|
|
|
qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
|
|
|
|
QEMU_THREAD_JOINABLE);
|
2020-09-15 06:04:01 +03:00
|
|
|
}
|
2021-12-22 14:30:48 +03:00
|
|
|
return true;
|
2020-09-15 06:04:01 +03:00
|
|
|
}
|
|
|
|
|
2020-09-15 06:04:00 +03:00
|
|
|
static void multifd_new_send_channel_cleanup(MultiFDSendParams *p,
|
|
|
|
QIOChannel *ioc, Error *err)
|
|
|
|
{
|
|
|
|
migrate_set_error(migrate_get_current(), err);
|
|
|
|
/* Error happen, we need to tell who pay attention to me */
|
|
|
|
qemu_sem_post(&multifd_send_state->channels_ready);
|
|
|
|
qemu_sem_post(&p->sem_sync);
|
|
|
|
/*
|
|
|
|
* Although multifd_send_thread is not created, but main migration
|
|
|
|
* thread neet to judge whether it is running, so we need to mark
|
|
|
|
* its status.
|
|
|
|
*/
|
|
|
|
p->quit = true;
|
|
|
|
object_unref(OBJECT(ioc));
|
|
|
|
error_free(err);
|
|
|
|
}
|
|
|
|
|
2020-01-22 18:16:07 +03:00
|
|
|
/*
 * QIOTask completion callback for an outgoing multifd socket
 * connection.  Hooks the channel up via multifd_channel_connect();
 * on any failure, falls through to the common cleanup path.
 */
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    trace_multifd_new_send_channel_async(p->id);
    if (!qio_task_propagate_error(task, &local_err)) {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        if (multifd_channel_connect(p, sioc, local_err)) {
            return;
        }
    }

    multifd_new_send_channel_cleanup(p, sioc, local_err);
}
|
|
|
|
|
|
|
|
/*
 * Set up the multifd send side: allocate global state, initialize one
 * MultiFDSendParams per channel, kick off the asynchronous socket
 * connections, and run the per-channel compression setup hook.
 *
 * Returns 0 on success, or the (non-zero) send_setup() error with
 * @errp set.
 */
int multifd_save_setup(Error **errp)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }

    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);
    qatomic_set(&multifd_send_state->exiting, 0);
    multifd_send_state->ops = multifd_ops[migrate_multifd_compression()];

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
        p->packet->version = cpu_to_be32(MULTIFD_VERSION);
        p->name = g_strdup_printf("multifdsend_%d", i);
        /* We need one extra place for the packet header */
        p->iov = g_new0(struct iovec, page_count + 1);
        p->normal = g_new0(ram_addr_t, page_count);
        p->page_size = qemu_target_page_size();
        p->page_count = page_count;
        p->write_flags = migrate_use_zero_copy_send() ?
                         QIO_CHANNEL_WRITE_FLAG_ZERO_COPY : 0;

        socket_send_channel_create(multifd_new_send_channel_async, p);
    }

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];
        Error *local_err = NULL;
        int ret;

        ret = multifd_send_state->ops->send_setup(p, &local_err);
        if (ret) {
            error_propagate(errp, local_err);
            return ret;
        }
    }
    return 0;
}
|
|
|
|
|
|
|
|
struct {
|
|
|
|
MultiFDRecvParams *params;
|
|
|
|
/* number of created threads */
|
|
|
|
int count;
|
|
|
|
/* syncs main thread and channels */
|
|
|
|
QemuSemaphore sem_sync;
|
|
|
|
/* global number of generated multifd packets */
|
|
|
|
uint64_t packet_num;
|
2019-05-15 14:37:46 +03:00
|
|
|
/* multifd ops */
|
|
|
|
MultiFDMethods *ops;
|
2020-01-22 18:16:07 +03:00
|
|
|
} *multifd_recv_state;
|
|
|
|
|
|
|
|
/*
 * Ask every multifd receive thread to terminate.  If @err is non-NULL
 * the migration is additionally marked as failed; with NULL this is a
 * normal shutdown request.
 */
static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    trace_multifd_recv_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();

        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        /*
         * We could arrive here for two reasons:
         *  - normal quit, i.e. everything went fine, just finished
         *  - error quit: We close the channels so the channel threads
         *    finish the qio_channel_read_all_eof()
         */
        if (p->c) {
            qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
        qemu_mutex_unlock(&p->mutex);
    }
}
|
|
|
|
|
2023-02-10 09:36:31 +03:00
|
|
|
void multifd_load_shutdown(void)
|
|
|
|
{
|
|
|
|
if (migrate_use_multifd()) {
|
|
|
|
multifd_recv_terminate_threads(NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-10 09:36:28 +03:00
|
|
|
void multifd_load_cleanup(void)
|
2020-01-22 18:16:07 +03:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
migration: Rework multi-channel checks on URI
The whole idea of multi-channel checks was not properly done, IMHO.
Currently we check multi-channel in a lot of places, but actually that's
not needed because we only need to check it right after we get the URI and
that should be it.
If the URI check succeeded, we should never need to check it again because
we must have it. If it check fails, we should fail immediately on either
the qmp_migrate or qmp_migrate_incoming, instead of failingg it later after
the connection established.
Neither should we fail any set capabiliities like what we used to do here:
5ad15e8614 ("migration: allow enabling mutilfd for specific protocol only", 2021-10-19)
Because logically the URI will only be set later after the capability is
set, so it doesn't make a lot of sense to check the URI type when setting
the capability, because we're checking the cap with an old URI passed in,
and that may not even be the URI we're going to use later.
This patch mostly reverted all such checks for before, dropping the
variable migrate_allow_multi_channels and helpers. Instead, add a common
helper to check URI for multi-channels for either qmp_migrate and
qmp_migrate_incoming and that should do all the proper checks. The failure
will only trigger with the "migrate" or "migrate_incoming" command, or when
user specified "-incoming xxx" where "xxx" is not "defer".
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2023-02-08 23:28:10 +03:00
|
|
|
if (!migrate_use_multifd()) {
|
2023-02-10 09:36:28 +03:00
|
|
|
return;
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
multifd_recv_terminate_threads(NULL);
|
|
|
|
for (i = 0; i < migrate_multifd_channels(); i++) {
|
|
|
|
MultiFDRecvParams *p = &multifd_recv_state->params[i];
|
|
|
|
|
|
|
|
if (p->running) {
|
|
|
|
/*
|
|
|
|
* multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code,
|
|
|
|
* however try to wakeup it without harm in cleanup phase.
|
|
|
|
*/
|
|
|
|
qemu_sem_post(&p->sem_sync);
|
|
|
|
}
|
2023-02-10 09:36:30 +03:00
|
|
|
|
|
|
|
qemu_thread_join(&p->thread);
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
for (i = 0; i < migrate_multifd_channels(); i++) {
|
|
|
|
MultiFDRecvParams *p = &multifd_recv_state->params[i];
|
|
|
|
|
2021-09-09 10:19:45 +03:00
|
|
|
migration_ioc_unregister_yank(p->c);
|
2020-01-22 18:16:07 +03:00
|
|
|
object_unref(OBJECT(p->c));
|
|
|
|
p->c = NULL;
|
|
|
|
qemu_mutex_destroy(&p->mutex);
|
|
|
|
qemu_sem_destroy(&p->sem_sync);
|
|
|
|
g_free(p->name);
|
|
|
|
p->name = NULL;
|
|
|
|
p->packet_len = 0;
|
|
|
|
g_free(p->packet);
|
|
|
|
p->packet = NULL;
|
2021-11-19 14:06:05 +03:00
|
|
|
g_free(p->iov);
|
|
|
|
p->iov = NULL;
|
2021-11-22 15:41:06 +03:00
|
|
|
g_free(p->normal);
|
|
|
|
p->normal = NULL;
|
2019-05-15 14:37:46 +03:00
|
|
|
multifd_recv_state->ops->recv_cleanup(p);
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
qemu_sem_destroy(&multifd_recv_state->sem_sync);
|
|
|
|
g_free(multifd_recv_state->params);
|
|
|
|
multifd_recv_state->params = NULL;
|
|
|
|
g_free(multifd_recv_state);
|
|
|
|
multifd_recv_state = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void multifd_recv_sync_main(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!migrate_use_multifd()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
for (i = 0; i < migrate_multifd_channels(); i++) {
|
|
|
|
MultiFDRecvParams *p = &multifd_recv_state->params[i];
|
|
|
|
|
|
|
|
trace_multifd_recv_sync_main_wait(p->id);
|
|
|
|
qemu_sem_wait(&multifd_recv_state->sem_sync);
|
|
|
|
}
|
|
|
|
for (i = 0; i < migrate_multifd_channels(); i++) {
|
|
|
|
MultiFDRecvParams *p = &multifd_recv_state->params[i];
|
|
|
|
|
2020-04-04 07:21:08 +03:00
|
|
|
WITH_QEMU_LOCK_GUARD(&p->mutex) {
|
|
|
|
if (multifd_recv_state->packet_num < p->packet_num) {
|
|
|
|
multifd_recv_state->packet_num = p->packet_num;
|
|
|
|
}
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
trace_multifd_recv_sync_main_signal(p->id);
|
|
|
|
qemu_sem_post(&p->sem_sync);
|
|
|
|
}
|
|
|
|
trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *multifd_recv_thread(void *opaque)
|
|
|
|
{
|
|
|
|
MultiFDRecvParams *p = opaque;
|
|
|
|
Error *local_err = NULL;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
trace_multifd_recv_thread_start(p->id);
|
|
|
|
rcu_register_thread();
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
uint32_t flags;
|
|
|
|
|
|
|
|
if (p->quit) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
|
|
|
|
p->packet_len, &local_err);
|
2022-01-27 23:02:01 +03:00
|
|
|
if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */
|
2020-01-22 18:16:07 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_mutex_lock(&p->mutex);
|
|
|
|
ret = multifd_recv_unfill_packet(p, &local_err);
|
|
|
|
if (ret) {
|
|
|
|
qemu_mutex_unlock(&p->mutex);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
flags = p->flags;
|
2019-05-15 14:37:46 +03:00
|
|
|
/* recv methods don't know how to handle the SYNC flag */
|
|
|
|
p->flags &= ~MULTIFD_FLAG_SYNC;
|
2021-11-22 15:41:06 +03:00
|
|
|
trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
|
2020-01-22 18:16:07 +03:00
|
|
|
p->next_packet_size);
|
|
|
|
p->num_packets++;
|
2021-11-22 15:41:06 +03:00
|
|
|
p->total_normal_pages += p->normal_num;
|
2020-01-22 18:16:07 +03:00
|
|
|
qemu_mutex_unlock(&p->mutex);
|
|
|
|
|
2021-11-22 15:41:06 +03:00
|
|
|
if (p->normal_num) {
|
2021-11-22 14:49:43 +03:00
|
|
|
ret = multifd_recv_state->ops->recv_pages(p, &local_err);
|
2020-01-22 18:16:07 +03:00
|
|
|
if (ret != 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & MULTIFD_FLAG_SYNC) {
|
|
|
|
qemu_sem_post(&multifd_recv_state->sem_sync);
|
|
|
|
qemu_sem_wait(&p->sem_sync);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (local_err) {
|
|
|
|
multifd_recv_terminate_threads(local_err);
|
2020-05-06 12:54:16 +03:00
|
|
|
error_free(local_err);
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
qemu_mutex_lock(&p->mutex);
|
|
|
|
p->running = false;
|
|
|
|
qemu_mutex_unlock(&p->mutex);
|
|
|
|
|
|
|
|
rcu_unregister_thread();
|
2021-11-22 15:41:06 +03:00
|
|
|
trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages);
|
2020-01-22 18:16:07 +03:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set up the multifd receive side: allocate the global state, one
 * MultiFDRecvParams per channel, and run the per-channel compression
 * setup hook.  The receive threads themselves are started later, when
 * each channel actually connects (multifd_recv_new_channel()).
 *
 * Returns 0 on success (including when already initialised or multifd
 * is disabled), or the recv_setup() error with @errp set.
 */
int multifd_load_setup(Error **errp)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    /*
     * Return successfully if multiFD recv state is already initialised
     * or multiFD is not enabled.
     */
    if (multifd_recv_state || !migrate_use_multifd()) {
        return 0;
    }

    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    qatomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);
    multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->id = i;
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
        p->iov = g_new0(struct iovec, page_count);
        p->normal = g_new0(ram_addr_t, page_count);
        p->page_count = page_count;
        p->page_size = qemu_target_page_size();
    }

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];
        Error *local_err = NULL;
        int ret;

        ret = multifd_recv_state->ops->recv_setup(p, &local_err);
        if (ret) {
            error_propagate(errp, local_err);
            return ret;
        }
    }
    return 0;
}
|
|
|
|
|
|
|
|
bool multifd_recv_all_channels_created(void)
|
|
|
|
{
|
|
|
|
int thread_count = migrate_multifd_channels();
|
|
|
|
|
|
|
|
if (!migrate_use_multifd()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-04-21 14:28:33 +03:00
|
|
|
if (!multifd_recv_state) {
|
|
|
|
/* Called before any connections created */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-09-23 13:56:46 +03:00
|
|
|
return thread_count == qatomic_read(&multifd_recv_state->count);
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to receive all multifd channels to get ready for the migration.
|
2022-12-20 21:44:18 +03:00
|
|
|
* Sets @errp when failing to receive the current channel.
|
2020-01-22 18:16:07 +03:00
|
|
|
*/
|
2022-12-20 21:44:18 +03:00
|
|
|
void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
|
2020-01-22 18:16:07 +03:00
|
|
|
{
|
|
|
|
MultiFDRecvParams *p;
|
|
|
|
Error *local_err = NULL;
|
|
|
|
int id;
|
|
|
|
|
|
|
|
id = multifd_recv_initial_packet(ioc, &local_err);
|
|
|
|
if (id < 0) {
|
|
|
|
multifd_recv_terminate_threads(local_err);
|
|
|
|
error_propagate_prepend(errp, local_err,
|
|
|
|
"failed to receive packet"
|
|
|
|
" via multifd channel %d: ",
|
2020-09-23 13:56:46 +03:00
|
|
|
qatomic_read(&multifd_recv_state->count));
|
2022-12-20 21:44:18 +03:00
|
|
|
return;
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
trace_multifd_recv_new_channel(id);
|
|
|
|
|
|
|
|
p = &multifd_recv_state->params[id];
|
|
|
|
if (p->c != NULL) {
|
|
|
|
error_setg(&local_err, "multifd: received id '%d' already setup'",
|
|
|
|
id);
|
|
|
|
multifd_recv_terminate_threads(local_err);
|
|
|
|
error_propagate(errp, local_err);
|
2022-12-20 21:44:18 +03:00
|
|
|
return;
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|
|
|
|
p->c = ioc;
|
|
|
|
object_ref(OBJECT(ioc));
|
|
|
|
/* initial packet */
|
|
|
|
p->num_packets = 1;
|
|
|
|
|
|
|
|
p->running = true;
|
|
|
|
qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
|
|
|
|
QEMU_THREAD_JOINABLE);
|
2020-09-23 13:56:46 +03:00
|
|
|
qatomic_inc(&multifd_recv_state->count);
|
2020-01-22 18:16:07 +03:00
|
|
|
}
|