migration: remove unreachable RDMA code in save_hook impl
The QEMUFile 'save_hook' callback has a 'size_t size' parameter. The RDMA
impl of this has logic that takes different actions depending on whether
the value is zero or non-zero. It also has commented-out logic that would
have taken further action if the value were negative.

The only place where the 'save_hook' callback is invoked is the
ram_control_save_page() method, which passes 'size' through from its
caller. The only caller of this method is in turn control_save_page(),
which unconditionally passes the 'TARGET_PAGE_SIZE' constant for the
'size' parameter.

In other words, the only scenario for 'size' that can execute in the
qemu_rdma_save_page() method is 'size > 0'. The remaining code has been
unreachable since RDMA support was first introduced 9 years ago.

Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
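For illustration, the call chain described above can be modelled with a small
standalone program (hypothetical names throughout; this is a sketch of the
flow, not the actual QEMU sources): a constant, positive page size is passed
through a pass-through wrapper into the hook, so the hook's 'size == 0' and
'size < 0' paths can never execute.

/*
 * Standalone model of the call chain described above (illustrative names,
 * not QEMU code): control_save_page() -> ram_control_save_page() -> save_hook.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096 /* stand-in for TARGET_PAGE_SIZE */

/* stand-in for the QEMUFile 'save_hook' callback shape */
typedef size_t (*model_save_hook)(size_t size, uint64_t *bytes_sent);

/* models qemu_rdma_save_page(): only the 'size > 0' branch is reachable */
static size_t model_rdma_save_page(size_t size, uint64_t *bytes_sent)
{
    if (size > 0) {
        if (bytes_sent) {
            *bytes_sent = 1; /* RDMA is asynchronous; real accounting happens later */
        }
        return 0;
    }
    /* dead code: no caller ever passes 0, and size_t cannot be negative */
    return (size_t)-1;
}

/* models ram_control_save_page(): forwards 'size' unchanged to the hook */
static size_t model_ram_control_save_page(model_save_hook hook, size_t size,
                                          uint64_t *bytes_sent)
{
    return hook(size, bytes_sent);
}

/* models control_save_page(): the only caller, always a full page */
int main(void)
{
    uint64_t bytes_xmit = 0;
    size_t ret = model_ram_control_save_page(model_rdma_save_page,
                                             MODEL_PAGE_SIZE, &bytes_xmit);
    printf("hook returned %zu, bytes_xmit=%llu\n",
           ret, (unsigned long long)bytes_xmit);
    return 0;
}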
parent c0e0825c98
commit 246683c22f

migration/rdma.c | 120
@@ -1462,34 +1462,6 @@ static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
     return result;
 }
 
-/*
- * Set bit for unregistration in the next iteration.
- * We cannot transmit right here, but will unpin later.
- */
-static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
-                                        uint64_t chunk, uint64_t wr_id)
-{
-    if (rdma->unregistrations[rdma->unregister_next] != 0) {
-        error_report("rdma migration: queue is full");
-    } else {
-        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
-
-        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
-            trace_qemu_rdma_signal_unregister_append(chunk,
-                                                     rdma->unregister_next);
-
-            rdma->unregistrations[rdma->unregister_next++] =
-                qemu_rdma_make_wrid(wr_id, index, chunk);
-
-            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
-                rdma->unregister_next = 0;
-            }
-        } else {
-            trace_qemu_rdma_signal_unregister_already(chunk);
-        }
-    }
-}
-
 /*
  * Consult the connection manager to see a work request
  * (of any kind) has completed.
@@ -3237,23 +3209,7 @@ qio_channel_rdma_shutdown(QIOChannel *ioc,
  *        Offset is an offset to be added to block_offset and used
  *        to also lookup the corresponding RAMBlock.
  *
- *    @size > 0 :
- *        Initiate an transfer this size.
- *
- *    @size == 0 :
- *        A 'hint' or 'advice' that means that we wish to speculatively
- *        and asynchronously unregister this memory. In this case, there is no
- *        guarantee that the unregister will actually happen, for example,
- *        if the memory is being actively transmitted. Additionally, the memory
- *        may be re-registered at any future time if a write within the same
- *        chunk was requested again, even if you attempted to unregister it
- *        here.
- *
- *    @size < 0 : TODO, not yet supported
- *        Unregister the memory NOW. This means that the caller does not
- *        expect there to be any future RDMA transfers and we just want to clean
- *        things up. This is used in case the upper layer owns the memory and
- *        cannot wait for qemu_fclose() to occur.
+ *    @size : Number of bytes to transfer
  *
  *    @bytes_sent : User-specificed pointer to indicate how many bytes were
  *                  sent. Usually, this will not be more than a few bytes of
@@ -3282,61 +3238,27 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
 
     qemu_fflush(f);
 
-    if (size > 0) {
-        /*
-         * Add this page to the current 'chunk'. If the chunk
-         * is full, or the page doesn't belong to the current chunk,
-         * an actual RDMA write will occur and a new chunk will be formed.
-         */
-        ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
-        if (ret < 0) {
-            error_report("rdma migration: write error! %d", ret);
-            goto err;
-        }
+    /*
+     * Add this page to the current 'chunk'. If the chunk
+     * is full, or the page doesn't belong to the current chunk,
+     * an actual RDMA write will occur and a new chunk will be formed.
+     */
+    ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
+    if (ret < 0) {
+        error_report("rdma migration: write error! %d", ret);
+        goto err;
+    }
 
-        /*
-         * We always return 1 bytes because the RDMA
-         * protocol is completely asynchronous. We do not yet know
-         * whether an identified chunk is zero or not because we're
-         * waiting for other pages to potentially be merged with
-         * the current chunk. So, we have to call qemu_update_position()
-         * later on when the actual write occurs.
-         */
-        if (bytes_sent) {
-            *bytes_sent = 1;
-        }
-    } else {
-        uint64_t index, chunk;
-
-        /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
-           if (size < 0) {
-               ret = qemu_rdma_drain_cq(f, rdma);
-               if (ret < 0) {
-                   fprintf(stderr, "rdma: failed to synchronously drain"
-                           " completion queue before unregistration.\n");
-                   goto err;
-               }
-           }
-         */
-
-        ret = qemu_rdma_search_ram_block(rdma, block_offset,
-                                         offset, size, &index, &chunk);
-
-        if (ret) {
-            error_report("ram block search failed");
-            goto err;
-        }
-
-        qemu_rdma_signal_unregister(rdma, index, chunk, 0);
-
-        /*
-         * TODO: Synchronous, guaranteed unregistration (should not occur during
-         * fast-path). Otherwise, unregisters will process on the next call to
-         * qemu_rdma_drain_cq()
-           if (size < 0) {
-               qemu_rdma_unregister_waiting(rdma);
-           }
-        */
+    /*
+     * We always return 1 bytes because the RDMA
+     * protocol is completely asynchronous. We do not yet know
+     * whether an identified chunk is zero or not because we're
+     * waiting for other pages to potentially be merged with
+     * the current chunk. So, we have to call qemu_update_position()
+     * later on when the actual write occurs.
+     */
+    if (bytes_sent) {
+        *bytes_sent = 1;
     }
 
     /*
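A side note on the 'TODO: Change QEMUFileOps prototype to be signed'
comments removed above: with the existing unsigned 'size_t size' parameter a
negative size cannot even reach the hook, which is a second reason the
commented-out 'size < 0' handling could never have run. A minimal standalone
illustration (toy code, not QEMU sources):

#include <stddef.h>
#include <stdio.h>

/* toy stand-in for a hook that takes 'size_t size', like 'save_hook' */
static void hook(size_t size)
{
    /* 'size < 0' is always false for an unsigned type; a "negative"
     * argument arrives as a huge positive value instead */
    printf("hook sees size = %zu\n", size);
}

int main(void)
{
    hook(4096); /* the real case: a full page, always > 0      */
    hook(-1);   /* wraps to SIZE_MAX on conversion, never < 0  */
    return 0;
}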