qemu/nbd/common.c

/*
 * Copyright (C) 2005  Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device Common Code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "nbd-internal.h"
ssize_t nbd_wr_syncv(QIOChannel *ioc,
                     struct iovec *iov,
                     size_t niov,
                     size_t offset,
                     size_t length,
                     bool do_read)
{
    ssize_t done = 0;
    Error *local_err = NULL;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          offset, length);

    while (nlocal_iov > 0) {
        ssize_t len;
        if (do_read) {
            len = qio_channel_readv(ioc, local_iov, nlocal_iov, &local_err);
        } else {
            len = qio_channel_writev(ioc, local_iov, nlocal_iov, &local_err);
        }
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                /* XXX figure out if we can create a variant on
                 * qio_channel_yield() that works with AIO contexts
                 * and consider using that in this branch */
                qemu_coroutine_yield();
            } else if (done) {
                /* XXX this is needed by nbd_reply_ready.  */
                qio_channel_wait(ioc,
                                 do_read ? G_IO_IN : G_IO_OUT);
            } else {
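                /*
                 * No data was transferred and we are not in a
                 * coroutine: return -EAGAIN so that a caller such as
                 * nbd_reply_ready, which may have raced with another
                 * thread for the same data, can detect the false
                 * positive instead of blocking in qio_channel_wait().
                 */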
                return -EAGAIN;
            }
            continue;
        }

        if (len < 0) {
            TRACE("I/O error: %s", error_get_pretty(local_err));
            error_free(local_err);
            /* XXX handle Error objects */
            done = -EIO;
            goto cleanup;
        }

        /* A read of zero bytes means EOF: return the partial count.  */
        if (do_read && len == 0) {
            break;
        }

        /* Advance past the bytes just transferred and retry the rest.  */
        iov_discard_front(&local_iov, &nlocal_iov, len);
        done += len;
    }

 cleanup:
    g_free(local_iov_head);
    return done;
}
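
/*
 * Completion callback for an asynchronous TLS handshake: stash any
 * error for the caller and quit the main loop it runs while waiting
 * for the handshake to finish.
 */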
void nbd_tls_handshake(Object *src,
                       Error *err,
                       void *opaque)
{
    struct NBDTLSHandshakeData *data = opaque;

    if (err) {
        TRACE("TLS failed %s", error_get_pretty(err));
        data->error = error_copy(err);
    }
    data->complete = true;
    g_main_loop_quit(data->loop);
}