nbd-client: fix handling of hungup connections

After the switch to reading replies in a coroutine, nothing is
reentering pending receive coroutines if the connection hangs.
Move nbd_recv_coroutines_enter_all to the reply read coroutine,
which is the place where hangups are detected.  nbd_teardown_connection
can simply wait for the reply read coroutine to detect the hangup
and clean up after itself.
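
In outline, the two functions end up like this (a condensed sketch based
on the diff below, with dispatch and error-path details elided):

    /* Sketch only: names and fields follow the diff below. */
    static coroutine_fn void nbd_read_reply_entry(void *opaque)
    {
        NBDClientSession *s = opaque;

        for (;;) {
            if (nbd_receive_reply(s->ioc, &s->reply) <= 0) {
                break;              /* hard error *or* hangup (EOF) */
            }
            /* ... wake the request coroutine that owns this reply ... */
        }

        /* Hangup detected here: wake every coroutine still waiting for
         * a reply so each request can fail cleanly. */
        nbd_recv_coroutines_enter_all(s);
        s->read_reply_co = NULL;    /* lets teardown's poll terminate */
    }

    static void nbd_teardown_connection(BlockDriverState *bs)
    {
        NBDClientSession *client = nbd_get_client_session(bs);

        qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        /* Wait for nbd_read_reply_entry to notice and finish. */
        BDRV_POLL_WHILE(bs, client->read_reply_co);
        /* ... detach AioContext, unref the channel objects ... */
    }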

This wouldn't be enough on its own, though, because nbd_receive_reply
returns 0 (rather than -EPIPE or similar) when reading from a hung-up
connection.
Fix the return value check in nbd_read_reply_entry.
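
Concretely, with nbd_receive_reply's return convention (positive byte
count on success, 0 on EOF, negative errno on error), the reader loop's
check becomes:

    ret = nbd_receive_reply(s->ioc, &s->reply);
    if (ret <= 0) {     /* 0 = EOF (peer hung up), < 0 = -errno */
        break;
    }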

This fixes qemu-iotests 083.

Reported-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170314111157.14464-1-pbonzini@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
commit a12a712a7d
parent c919297379
Paolo Bonzini, 2017-03-14 12:11:56 +01:00 (committed by Max Reitz)
2 changed files with 7 additions and 7 deletions

block/nbd-client.c

@@ -33,17 +33,15 @@
 #define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
 #define INDEX_TO_HANDLE(bs, index)  ((index)  ^ ((uint64_t)(intptr_t)bs))
 
-static void nbd_recv_coroutines_enter_all(BlockDriverState *bs)
+static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
 {
-    NBDClientSession *s = nbd_get_client_session(bs);
     int i;
 
     for (i = 0; i < MAX_NBD_REQUESTS; i++) {
         if (s->recv_coroutine[i]) {
-            qemu_coroutine_enter(s->recv_coroutine[i]);
+            aio_co_wake(s->recv_coroutine[i]);
         }
     }
-    BDRV_POLL_WHILE(bs, s->read_reply_co);
 }
 
 static void nbd_teardown_connection(BlockDriverState *bs)
@@ -58,7 +56,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     qio_channel_shutdown(client->ioc,
                          QIO_CHANNEL_SHUTDOWN_BOTH,
                          NULL);
-    nbd_recv_coroutines_enter_all(bs);
+    BDRV_POLL_WHILE(bs, client->read_reply_co);
 
     nbd_client_detach_aio_context(bs);
     object_unref(OBJECT(client->sioc));
@@ -76,7 +74,7 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
     for (;;) {
         assert(s->reply.handle == 0);
         ret = nbd_receive_reply(s->ioc, &s->reply);
-        if (ret < 0) {
+        if (ret <= 0) {
             break;
         }
 
@@ -103,6 +101,8 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
         aio_co_wake(s->recv_coroutine[i]);
         qemu_coroutine_yield();
     }
 
+    nbd_recv_coroutines_enter_all(s);
+
     s->read_reply_co = NULL;
 }

nbd/client.c

@@ -812,6 +812,6 @@ ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply)
         LOG("invalid magic (got 0x%" PRIx32 ")", magic);
         return -EINVAL;
     }
 
-    return 0;
+    return sizeof(buf);
 }
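
Returning sizeof(buf) rather than 0 on success (buf being the fixed-size
reply header that nbd_receive_reply reads off the wire) makes the ssize_t
convention unambiguous: a positive byte count means a complete reply
header was read, 0 means EOF because the peer hung up, and a negative
value is -errno. That is exactly the distinction the new ret <= 0 check
in nbd_read_reply_entry relies on.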