qemu/block/nbd.c


/*
* QEMU Block driver for NBD
*
* Copyright (c) 2019 Virtuozzo International GmbH.
* Copyright (C) 2016 Red Hat, Inc.
* Copyright (C) 2008 Bull S.A.S.
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
* Some parts:
* Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"
#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
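
/*
 * Illustrative note (not part of the original file): because XOR with the
 * same key is self-inverse, a request-slot index survives the round trip
 * through a handle.  A minimal sketch, assuming a valid bs and an index
 * below MAX_NBD_REQUESTS:
 *
 *     uint64_t handle = INDEX_TO_HANDLE(bs, 3);
 *     assert(HANDLE_TO_INDEX(bs, handle) == 3);
 *
 * Mixing in the bs pointer also makes it unlikely that a stray reply
 * intended for another block device maps to a valid index here.
 */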

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* waiting for connection_co? */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;
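
/*
 * Summary of the transitions implemented below (illustrative, derived from
 * nbd_channel_error() and reconnect_delay_timer_cb()):
 *
 *   CONNECTED --(-EIO, reconnect_delay != 0)--> CONNECTING_WAIT
 *   CONNECTED --(-EIO, reconnect_delay == 0)--> CONNECTING_NOWAIT
 *   CONNECTING_WAIT --(delay timer fires)-----> CONNECTING_NOWAIT
 *   any state --(fatal error or teardown)-----> QUIT
 *
 * In CONNECTING_WAIT new requests wait for the reconnect; in
 * CONNECTING_NOWAIT they fail immediately while reconnection continues in
 * the background.
 */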

typedef enum NBDConnectThreadState {
    /* No thread, no pending results */
    CONNECT_THREAD_NONE,

    /* Thread is running, no results for now */
    CONNECT_THREAD_RUNNING,

    /*
     * Thread is running, but requestor exited. Thread should close
     * the new socket and free the connect state on exit.
     */
    CONNECT_THREAD_RUNNING_DETACHED,

    /* Thread finished, results are stored in a state */
    CONNECT_THREAD_FAIL,
    CONNECT_THREAD_SUCCESS
} NBDConnectThreadState;
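
/*
 * Illustrative lifecycle (derived from the code below):
 *
 *   NONE/FAIL --(nbd_co_establish_connection spawns thread)--> RUNNING
 *   RUNNING ---(connect_thread_func finishes)----------------> FAIL/SUCCESS
 *   RUNNING ---(requestor gives up, e.g. teardown)-----------> RUNNING_DETACHED
 *   SUCCESS ---(result consumed by next connection attempt)--> NONE
 *
 * In RUNNING_DETACHED the thread itself frees the NBDConnectThread, so
 * nobody else may touch it afterwards.
 */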

typedef struct NBDConnectThread {
    /* Initialization constants */
    SocketAddress *saddr; /* address to connect to */

    /*
     * Bottom half to schedule on completion. Scheduled only if bh_ctx is not
     * NULL.
     */
    QEMUBHFunc *bh_func;
    void *bh_opaque;

    /*
     * Result of the last attempt. Valid in FAIL and SUCCESS states.
     * If you want to steal the error, don't forget to set the pointer
     * to NULL afterwards.
     */
    QIOChannelSocket *sioc;
    Error *err;

    /* state and bh_ctx are protected by mutex */
    QemuMutex mutex;
    NBDConnectThreadState state; /* current state of the thread */
    AioContext *bh_ctx; /* where to schedule bh (NULL means don't schedule) */
} NBDConnectThread;

typedef struct BDRVNBDState {
    QIOChannelSocket *sioc; /* The master data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;
    Coroutine *connection_co;
    Coroutine *teardown_co;
    QemuCoSleepState *connection_co_sleep_ns_state;
    bool drained;
    bool wait_drained_end;
    int in_flight;
    NBDClientState state;
    int connect_status;
    Error *connect_err;
    bool wait_in_flight;

    QEMUTimer *reconnect_delay_timer;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    bool wait_connect;
    NBDConnectThread *connect_thread;
} BDRVNBDState;

static int nbd_establish_connection(BlockDriverState *bs, SocketAddress *saddr,
                                    Error **errp);
static int nbd_co_establish_connection(BlockDriverState *bs, Error **errp);
static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
                                               bool detach);
static int nbd_client_handshake(BlockDriverState *bs, Error **errp);
static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BDRVNBDState *s)
{
    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

static void nbd_channel_error(BDRVNBDState *s, int ret)
{
    if (ret == -EIO) {
        if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
            qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
        s->state = NBD_CLIENT_QUIT;
    }
}

static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        while (qemu_co_enter_next(&s->free_sema, NULL)) {
            /* Resume all queued requests */
        }
    }

    reconnect_delay_timer_del(s);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
        return;
    }

    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}
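
/*
 * Illustrative usage (not from this excerpt; the real call site is in the
 * reconnect logic further down this file): the expire time is expected to
 * be an absolute QEMU_CLOCK_REALTIME value in nanoseconds, e.g.
 *
 *     reconnect_delay_timer_init(s,
 *         qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
 *         s->reconnect_delay * NANOSECONDS_PER_SECOND);
 */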

static void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /* Timer is deleted in nbd_client_co_drain_begin() */
    assert(!s->reconnect_delay_timer);
    qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
}

static void nbd_client_attach_aio_context_bh(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /*
     * The node is still drained, so we know the coroutine has yielded in
     * nbd_read_eof(), the only place where bs->in_flight can reach 0, or it is
     * entered for the first time. Both places are safe for entering the
     * coroutine.
     */
    qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
    bdrv_dec_in_flight(bs);
}

static void nbd_client_attach_aio_context(BlockDriverState *bs,
                                          AioContext *new_context)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /*
     * s->connection_co is either yielded from nbd_receive_reply or from
     * nbd_co_reconnect_loop()
     */
    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
        qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);
    }

    bdrv_inc_in_flight(bs);

    /*
     * Need to wait here for the BH to run because the BH must run while the
     * node is still drained.
     */
    aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
}

static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->drained = true;
    if (s->connection_co_sleep_ns_state) {
        qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
    }

    nbd_co_establish_connection_cancel(bs, false);

    reconnect_delay_timer_del(s);
    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        qemu_co_queue_restart_all(&s->free_sema);
    }
}

static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->drained = false;
    if (s->wait_drained_end) {
        s->wait_drained_end = false;
        aio_co_wake(s->connection_co);
    }
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    if (s->ioc) {
        /* finish any pending coroutines */
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    } else if (s->sioc) {
        /* abort negotiation */
        qio_channel_shutdown(QIO_CHANNEL(s->sioc), QIO_CHANNEL_SHUTDOWN_BOTH,
                             NULL);
    }

    s->state = NBD_CLIENT_QUIT;
    if (s->connection_co) {
        if (s->connection_co_sleep_ns_state) {
            qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
        }
        nbd_co_establish_connection_cancel(bs, true);
    }
    if (qemu_in_coroutine()) {
        s->teardown_co = qemu_coroutine_self();
        /* connection_co resumes us when it terminates */
        qemu_coroutine_yield();
        s->teardown_co = NULL;
    } else {
        BDRV_POLL_WHILE(bs, s->connection_co);
    }
    assert(!s->connection_co);
}

static bool nbd_client_connecting(BDRVNBDState *s)
{
    NBDClientState state = qatomic_load_acquire(&s->state);

    return state == NBD_CLIENT_CONNECTING_WAIT ||
           state == NBD_CLIENT_CONNECTING_NOWAIT;
}

static bool nbd_client_connecting_wait(BDRVNBDState *s)
{
    return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
}

static void connect_bh(void *opaque)
{
    BDRVNBDState *state = opaque;

    assert(state->wait_connect);
    state->wait_connect = false;
    aio_co_wake(state->connection_co);
}

static void nbd_init_connect_thread(BDRVNBDState *s)
{
    s->connect_thread = g_new(NBDConnectThread, 1);

    *s->connect_thread = (NBDConnectThread) {
        .saddr = QAPI_CLONE(SocketAddress, s->saddr),
        .state = CONNECT_THREAD_NONE,
        .bh_func = connect_bh,
        .bh_opaque = s,
    };

    qemu_mutex_init(&s->connect_thread->mutex);
}

static void nbd_free_connect_thread(NBDConnectThread *thr)
{
    if (thr->sioc) {
        qio_channel_close(QIO_CHANNEL(thr->sioc), NULL);
    }
    error_free(thr->err);
    qapi_free_SocketAddress(thr->saddr);
    g_free(thr);
}

static void *connect_thread_func(void *opaque)
{
    NBDConnectThread *thr = opaque;
    int ret;
    bool do_free = false;

    thr->sioc = qio_channel_socket_new();

    error_free(thr->err);
    thr->err = NULL;
    ret = qio_channel_socket_connect_sync(thr->sioc, thr->saddr, &thr->err);
    if (ret < 0) {
        object_unref(OBJECT(thr->sioc));
        thr->sioc = NULL;
    }

    qemu_mutex_lock(&thr->mutex);

    switch (thr->state) {
    case CONNECT_THREAD_RUNNING:
        thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
        if (thr->bh_ctx) {
            aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);

            /* play safe, don't reuse bh_ctx on further connection attempts */
            thr->bh_ctx = NULL;
        }
        break;
    case CONNECT_THREAD_RUNNING_DETACHED:
        do_free = true;
        break;
    default:
        abort();
    }

    qemu_mutex_unlock(&thr->mutex);

    if (do_free) {
        nbd_free_connect_thread(thr);
    }

    return NULL;
}
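
/*
 * How the pieces above cooperate (summary; the wait/cancel half of the
 * protocol sits further down this file): nbd_co_establish_connection()
 * spawns connect_thread_func() in a detached thread and yields.
 * connect_thread_func() performs the blocking getaddrinfo()/connect() and,
 * if a requestor is still waiting (thr->bh_ctx != NULL), schedules
 * connect_bh() in that AioContext.  connect_bh() clears s->wait_connect and
 * wakes s->connection_co, which then collects thr->sioc or thr->err under
 * thr->mutex.
 */
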
static int coroutine_fn
nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
{
    int ret;
    QemuThread thread;
    BDRVNBDState *s = bs->opaque;
    NBDConnectThread *thr = s->connect_thread;

    qemu_mutex_lock(&thr->mutex);

    switch (thr->state) {
    case CONNECT_THREAD_FAIL:
    case CONNECT_THREAD_NONE:
        error_free(thr->err);
        thr->err = NULL;
        thr->state = CONNECT_THREAD_RUNNING;
        qemu_thread_create(&thread, "nbd-connect",
                           connect_thread_func, thr, QEMU_THREAD_DETACHED);
        break;
    case CONNECT_THREAD_SUCCESS:
        /* Previous attempt finally succeeded in background */
        thr->state = CONNECT_THREAD_NONE;
        s->sioc = thr->sioc;
thr->sioc = NULL;
yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
nbd_yank, bs);
qemu_mutex_unlock(&thr->mutex);
return 0;
case CONNECT_THREAD_RUNNING:
/* Already running, will wait */
break;
default:
abort();
}
thr->bh_ctx = qemu_get_current_aio_context();
qemu_mutex_unlock(&thr->mutex);
/*
* We are going to wait for the connect thread to finish, but
* nbd_client_co_drain_begin() may interrupt the wait.
*
* Note that the wait_connect variable is not visible to the connect
* thread. It needs no mutex protection, as it is used only within the
* home aio context of bs.
*/
s->wait_connect = true;
qemu_coroutine_yield();
qemu_mutex_lock(&thr->mutex);
switch (thr->state) {
case CONNECT_THREAD_SUCCESS:
case CONNECT_THREAD_FAIL:
thr->state = CONNECT_THREAD_NONE;
error_propagate(errp, thr->err);
thr->err = NULL;
s->sioc = thr->sioc;
thr->sioc = NULL;
if (s->sioc) {
yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
nbd_yank, bs);
}
ret = (s->sioc ? 0 : -1);
break;
case CONNECT_THREAD_RUNNING:
case CONNECT_THREAD_RUNNING_DETACHED:
/*
* A drained section wants to start, so report this attempt as failed.
* Still, the connect thread keeps executing in the background, and its
* result may be reused for the next connection attempt.
*/
ret = -1;
error_setg(errp, "Connection attempt cancelled by other operation");
break;
case CONNECT_THREAD_NONE:
/*
* Impossible: we have seen this thread running, so it must still be
* running or at least have produced a result.
*/
abort();
default:
abort();
}
qemu_mutex_unlock(&thr->mutex);
return ret;
}
/*
* nbd_co_establish_connection_cancel
* Cancel nbd_co_establish_connection asynchronously: it will finish soon,
* allowing the drained section to begin.
*
* If detach is true, also clean up the state (or, if the thread is still
* running, move it to the CONNECT_THREAD_RUNNING_DETACHED state).
* s->connect_thread becomes NULL if detach is true.
*/
static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
bool detach)
{
BDRVNBDState *s = bs->opaque;
NBDConnectThread *thr = s->connect_thread;
bool wake = false;
bool do_free = false;
qemu_mutex_lock(&thr->mutex);
if (thr->state == CONNECT_THREAD_RUNNING) {
/* We can cancel only in running state, when bh is not yet scheduled */
thr->bh_ctx = NULL;
if (s->wait_connect) {
s->wait_connect = false;
wake = true;
}
if (detach) {
thr->state = CONNECT_THREAD_RUNNING_DETACHED;
s->connect_thread = NULL;
}
} else if (detach) {
do_free = true;
}
qemu_mutex_unlock(&thr->mutex);
if (do_free) {
nbd_free_connect_thread(thr);
s->connect_thread = NULL;
}
if (wake) {
aio_co_wake(s->connection_co);
}
}
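/*
 * nbd_reconnect_attempt
 * Wait for all in-flight requests to finish, tear down any previous
 * connection, then try to establish and handshake a new one. On success
 * the state becomes NBD_CLIENT_CONNECTED and waiters on free_sema are
 * restarted; the result is recorded in s->connect_status / connect_err.
 */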
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
int ret;
Error *local_err = NULL;
if (!nbd_client_connecting(s)) {
return;
}
/* Wait for completion of all in-flight requests */
qemu_co_mutex_lock(&s->send_mutex);
while (s->in_flight > 0) {
qemu_co_mutex_unlock(&s->send_mutex);
nbd_recv_coroutines_wake_all(s);
s->wait_in_flight = true;
qemu_coroutine_yield();
s->wait_in_flight = false;
qemu_co_mutex_lock(&s->send_mutex);
}
qemu_co_mutex_unlock(&s->send_mutex);
if (!nbd_client_connecting(s)) {
return;
}
/*
* Now we are sure that nobody is accessing the channel, and no one will
* try until we set the state to CONNECTED.
*/
/* Finalize previous connection if any */
if (s->ioc) {
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
nbd_yank, s->bs);
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
object_unref(OBJECT(s->ioc));
s->ioc = NULL;
}
if (nbd_co_establish_connection(s->bs, &local_err) < 0) {
ret = -ECONNREFUSED;
goto out;
}
bdrv_dec_in_flight(s->bs);
ret = nbd_client_handshake(s->bs, &local_err);
if (s->drained) {
s->wait_drained_end = true;
while (s->drained) {
/*
* We may be entered once from nbd_client_attach_aio_context_bh
* and then again from nbd_client_co_drain_end, hence the loop.
*/
qemu_coroutine_yield();
}
}
bdrv_inc_in_flight(s->bs);
out:
s->connect_status = ret;
error_free(s->connect_err);
s->connect_err = NULL;
error_propagate(&s->connect_err, local_err);
if (ret >= 0) {
/* successfully connected */
s->state = NBD_CLIENT_CONNECTED;
qemu_co_queue_restart_all(&s->free_sema);
}
}
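/*
 * nbd_co_reconnect_loop
 * Repeat nbd_reconnect_attempt() with exponentially growing pauses
 * (1s doubling up to 16s) until the client leaves the connecting state.
 * While in NBD_CLIENT_CONNECTING_WAIT, a reconnect-delay timer bounds
 * how long requests keep waiting for the reconnect.
 */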
static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
{
uint64_t timeout = 1 * NANOSECONDS_PER_SECOND;
uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND;
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
s->reconnect_delay * NANOSECONDS_PER_SECOND);
}
nbd_reconnect_attempt(s);
while (nbd_client_connecting(s)) {
if (s->drained) {
bdrv_dec_in_flight(s->bs);
s->wait_drained_end = true;
while (s->drained) {
/*
* We may be entered once from nbd_client_attach_aio_context_bh
* and then again from nbd_client_co_drain_end, hence the loop.
*/
qemu_coroutine_yield();
}
bdrv_inc_in_flight(s->bs);
} else {
qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, timeout,
&s->connection_co_sleep_ns_state);
if (s->drained) {
continue;
}
if (timeout < max_timeout) {
timeout *= 2;
}
}
nbd_reconnect_attempt(s);
}
reconnect_delay_timer_del(s);
}
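/*
 * nbd_connection_entry
 * Body of s->connection_co: reconnect whenever needed, then receive one
 * reply header at a time and wake the request coroutine that owns the
 * matching handle. On NBD_CLIENT_QUIT, tear down the channel and wake
 * any coroutine waiting for teardown.
 */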
static coroutine_fn void nbd_connection_entry(void *opaque)
{
BDRVNBDState *s = opaque;
uint64_t i;
int ret = 0;
Error *local_err = NULL;
while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) {
/*
* The NBD client can only really be considered idle when it has
* yielded from qio_channel_readv_all_eof(), waiting for data. This is
* the point where the additional scheduled coroutine entry happens
* after nbd_client_attach_aio_context().
*
* Therefore we keep an additional in_flight reference all the time and
* only drop it temporarily here.
*/
if (nbd_client_connecting(s)) {
nbd_co_reconnect_loop(s);
}
if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
continue;
}
assert(s->reply.handle == 0);
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);
if (local_err) {
trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
error_free(local_err);
local_err = NULL;
}
if (ret <= 0) {
nbd_channel_error(s, ret ? ret : -EIO);
continue;
}
/*
* There's no need for a mutex on the receive side, because the
* handler acts as a synchronization point and ensures that only
* one coroutine is called until the reply finishes.
*/
i = HANDLE_TO_INDEX(s, s->reply.handle);
if (i >= MAX_NBD_REQUESTS ||
!s->requests[i].coroutine ||
!s->requests[i].receiving ||
(nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
{
nbd_channel_error(s, -EINVAL);
continue;
}
/*
* We're woken up again by the request itself. Note that there
* is no race between yielding and reentering connection_co. This
* is because:
*
* - if the request runs on the same AioContext, it is only
* entered after we yield
*
* - if the request runs on a different AioContext, reentering
* connection_co happens through a bottom half, which can only
* run after we yield.
*/
aio_co_wake(s->requests[i].coroutine);
qemu_coroutine_yield();
}
qemu_co_queue_restart_all(&s->free_sema);
nbd_recv_coroutines_wake_all(s);
bdrv_dec_in_flight(s->bs);
s->connection_co = NULL;
if (s->ioc) {
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
nbd_yank, s->bs);
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
object_unref(OBJECT(s->ioc));
s->ioc = NULL;
}
if (s->teardown_co) {
aio_co_wake(s->teardown_co);
}
aio_wait_kick();
}
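/*
 * nbd_co_send_request
 * Grab a free request slot (at most MAX_NBD_REQUESTS in flight) and send
 * the request header, followed by the payload for writes; the channel is
 * corked so header and payload are coalesced. Returns 0 on success or a
 * negative errno, marking the channel as broken on send errors.
 */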
static int nbd_co_send_request(BlockDriverState *bs,
NBDRequest *request,
QEMUIOVector *qiov)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
int rc, i = -1;
qemu_co_mutex_lock(&s->send_mutex);
while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) {
qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
}
if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
rc = -EIO;
goto err;
}
s->in_flight++;
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
if (s->requests[i].coroutine == NULL) {
break;
}
}
g_assert(qemu_in_coroutine());
assert(i < MAX_NBD_REQUESTS);
s->requests[i].coroutine = qemu_coroutine_self();
s->requests[i].offset = request->from;
s->requests[i].receiving = false;
request->handle = INDEX_TO_HANDLE(s, i);
assert(s->ioc);
if (qiov) {
qio_channel_set_cork(s->ioc, true);
rc = nbd_send_request(s->ioc, request);
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED &&
rc >= 0) {
if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
NULL) < 0) {
rc = -EIO;
}
} else if (rc >= 0) {
rc = -EIO;
}
qio_channel_set_cork(s->ioc, false);
} else {
rc = nbd_send_request(s->ioc, request);
}
err:
if (rc < 0) {
nbd_channel_error(s, rc);
if (i != -1) {
s->requests[i].coroutine = NULL;
s->in_flight--;
}
if (s->in_flight == 0 && s->wait_in_flight) {
aio_co_wake(s->connection_co);
} else {
qemu_co_queue_next(&s->free_sema);
}
}
qemu_co_mutex_unlock(&s->send_mutex);
return rc;
}
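/*
 * payload_advance16/32/64
 * Read a big-endian integer from *payload and advance the pointer past
 * it; used to parse structured reply payloads field by field.
 */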
static inline uint16_t payload_advance16(uint8_t **payload)
{
*payload += 2;
return lduw_be_p(*payload - 2);
}
static inline uint32_t payload_advance32(uint8_t **payload)
{
*payload += 4;
return ldl_be_p(*payload - 4);
}
static inline uint64_t payload_advance64(uint8_t **payload)
{
*payload += 8;
return ldq_be_p(*payload - 8);
}
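/*
 * nbd_parse_offset_hole_payload
 * Validate an NBD_REPLY_TYPE_OFFSET_HOLE chunk against the original
 * request and zero the described range of @qiov.
 */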
static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
NBDStructuredReplyChunk *chunk,
uint8_t *payload, uint64_t orig_offset,
QEMUIOVector *qiov, Error **errp)
{
uint64_t offset;
uint32_t hole_size;
if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
error_setg(errp, "Protocol error: invalid payload for "
"NBD_REPLY_TYPE_OFFSET_HOLE");
return -EINVAL;
}
offset = payload_advance64(&payload);
hole_size = payload_advance32(&payload);
if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
offset > orig_offset + qiov->size - hole_size) {
error_setg(errp, "Protocol error: server sent chunk exceeding requested"
" region");
return -EINVAL;
}
if (s->info.min_block &&
!QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
trace_nbd_structured_read_compliance("hole");
}
qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);
return 0;
}
/*
* nbd_parse_blockstatus_payload
* Based on our request, we expect only one extent in reply, for the
* base:allocation context.
*/
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
NBDStructuredReplyChunk *chunk,
uint8_t *payload, uint64_t orig_length,
NBDExtent *extent, Error **errp)
{
uint32_t context_id;
/* The server succeeded, so it must have sent [at least] one extent */
if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
error_setg(errp, "Protocol error: invalid payload for "
"NBD_REPLY_TYPE_BLOCK_STATUS");
return -EINVAL;
}
context_id = payload_advance32(&payload);
if (s->info.context_id != context_id) {
error_setg(errp, "Protocol error: unexpected context id %d for "
"NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
"id is %d", context_id,
s->info.context_id);
return -EINVAL;
}
extent->length = payload_advance32(&payload);
extent->flags = payload_advance32(&payload);
if (extent->length == 0) {
error_setg(errp, "Protocol error: server sent status chunk with "
"zero length");
return -EINVAL;
}
/*
* A server sending unaligned block status is in violation of the
* protocol, but as qemu-nbd 3.1 is such a server (at least for
* POSIX files that are not a multiple of 512 bytes, since qemu
* rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
* still sees an implicit hole beyond the real EOF), it's nicer to
* work around the misbehaving server. If the request included
* more than the final unaligned block, truncate it back to an
* aligned result; if the request was only the final block, round
* up to the full block and change the status to fully-allocated
* (always a safe status, even if it loses information).
*/
if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
s->info.min_block)) {
trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
if (extent->length > s->info.min_block) {
extent->length = QEMU_ALIGN_DOWN(extent->length,
s->info.min_block);
} else {
extent->length = s->info.min_block;
extent->flags = 0;
}
}
/*
* We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
* sent us any more than one extent, nor should it have included
* status beyond our request in that extent. However, it's easy
* enough to ignore the server's noncompliance without killing the
* connection; just ignore trailing extents, and clamp things to
* the length of our request.
*/
if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
trace_nbd_parse_blockstatus_compliance("more than one extent");
}
if (extent->length > orig_length) {
extent->length = orig_length;
trace_nbd_parse_blockstatus_compliance("extent length too large");
}
/*
* HACK: if we are using x-dirty-bitmaps to access
* qemu:allocation-depth, treat all depths > 2 the same as 2,
* since nbd_client_co_block_status is only expecting the low two
* bits to be set.
*/
if (s->alloc_depth && extent->flags > 2) {
extent->flags = 2;
}
return 0;
}
/*
* nbd_parse_error_payload
* on success @errp contains message describing nbd error reply
*/
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
uint8_t *payload, int *request_ret,
Error **errp)
{
uint32_t error;
uint16_t message_size;
assert(chunk->type & (1 << 15));
if (chunk->length < sizeof(error) + sizeof(message_size)) {
error_setg(errp,
"Protocol error: invalid payload for structured error");
return -EINVAL;
}
error = nbd_errno_to_system_errno(payload_advance32(&payload));
if (error == 0) {
error_setg(errp, "Protocol error: server sent structured error chunk "
"with error = 0");
return -EINVAL;
}
*request_ret = -error;
message_size = payload_advance16(&payload);
if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
error_setg(errp, "Protocol error: server sent structured error chunk "
"with incorrect message size");
return -EINVAL;
}
/* TODO: Add a trace point to mention the server complaint */
/* TODO handle ERROR_OFFSET */
return 0;
}
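/*
 * nbd_co_receive_offset_data_payload
 * Handle an NBD_REPLY_TYPE_OFFSET_DATA chunk: check that the advertised
 * offset and length lie within the original request, then read the data
 * straight into the matching slice of @qiov.
 */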
static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
uint64_t orig_offset,
QEMUIOVector *qiov, Error **errp)
{
QEMUIOVector sub_qiov;
uint64_t offset;
size_t data_size;
int ret;
NBDStructuredReplyChunk *chunk = &s->reply.structured;
assert(nbd_reply_is_structured(&s->reply));
/* The NBD spec requires at least one byte of payload */
if (chunk->length <= sizeof(offset)) {
error_setg(errp, "Protocol error: invalid payload for "
"NBD_REPLY_TYPE_OFFSET_DATA");
return -EINVAL;
}
if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
return -EIO;
}
data_size = chunk->length - sizeof(offset);
assert(data_size);
if (offset < orig_offset || data_size > qiov->size ||
offset > orig_offset + qiov->size - data_size) {
error_setg(errp, "Protocol error: server sent chunk exceeding requested"
" region");
return -EINVAL;
}
if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
trace_nbd_structured_read_compliance("data");
}
qemu_iovec_init(&sub_qiov, qiov->niov);
qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
qemu_iovec_destroy(&sub_qiov);
return ret < 0 ? -EIO : 0;
}
#define NBD_MAX_MALLOC_PAYLOAD 1000
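/*
 * nbd_co_receive_structured_payload
 * Read a structured reply payload of at most NBD_MAX_MALLOC_PAYLOAD
 * bytes into a freshly allocated buffer stored in *payload; the caller
 * must g_free() it.
 */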
static coroutine_fn int nbd_co_receive_structured_payload(
BDRVNBDState *s, void **payload, Error **errp)
{
int ret;
uint32_t len;
assert(nbd_reply_is_structured(&s->reply));
len = s->reply.structured.length;
if (len == 0) {
return 0;
}
if (payload == NULL) {
error_setg(errp, "Unexpected structured payload");
return -EINVAL;
}
if (len > NBD_MAX_MALLOC_PAYLOAD) {
error_setg(errp, "Payload too large");
return -EINVAL;
}
*payload = g_new(char, len);
ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
if (ret < 0) {
g_free(*payload);
*payload = NULL;
return ret;
}
return 0;
}
/*
* nbd_co_do_receive_one_chunk
* for simple reply:
* set request_ret to received reply error
* if qiov is not NULL: read payload to @qiov
* for structured reply chunk:
* if error chunk: read payload, set @request_ret, do not set @payload
* else if offset_data chunk: read payload data to @qiov, do not set @payload
* else: read payload to @payload
*
* If function fails, @errp contains corresponding error message, and the
* connection with the server is suspect. If it returns 0, then the
* transaction succeeded (although @request_ret may be a negative errno
* corresponding to the server's error reply), and errp is unchanged.
*/
static coroutine_fn int nbd_co_do_receive_one_chunk(
BDRVNBDState *s, uint64_t handle, bool only_structured,
int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
int ret;
int i = HANDLE_TO_INDEX(s, handle);
void *local_payload = NULL;
NBDStructuredReplyChunk *chunk;
if (payload) {
*payload = NULL;
}
*request_ret = 0;
/* Wait until we're woken up by nbd_connection_entry. */
s->requests[i].receiving = true;
qemu_coroutine_yield();
s->requests[i].receiving = false;
if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
error_setg(errp, "Connection closed");
return -EIO;
}
assert(s->ioc);
assert(s->reply.handle == handle);
if (nbd_reply_is_simple(&s->reply)) {
if (only_structured) {
error_setg(errp, "Protocol error: simple reply when structured "
"reply chunk was expected");
return -EINVAL;
}
*request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
if (*request_ret < 0 || !qiov) {
return 0;
}
return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
errp) < 0 ? -EIO : 0;
}
/* handle structured reply chunk */
assert(s->info.structured_reply);
chunk = &s->reply.structured;
if (chunk->type == NBD_REPLY_TYPE_NONE) {
if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
" NBD_REPLY_FLAG_DONE flag set");
return -EINVAL;
}
if (chunk->length) {
error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
" nonzero length");
return -EINVAL;
}
return 0;
}
if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
if (!qiov) {
error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
return -EINVAL;
}
return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
qiov, errp);
}
if (nbd_reply_type_is_error(chunk->type)) {
payload = &local_payload;
}
ret = nbd_co_receive_structured_payload(s, payload, errp);
if (ret < 0) {
return ret;
}
if (nbd_reply_type_is_error(chunk->type)) {
ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
g_free(local_payload);
return ret;
}
return 0;
}
/*
* nbd_co_receive_one_chunk
* Read reply, wake up connection_co and set s->quit if needed.
* Return value is a fatal error code or normal nbd reply error code
*/
static coroutine_fn int nbd_co_receive_one_chunk(
BDRVNBDState *s, uint64_t handle, bool only_structured,
int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
Error **errp)
{
int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
request_ret, qiov, payload, errp);
if (ret < 0) {
memset(reply, 0, sizeof(*reply));
nbd_channel_error(s, ret);
} else {
/* For assert at loop start in nbd_connection_entry */
*reply = s->reply;
}
s->reply.handle = 0;
if (s->connection_co && !s->wait_in_flight) {
/*
* We must check s->wait_in_flight, because we may have been entered
* by nbd_recv_coroutines_wake_all(); in that case we must not wake
* connection_co here, as it will be woken by the last request.
*/
aio_co_wake(s->connection_co);
}
return ret;
}
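/*
 * NBDReplyChunkIter
 * Accumulated state for iterating over the chunks of one reply: the
 * first fatal error (ret/err), the first per-request error
 * (request_ret), and whether the final chunk has been seen.
 */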
typedef struct NBDReplyChunkIter {
int ret;
int request_ret;
Error *err;
bool done, only_structured;
} NBDReplyChunkIter;
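/*
 * nbd_iter_channel_error / nbd_iter_request_error
 * Record a fatal channel error or a per-request error in the iterator;
 * only the first error of each kind is kept, later ones are dropped.
 */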
static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
int ret, Error **local_err)
{
assert(local_err && *local_err);
assert(ret < 0);
if (!iter->ret) {
iter->ret = ret;
error_propagate(&iter->err, *local_err);
} else {
error_free(*local_err);
}
*local_err = NULL;
}
static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
assert(ret < 0);
if (!iter->request_ret) {
iter->request_ret = ret;
}
}
/*
* NBD_FOREACH_REPLY_CHUNK
* The pointer stored in @payload requires g_free() to free it.
*/
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
qiov, reply, payload) \
for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
/*
* nbd_reply_chunk_iter_receive
* The pointer stored in @payload requires g_free() to free it.
*/
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
NBDReplyChunkIter *iter,
uint64_t handle,
QEMUIOVector *qiov, NBDReply *reply,
void **payload)
{
int ret, request_ret;
NBDReply local_reply;
NBDStructuredReplyChunk *chunk;
Error *local_err = NULL;
if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
error_setg(&local_err, "Connection closed");
nbd_iter_channel_error(iter, -EIO, &local_err);
goto break_loop;
}
if (iter->done) {
/* Previous iteration was last. */
goto break_loop;
}
if (reply == NULL) {
reply = &local_reply;
}
ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
&request_ret, qiov, reply, payload,
&local_err);
if (ret < 0) {
nbd_iter_channel_error(iter, ret, &local_err);
} else if (request_ret < 0) {
nbd_iter_request_error(iter, request_ret);
}
/* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
if (nbd_reply_is_simple(reply) ||
qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
goto break_loop;
}
chunk = &reply->structured;
iter->only_structured = true;
if (chunk->type == NBD_REPLY_TYPE_NONE) {
/* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
assert(chunk->flags & NBD_REPLY_FLAG_DONE);
goto break_loop;
}
if (chunk->flags & NBD_REPLY_FLAG_DONE) {
/* This iteration is last. */
iter->done = true;
}
/* Execute the loop body */
return true;
break_loop:
s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
qemu_co_mutex_lock(&s->send_mutex);
s->in_flight--;
if (s->in_flight == 0 && s->wait_in_flight) {
aio_co_wake(s->connection_co);
} else {
qemu_co_queue_next(&s->free_sema);
}
qemu_co_mutex_unlock(&s->send_mutex);
return false;
}
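/*
 * nbd_co_receive_return_code
 * Drain all reply chunks for @handle, keeping only the return codes;
 * used for requests that expect no payload in the reply.
 */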
static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
/* nbd_reply_chunk_iter_receive does all the work */
}
error_propagate(errp, iter.err);
*request_ret = iter.request_ret;
return iter.ret;
}
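/*
 * nbd_co_receive_cmdread_reply
 * Consume the reply to NBD_CMD_READ: OFFSET_DATA chunks have already
 * been read into @qiov by the chunk reader, OFFSET_HOLE chunks zero
 * their range here, and any other non-error chunk type is a protocol
 * error.
 */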
static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
uint64_t offset, QEMUIOVector *qiov,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBDReply reply;
void *payload = NULL;
Error *local_err = NULL;
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
qiov, &reply, &payload)
{
int ret;
NBDStructuredReplyChunk *chunk = &reply.structured;
assert(nbd_reply_is_structured(&reply));
switch (chunk->type) {
case NBD_REPLY_TYPE_OFFSET_DATA:
/*
* special cased in nbd_co_receive_one_chunk, data is already
* in qiov
*/
break;
case NBD_REPLY_TYPE_OFFSET_HOLE:
ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
offset, qiov, &local_err);
if (ret < 0) {
nbd_channel_error(s, ret);
nbd_iter_channel_error(&iter, ret, &local_err);
}
break;
default:
if (!nbd_reply_type_is_error(chunk->type)) {
/* not an allowed reply type */
nbd_channel_error(s, -EINVAL);
error_setg(&local_err,
"Unexpected reply type: %d (%s) for CMD_READ",
chunk->type, nbd_reply_type_lookup(chunk->type));
nbd_iter_channel_error(&iter, -EINVAL, &local_err);
}
}
g_free(payload);
payload = NULL;
}
error_propagate(errp, iter.err);
*request_ret = iter.request_ret;
return iter.ret;
}
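/*
 * nbd_co_receive_blockstatus_reply
 * Consume the reply to NBD_CMD_BLOCK_STATUS, expecting exactly one
 * BLOCK_STATUS chunk for the negotiated context; a reply without any
 * status extent is reported as -EIO.
 */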
static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
uint64_t handle, uint64_t length,
NBDExtent *extent,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBDReply reply;
void *payload = NULL;
Error *local_err = NULL;
bool received = false;
assert(!extent->length);
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
int ret;
NBDStructuredReplyChunk *chunk = &reply.structured;
assert(nbd_reply_is_structured(&reply));
switch (chunk->type) {
case NBD_REPLY_TYPE_BLOCK_STATUS:
if (received) {
nbd_channel_error(s, -EINVAL);
error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
nbd_iter_channel_error(&iter, -EINVAL, &local_err);
}
received = true;
ret = nbd_parse_blockstatus_payload(s, &reply.structured,
payload, length, extent,
&local_err);
if (ret < 0) {
nbd_channel_error(s, ret);
nbd_iter_channel_error(&iter, ret, &local_err);
}
break;
default:
if (!nbd_reply_type_is_error(chunk->type)) {
nbd_channel_error(s, -EINVAL);
error_setg(&local_err,
"Unexpected reply type: %d (%s) "
"for CMD_BLOCK_STATUS",
chunk->type, nbd_reply_type_lookup(chunk->type));
nbd_iter_channel_error(&iter, -EINVAL, &local_err);
}
}
g_free(payload);
payload = NULL;
}
if (!extent->length && !iter.request_ret) {
error_setg(&local_err, "Server did not reply with any status extents");
nbd_iter_channel_error(&iter, -EIO, &local_err);
}
error_propagate(errp, iter.err);
*request_ret = iter.request_ret;
return iter.ret;
}
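/*
 * nbd_co_request
 * Common path for requests whose reply carries no data: send the request
 * (with an optional write payload) and collect the return code, retrying
 * transparently while the client is waiting to reconnect.
 */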
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
QEMUIOVector *write_qiov)
{
int ret, request_ret;
Error *local_err = NULL;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
assert(request->type != NBD_CMD_READ);
if (write_qiov) {
assert(request->type == NBD_CMD_WRITE);
assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
} else {
assert(request->type != NBD_CMD_WRITE);
}
do {
ret = nbd_co_send_request(bs, request, write_qiov);
if (ret < 0) {
continue;
}
ret = nbd_co_receive_return_code(s, request->handle,
&request_ret, &local_err);
if (local_err) {
trace_nbd_co_request_fail(request->from, request->len,
request->handle, request->flags,
request->type,
nbd_cmd_lookup(request->type),
ret, error_get_pretty(local_err));
error_free(local_err);
local_err = NULL;
}
} while (ret < 0 && nbd_client_connecting_wait(s));
return ret ? ret : request_ret;
}
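/*
 * Read path. READ cannot go through nbd_co_request() because its reply
 * streams data into @qiov, so the same send/receive/retry loop is
 * open-coded here around nbd_co_receive_cmdread_reply().
 */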
static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
{
int ret, request_ret;
Error *local_err = NULL;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_READ,
.from = offset,
.len = bytes,
};
assert(bytes <= NBD_MAX_BUFFER_SIZE);
assert(!flags);
if (!bytes) {
return 0;
}
/*
* Work around the fact that the block layer doesn't do
* byte-accurate sizing yet - if the read exceeds the server's
* advertised size because the block layer rounded size up, then
* truncate the request to the server and tail-pad with zero.
*/
if (offset >= s->info.size) {
assert(bytes < BDRV_SECTOR_SIZE);
qemu_iovec_memset(qiov, 0, 0, bytes);
return 0;
}
if (offset + bytes > s->info.size) {
uint64_t slop = offset + bytes - s->info.size;
assert(slop < BDRV_SECTOR_SIZE);
qemu_iovec_memset(qiov, bytes - slop, 0, slop);
request.len -= slop;
}
do {
ret = nbd_co_send_request(bs, &request, NULL);
if (ret < 0) {
continue;
}
ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
&request_ret, &local_err);
if (local_err) {
trace_nbd_co_request_fail(request.from, request.len, request.handle,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));
error_free(local_err);
local_err = NULL;
}
} while (ret < 0 && nbd_client_connecting_wait(s));
return ret ? ret : request_ret;
}
static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_WRITE,
.from = offset,
.len = bytes,
};
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (flags & BDRV_REQ_FUA) {
assert(s->info.flags & NBD_FLAG_SEND_FUA);
request.flags |= NBD_CMD_FLAG_FUA;
}
assert(bytes <= NBD_MAX_BUFFER_SIZE);
if (!bytes) {
return 0;
}
return nbd_co_request(bs, &request, qiov);
}
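/*
 * Flag translation performed below (as implemented here, not normative):
 *   BDRV_REQ_FUA          -> NBD_CMD_FLAG_FUA (only valid when the server
 *                            advertised NBD_FLAG_SEND_FUA)
 *   !BDRV_REQ_MAY_UNMAP   -> NBD_CMD_FLAG_NO_HOLE (result must read back
 *                            as zeroes but remain allocated)
 *   BDRV_REQ_NO_FALLBACK  -> NBD_CMD_FLAG_FAST_ZERO
 */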
static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
.len = bytes,
};
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
return -ENOTSUP;
}
if (flags & BDRV_REQ_FUA) {
assert(s->info.flags & NBD_FLAG_SEND_FUA);
request.flags |= NBD_CMD_FLAG_FUA;
}
if (!(flags & BDRV_REQ_MAY_UNMAP)) {
request.flags |= NBD_CMD_FLAG_NO_HOLE;
}
if (flags & BDRV_REQ_NO_FALLBACK) {
assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
request.flags |= NBD_CMD_FLAG_FAST_ZERO;
}
if (!bytes) {
return 0;
}
return nbd_co_request(bs, &request, NULL);
}
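/*
 * Flush degenerates to a successful no-op when the server does not
 * advertise NBD_FLAG_SEND_FLUSH and therefore cannot accept the command.
 */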
static int nbd_client_co_flush(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = { .type = NBD_CMD_FLUSH };
if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
return 0;
}
request.from = 0;
request.len = 0;
return nbd_co_request(bs, &request, NULL);
}
static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
int bytes)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
.len = bytes,
};
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
return 0;
}
return nbd_co_request(bs, &request, NULL);
}
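/*
 * Implements .bdrv_co_block_status. The request is sent with
 * NBD_CMD_FLAG_REQ_ONE, so the server describes a single extent and
 * *pnum may be smaller than @bytes; the block layer then re-queries for
 * the remainder. Servers that did not negotiate the base:allocation
 * meta context get a blanket "data" answer.
 */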
static int coroutine_fn nbd_client_co_block_status(
BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int ret, request_ret;
NBDExtent extent = { 0 };
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
Error *local_err = NULL;
NBDRequest request = {
.type = NBD_CMD_BLOCK_STATUS,
.from = offset,
.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
MIN(bytes, s->info.size - offset)),
.flags = NBD_CMD_FLAG_REQ_ONE,
};
if (!s->info.base_allocation) {
*pnum = bytes;
*map = offset;
*file = bs;
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
/*
* Work around the fact that the block layer doesn't do
* byte-accurate sizing yet - if the status request exceeds the
* server's advertised size because the block layer rounded size
* up, we truncated the request to the server (above), or are
* called on just the hole.
*/
if (offset >= s->info.size) {
*pnum = bytes;
assert(bytes < BDRV_SECTOR_SIZE);
/* Intentionally don't report offset_valid for the hole */
return BDRV_BLOCK_ZERO;
}
if (s->info.min_block) {
assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
}
do {
ret = nbd_co_send_request(bs, &request, NULL);
if (ret < 0) {
continue;
}
ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
&extent, &request_ret,
&local_err);
if (local_err) {
trace_nbd_co_request_fail(request.from, request.len, request.handle,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));
error_free(local_err);
local_err = NULL;
}
} while (ret < 0 && nbd_client_connecting_wait(s));
if (ret < 0 || request_ret < 0) {
return ret ? ret : request_ret;
}
assert(extent.length);
*pnum = extent.length;
*map = offset;
*file = bs;
return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
(extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
BDRV_BLOCK_OFFSET_VALID;
}
static int nbd_client_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
error_setg(errp, "Can't reopen read-only NBD mount as read/write");
return -EACCES;
}
return 0;
}
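/*
 * Yank callback, registered per node: forces the client into
 * NBD_CLIENT_QUIT (so no reconnect is attempted) and shuts down the
 * socket in both directions so that any coroutine blocked in I/O wakes
 * up with an error.
 */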
static void nbd_yank(void *opaque)
{
BlockDriverState *bs = opaque;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
qio_channel_shutdown(QIO_CHANNEL(s->sioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
static void nbd_client_close(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = { .type = NBD_CMD_DISC };
if (s->ioc) {
nbd_send_request(s->ioc, &request);
}
nbd_teardown_connection(bs);
}
static int nbd_establish_connection(BlockDriverState *bs,
SocketAddress *saddr,
Error **errp)
{
ERRP_GUARD();
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
s->sioc = qio_channel_socket_new();
qio_channel_set_name(QIO_CHANNEL(s->sioc), "nbd-client");
qio_channel_socket_connect_sync(s->sioc, saddr, errp);
if (*errp) {
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
return -1;
}
yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name), nbd_yank, bs);
qio_channel_set_delay(QIO_CHANNEL(s->sioc), false);
return 0;
}
/* nbd_client_handshake takes ownership of s->sioc; on failure it is unref'ed. */
static int nbd_client_handshake(BlockDriverState *bs, Error **errp)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
AioContext *aio_context = bdrv_get_aio_context(bs);
int ret;
trace_nbd_client_handshake(s->export);
qio_channel_set_blocking(QIO_CHANNEL(s->sioc), false, NULL);
qio_channel_attach_aio_context(QIO_CHANNEL(s->sioc), aio_context);
s->info.request_sizes = true;
s->info.structured_reply = true;
s->info.base_allocation = true;
s->info.x_dirty_bitmap = g_strdup(s->x_dirty_bitmap);
s->info.name = g_strdup(s->export ?: "");
ret = nbd_receive_negotiate(aio_context, QIO_CHANNEL(s->sioc), s->tlscreds,
s->hostname, &s->ioc, &s->info, errp);
g_free(s->info.x_dirty_bitmap);
g_free(s->info.name);
if (ret < 0) {
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
nbd_yank, bs);
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
return ret;
}
if (s->x_dirty_bitmap) {
if (!s->info.base_allocation) {
error_setg(errp, "requested x-dirty-bitmap %s not found",
s->x_dirty_bitmap);
ret = -EINVAL;
goto fail;
}
if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
s->alloc_depth = true;
}
}
if (s->info.flags & NBD_FLAG_READ_ONLY) {
ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
if (ret < 0) {
goto fail;
}
}
if (s->info.flags & NBD_FLAG_SEND_FUA) {
bs->supported_write_flags = BDRV_REQ_FUA;
bs->supported_zero_flags |= BDRV_REQ_FUA;
}
if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
}
}
if (!s->ioc) {
s->ioc = QIO_CHANNEL(s->sioc);
object_ref(OBJECT(s->ioc));
}
trace_nbd_client_handshake_success(s->export);
return 0;
fail:
/*
* We have connected, but must fail for other reasons.
* Send NBD_CMD_DISC as a courtesy to the server.
*/
{
NBDRequest request = { .type = NBD_CMD_DISC };
nbd_send_request(s->ioc ?: QIO_CHANNEL(s->sioc), &request);
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
nbd_yank, bs);
object_unref(OBJECT(s->sioc));
s->sioc = NULL;
return ret;
}
}
/*
* Parse nbd_open options
*/
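/*
 * Accepted URI forms (examples; the default port is NBD_DEFAULT_PORT):
 *   nbd://localhost:10809/export
 *   nbd+tcp://localhost/export
 *   nbd+unix:///export?socket=/tmp/nbd.sock
 * TCP URIs must not carry query parameters; Unix URIs must carry exactly
 * the "socket" parameter.
 */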
static int nbd_parse_uri(const char *filename, QDict *options)
{
URI *uri;
const char *p;
QueryParams *qp = NULL;
int ret = 0;
bool is_unix;
uri = uri_parse(filename);
if (!uri) {
return -EINVAL;
}
/* transport */
if (!g_strcmp0(uri->scheme, "nbd")) {
is_unix = false;
} else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
is_unix = false;
} else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
is_unix = true;
} else {
ret = -EINVAL;
goto out;
}
p = uri->path ? uri->path : "";
if (p[0] == '/') {
p++;
}
if (p[0]) {
qdict_put_str(options, "export", p);
}
qp = query_params_parse(uri->query);
if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
ret = -EINVAL;
goto out;
}
if (is_unix) {
/* nbd+unix:///export?socket=path */
if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
ret = -EINVAL;
goto out;
}
qdict_put_str(options, "server.type", "unix");
qdict_put_str(options, "server.path", qp->p[0].value);
} else {
QString *host;
char *port_str;
/* nbd[+tcp]://host[:port]/export */
if (!uri->server) {
ret = -EINVAL;
goto out;
}
/* strip braces from literal IPv6 address */
if (uri->server[0] == '[') {
host = qstring_from_substr(uri->server, 1,
strlen(uri->server) - 1);
} else {
host = qstring_from_str(uri->server);
}
qdict_put_str(options, "server.type", "inet");
qdict_put(options, "server.host", host);
port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
qdict_put_str(options, "server.port", port_str);
g_free(port_str);
}
out:
if (qp) {
query_params_free(qp);
}
uri_free(uri);
return ret;
}
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
const QDictEntry *e;
for (e = qdict_first(options); e; e = qdict_next(options, e)) {
if (!strcmp(e->key, "host") ||
!strcmp(e->key, "port") ||
!strcmp(e->key, "path") ||
!strcmp(e->key, "export") ||
strstart(e->key, "server.", NULL))
{
error_setg(errp, "Option '%s' cannot be used with a file name",
e->key);
return true;
}
}
return false;
}
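/*
 * Legacy filename syntax, translated into the same "server.*"/"export"
 * options that the URI parser produces:
 *   nbd:unix:/path/to/socket[:exportname=name]
 *   nbd:host[:port][:exportname=name]
 * EN_OPTSTR is the ":exportname=" separator defined earlier in this file.
 */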
static void nbd_parse_filename(const char *filename, QDict *options,
Error **errp)
{
g_autofree char *file = NULL;
char *export_name;
const char *host_spec;
const char *unixpath;
if (nbd_has_filename_options_conflict(options, errp)) {
return;
}
if (strstr(filename, "://")) {
int ret = nbd_parse_uri(filename, options);
if (ret < 0) {
error_setg(errp, "No valid URL specified");
}
return;
}
file = g_strdup(filename);
export_name = strstr(file, EN_OPTSTR);
if (export_name) {
if (export_name[strlen(EN_OPTSTR)] == 0) {
return;
}
export_name[0] = 0; /* truncate 'file' */
export_name += strlen(EN_OPTSTR);
qdict_put_str(options, "export", export_name);
}
/* extract the host_spec - fail if it's not nbd:... */
if (!strstart(file, "nbd:", &host_spec)) {
error_setg(errp, "File name string for NBD must start with 'nbd:'");
return;
}
if (!*host_spec) {
return;
}
/* are we a UNIX or TCP socket? */
if (strstart(host_spec, "unix:", &unixpath)) {
qdict_put_str(options, "server.type", "unix");
qdict_put_str(options, "server.path", unixpath);
} else {
InetSocketAddress *addr = g_new(InetSocketAddress, 1);
if (inet_parse(addr, host_spec, errp)) {
goto out_inet;
}
qdict_put_str(options, "server.type", "inet");
qdict_put_str(options, "server.host", addr->host);
qdict_put_str(options, "server.port", addr->port);
out_inet:
qapi_free_InetSocketAddress(addr);
}
}
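/*
 * Translate the legacy top-level @path/@host/@port options into the
 * structured "server.*" form, e.g. (hypothetical option strings):
 *   path=/tmp/nbd.sock        -> server.type=unix,server.path=/tmp/nbd.sock
 *   host=localhost,port=10810 -> server.type=inet,server.host=localhost,
 *                                server.port=10810
 * Mixing the legacy and structured styles is rejected.
 */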
static bool nbd_process_legacy_socket_options(QDict *output_options,
QemuOpts *legacy_opts,
Error **errp)
{
const char *path = qemu_opt_get(legacy_opts, "path");
const char *host = qemu_opt_get(legacy_opts, "host");
const char *port = qemu_opt_get(legacy_opts, "port");
const QDictEntry *e;
if (!path && !host && !port) {
return true;
}
for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
{
if (strstart(e->key, "server.", NULL)) {
error_setg(errp, "Cannot use 'server' and path/host/port at the "
"same time");
return false;
}
}
if (path && host) {
error_setg(errp, "path and host may not be used at the same time");
return false;
} else if (path) {
if (port) {
error_setg(errp, "port may not be used without host");
return false;
}
qdict_put_str(output_options, "server.type", "unix");
qdict_put_str(output_options, "server.path", path);
} else if (host) {
qdict_put_str(output_options, "server.type", "inet");
qdict_put_str(output_options, "server.host", host);
qdict_put_str(output_options, "server.port",
port ?: stringify(NBD_DEFAULT_PORT));
}
return true;
}
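/*
 * Extract the "server." sub-dictionary from @options and run it through
 * the QAPI visitor to build a SocketAddress. Returns NULL and sets @errp
 * if the address is missing or malformed.
 */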
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
Error **errp)
{
SocketAddress *saddr = NULL;
QDict *addr = NULL;
Visitor *iv = NULL;
qdict_extract_subqdict(options, &addr, "server.");
if (!qdict_size(addr)) {
error_setg(errp, "NBD server address missing");
goto done;
}
iv = qobject_input_visitor_new_flat_confused(addr, errp);
if (!iv) {
goto done;
}
if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
goto done;
}
done:
qobject_unref(addr);
visit_free(iv);
return saddr;
}
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
Object *obj;
QCryptoTLSCreds *creds;
obj = object_resolve_path_component(
object_get_objects_root(), id);
if (!obj) {
error_setg(errp, "No TLS credentials with id '%s'",
id);
return NULL;
}
creds = (QCryptoTLSCreds *)
object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
if (!creds) {
error_setg(errp, "Object with id '%s' is not TLS credentials",
id);
return NULL;
}
if (creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) {
error_setg(errp,
"Expecting TLS credentials with a client endpoint");
return NULL;
}
object_ref(obj);
return creds;
}
static QemuOptsList nbd_runtime_opts = {
.name = "nbd",
.head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
.desc = {
{
.name = "host",
.type = QEMU_OPT_STRING,
.help = "TCP host to connect to",
},
{
.name = "port",
.type = QEMU_OPT_STRING,
.help = "TCP port to connect to",
},
{
.name = "path",
.type = QEMU_OPT_STRING,
.help = "Unix socket path to connect to",
},
{
.name = "export",
.type = QEMU_OPT_STRING,
.help = "Name of the NBD export to open",
},
{
.name = "tls-creds",
.type = QEMU_OPT_STRING,
.help = "ID of the TLS credentials to use",
},
{
.name = "x-dirty-bitmap",
.type = QEMU_OPT_STRING,
.help = "experimental: expose named dirty bitmap in place of "
"block status",
},
{
.name = "reconnect-delay",
.type = QEMU_OPT_NUMBER,
.help = "On an unexpected disconnect, the nbd client tries to "
"connect again until succeeding or encountering a serious "
"error. During the first @reconnect-delay seconds, all "
"requests are paused and will be rerun on a successful "
"reconnect. After that time, any delayed requests and all "
"future requests before a successful reconnect will "
"immediately fail. Default 0",
},
{ /* end of list */ }
},
};
static int nbd_process_options(BlockDriverState *bs, QDict *options,
Error **errp)
{
BDRVNBDState *s = bs->opaque;
QemuOpts *opts;
int ret = -EINVAL;
opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
goto error;
}
/* Translate @host, @port, and @path to a SocketAddress */
if (!nbd_process_legacy_socket_options(options, opts, errp)) {
goto error;
}
/* Pop the config into our state object. Exit if invalid. */
s->saddr = nbd_config(s, options, errp);
if (!s->saddr) {
goto error;
}
s->export = g_strdup(qemu_opt_get(opts, "export"));
if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
error_setg(errp, "export name too long to send to server");
goto error;
}
s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
if (s->tlscredsid) {
s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
if (!s->tlscreds) {
goto error;
}
/* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
error_setg(errp, "TLS only supported over IP sockets");
goto error;
}
s->hostname = s->saddr->u.inet.host;
}
s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
error_setg(errp, "x-dirty-bitmap query too long to send to server");
goto error;
}
s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
ret = 0;
error:
if (ret < 0) {
nbd_clear_bdrvstate(s);
}
qemu_opts_del(opts);
return ret;
}
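/*
 * .bdrv_file_open entry point: process the options, register the yank
 * instance, establish the TCP/Unix connection and run the NBD handshake,
 * then spawn connection_co (nbd_connection_entry) to read replies and
 * handle reconnects in the background.
 */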
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
int ret;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
ret = nbd_process_options(bs, options, errp);
if (ret < 0) {
return ret;
}
s->bs = bs;
qemu_co_mutex_init(&s->send_mutex);
qemu_co_queue_init(&s->free_sema);
if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
return -EEXIST;
}
/*
* establish TCP connection, return error if it fails
* TODO: Configurable retry-until-timeout behaviour.
*/
if (nbd_establish_connection(bs, s->saddr, errp) < 0) {
yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
return -ECONNREFUSED;
}
ret = nbd_client_handshake(bs, errp);
if (ret < 0) {
yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
nbd_clear_bdrvstate(s);
return ret;
}
/* successfully connected */
s->state = NBD_CLIENT_CONNECTED;
nbd_init_connect_thread(s);
s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
bdrv_inc_in_flight(bs);
aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
return 0;
}
static int nbd_co_flush(BlockDriverState *bs)
{
return nbd_client_co_flush(bs);
}
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
uint32_t min = s->info.min_block;
uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
/*
* If the server did not advertise an alignment:
* - a size that is not sector-aligned implies that an alignment
* of 1 can be used to access those tail bytes
* - advertisement of block status requires an alignment of 1, so
* that we don't violate block layer constraints that block
* status is always aligned (as we can't control whether the
* server will report sub-sector extents, such as a hole at EOF
* on an unaligned POSIX file)
* - otherwise, assume the server is so old that we are safer avoiding
* sub-sector requests
*/
if (!min) {
min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
}
bs->bl.request_alignment = min;
bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
bs->bl.max_pwrite_zeroes = max;
bs->bl.max_transfer = max;
if (s->info.opt_block &&
s->info.opt_block > bs->bl.opt_transfer) {
bs->bl.opt_transfer = s->info.opt_block;
}
}
static void nbd_close(BlockDriverState *bs)
{
BDRVNBDState *s = bs->opaque;
nbd_client_close(bs);
yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
nbd_clear_bdrvstate(s);
}
/*
* NBD cannot truncate, but if the caller asks to truncate to the same size, or
* to a smaller size with exact=false, there is no reason to fail the
* operation.
*
 * Preallocation mode is ignored since it does not seem useful to fail when
* we never change anything.
*/
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
bool exact, PreallocMode prealloc,
BdrvRequestFlags flags, Error **errp)
{
BDRVNBDState *s = bs->opaque;
if (offset != s->info.size && exact) {
error_setg(errp, "Cannot resize NBD nodes");
return -ENOTSUP;
}
if (offset > s->info.size) {
error_setg(errp, "Cannot grow NBD nodes");
return -EINVAL;
}
return 0;
}
static int64_t nbd_getlength(BlockDriverState *bs)
{
BDRVNBDState *s = bs->opaque;
return s->info.size;
}
static void nbd_refresh_filename(BlockDriverState *bs)
{
BDRVNBDState *s = bs->opaque;
const char *host = NULL, *port = NULL, *path = NULL;
size_t len = 0;
if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
const InetSocketAddress *inet = &s->saddr->u.inet;
if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
host = inet->host;
port = inet->port;
}
} else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
path = s->saddr->u.q_unix.path;
} /* else can't represent as pseudo-filename */
if (path && s->export) {
len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"nbd+unix:///%s?socket=%s", s->export, path);
} else if (path && !s->export) {
len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"nbd+unix://?socket=%s", path);
} else if (host && s->export) {
len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"nbd://%s:%s/%s", host, port, s->export);
} else if (host && !s->export) {
len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"nbd://%s:%s", host, port);
}
if (len >= sizeof(bs->exact_filename)) {
/* Name is too long to represent exactly, so leave it empty. */
bs->exact_filename[0] = '\0';
}
}
static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
/* The generic bdrv_dirname() implementation is able to work out some
* directory name for NBD nodes, but that would be wrong. So far there is no
* specification for how "export paths" would work, so NBD does not have
* directory names. */
error_setg(errp, "Cannot generate a base directory for NBD nodes");
return NULL;
}
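/*
 * Options that pin down which remote image this node accesses; the
 * generic bdrv_refresh_filename() machinery treats them as significant
 * when it has to fall back to a json:{} pseudo-filename.
 */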
static const char *const nbd_strong_runtime_opts[] = {
"path",
"host",
"port",
"export",
"tls-creds",
"server.",
NULL
};
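/*
 * Three driver registrations sharing every callback; they differ only in
 * .protocol_name, matching the nbd://, nbd+tcp:// and nbd+unix:// schemes
 * accepted by nbd_parse_uri().
 */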
static BlockDriver bdrv_nbd = {
.format_name = "nbd",
.protocol_name = "nbd",
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_co_create_opts = bdrv_co_create_opts_simple,
.create_opts = &bdrv_create_opts_simple,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_pdiscard = nbd_client_co_pdiscard,
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
.strong_runtime_opts = nbd_strong_runtime_opts,
};
static BlockDriver bdrv_nbd_tcp = {
.format_name = "nbd",
.protocol_name = "nbd+tcp",
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_co_create_opts = bdrv_co_create_opts_simple,
.create_opts = &bdrv_create_opts_simple,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_pdiscard = nbd_client_co_pdiscard,
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
.strong_runtime_opts = nbd_strong_runtime_opts,
};
static BlockDriver bdrv_nbd_unix = {
.format_name = "nbd",
.protocol_name = "nbd+unix",
.instance_size = sizeof(BDRVNBDState),
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_co_create_opts = bdrv_co_create_opts_simple,
.create_opts = &bdrv_create_opts_simple,
.bdrv_file_open = nbd_open,
.bdrv_reopen_prepare = nbd_client_reopen_prepare,
.bdrv_co_preadv = nbd_client_co_preadv,
.bdrv_co_pwritev = nbd_client_co_pwritev,
.bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_pdiscard = nbd_client_co_pdiscard,
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
.strong_runtime_opts = nbd_strong_runtime_opts,
};
static void bdrv_nbd_init(void)
{
bdrv_register(&bdrv_nbd);
bdrv_register(&bdrv_nbd_tcp);
bdrv_register(&bdrv_nbd_unix);
}
block_init(bdrv_nbd_init);