Merge tag 'pull-nbd-2023-07-19' of https://repo.or.cz/qemu/ericb into staging

NBD patches through 2023-07-19

- Denis V. Lunev: fix hang with 'ssh ... "qemu-nbd -c"'
- Eric Blake: preliminary work towards NBD 64-bit extensions

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEccLMIrHEYCkn0vOqp6FrSiUnQ2oFAmS4RwcACgkQp6FrSiUn
# Q2pXfQf/clnttPdw9BW2cJltFRKeMeZrgn8mut0S7jhC0DWIy6zanzp07MylryHP
# EyJ++dCbLEg8mueThL/n5mKsTS/OECtfZO9Ot11WmZqDZVtLKorfmy7YVI3VwMjI
# yQqrUIwiYxzZOkPban/MXofY6vJmuia5aGkEmYUyKiHvsLF3Hk2gHPB/qa2S+U6I
# QDmC032/L+/LgVkK5r/1vamwJNP29QI4DNp3RiTtcMK5sEZJfMsAZSxFDDdH2pqi
# 5gyVqw0zNl3vz6znoVy0XZ/8OUVloPKHswyf7xLlBukY1GL5D+aiXz2ilwBvk9aM
# SoZzYvaOOBDyJhSjapOvseTqXTNeqQ==
# =TB9t
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 19 Jul 2023 21:26:47 BST
# gpg:                using RSA key 71C2CC22B1C4602927D2F3AAA7A16B4A2527436A
# gpg: Good signature from "Eric Blake <eblake@redhat.com>" [full]
# gpg:                 aka "Eric Blake (Free Software Programmer) <ebb9@byu.net>" [full]
# gpg:                 aka "[jpeg image of size 6874]" [full]
# Primary key fingerprint: 71C2 CC22 B1C4 6029 27D2 F3AA A7A1 6B4A 2527 436A

* tag 'pull-nbd-2023-07-19' of https://repo.or.cz/qemu/ericb:
  nbd: Use enum for various negotiation modes
  nbd/client: Add safety check on chunk payload length
  nbd/client: Simplify cookie vs. index computation
  nbd: s/handle/cookie/ to match NBD spec
  nbd/server: Refactor to pass full request around
  nbd/server: Prepare for alternate-size headers
  nbd: Consistent typedef usage in header
  nbd/client: Use smarter assert
  qemu-nbd: make verbose bool and local variable in main()
  qemu-nbd: handle dup2() error when qemu-nbd finished setup process
  qemu-nbd: properly report error on error in dup2() after qemu_daemon()
  qemu-nbd: properly report error if qemu_daemon() is failed
  qemu-nbd: fix regression with qemu-nbd --fork run over ssh
  qemu-nbd: pass structure into nbd_client_thread instead of plain char*

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit d1181d2937
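
For orientation before the diff: the core of the block/nbd.c changes is the switch from an XOR-based handle<->index mapping to a simple off-by-one cookie<->index mapping (see the macro change near the top of the file below). The following is a minimal, self-contained sketch of the two schemes; the four macro bodies are copied from the diff, while the round-trip check around them is illustrative only and not QEMU code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old scheme: the wire "handle" is the request index XORed with the
 * BDRVNBDState pointer, so any 64-bit value (including 0) can be valid. */
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index) ^ (uint64_t)(intptr_t)(bs))

/* New scheme: the wire "cookie" is simply index + 1, which leaves 0 free
 * to mean "no request". */
#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index)  ((index) + 1)

int main(void)
{
    char fake_bs;                        /* stand-in for a BDRVNBDState * */

    for (uint64_t i = 0; i < 16; i++) {  /* MAX_NBD_REQUESTS is 16 */
        uint64_t handle = INDEX_TO_HANDLE(&fake_bs, i);
        uint64_t cookie = INDEX_TO_COOKIE(i);

        assert(HANDLE_TO_INDEX(&fake_bs, handle) == i);
        assert(COOKIE_TO_INDEX(cookie) == i);
        assert(cookie != 0);             /* 0 is now reserved as "unused" */
    }
    printf("both mappings round-trip for indices 0..15\n");
    return 0;
}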
96 block/nbd.c
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* QEMU Block driver for NBD
|
||||
* QEMU Block driver for NBD
|
||||
*
|
||||
* Copyright (c) 2019 Virtuozzo International GmbH.
|
||||
* Copyright (C) 2016 Red Hat, Inc.
|
||||
* Copyright Red Hat
|
||||
* Copyright (C) 2008 Bull S.A.S.
|
||||
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
|
||||
*
|
||||
@ -50,8 +50,8 @@
|
||||
#define EN_OPTSTR ":exportname="
|
||||
#define MAX_NBD_REQUESTS 16
|
||||
|
||||
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
|
||||
#define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
|
||||
#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
|
||||
#define INDEX_TO_COOKIE(index) ((index) + 1)
|
||||
|
||||
typedef struct {
|
||||
Coroutine *coroutine;
|
||||
@ -417,25 +417,25 @@ static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
|
||||
reconnect_delay_timer_del(s);
|
||||
}
|
||||
|
||||
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
|
||||
{
|
||||
int ret;
|
||||
uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
|
||||
uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
|
||||
QEMU_LOCK_GUARD(&s->receive_mutex);
|
||||
|
||||
while (true) {
|
||||
if (s->reply.handle == handle) {
|
||||
if (s->reply.cookie == cookie) {
|
||||
/* We are done */
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (s->reply.handle != 0) {
|
||||
if (s->reply.cookie != 0) {
|
||||
/*
|
||||
* Some other request is being handled now. It should already be
|
||||
* woken by whoever set s->reply.handle (or never wait in this
|
||||
* woken by whoever set s->reply.cookie (or never wait in this
|
||||
* yield). So, we should not wake it here.
|
||||
*/
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
ind2 = COOKIE_TO_INDEX(s->reply.cookie);
|
||||
assert(!s->requests[ind2].receiving);
|
||||
|
||||
s->requests[ind].receiving = true;
|
||||
@ -445,9 +445,9 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
/*
|
||||
* We may be woken for 2 reasons:
|
||||
* 1. From this function, executing in parallel coroutine, when our
|
||||
* handle is received.
|
||||
* cookie is received.
|
||||
* 2. From nbd_co_receive_one_chunk(), when previous request is
|
||||
* finished and s->reply.handle set to 0.
|
||||
* finished and s->reply.cookie set to 0.
|
||||
* Anyway, it's OK to lock the mutex and go to the next iteration.
|
||||
*/
|
||||
|
||||
@ -456,8 +456,8 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
continue;
|
||||
}
|
||||
|
||||
/* We are under mutex and handle is 0. We have to do the dirty work. */
|
||||
assert(s->reply.handle == 0);
|
||||
/* We are under mutex and cookie is 0. We have to do the dirty work. */
|
||||
assert(s->reply.cookie == 0);
|
||||
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
|
||||
if (ret <= 0) {
|
||||
ret = ret ? ret : -EIO;
|
||||
@ -468,12 +468,12 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
ind2 = COOKIE_TO_INDEX(s->reply.cookie);
|
||||
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (s->reply.handle == handle) {
|
||||
if (s->reply.cookie == cookie) {
|
||||
/* We are done */
|
||||
return 0;
|
||||
}
|
||||
@ -519,7 +519,7 @@ nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
request->handle = INDEX_TO_HANDLE(s, i);
|
||||
request->cookie = INDEX_TO_COOKIE(i);
|
||||
|
||||
assert(s->ioc);
|
||||
|
||||
@ -828,11 +828,11 @@ static coroutine_fn int nbd_co_receive_structured_payload(
|
||||
* corresponding to the server's error reply), and errp is unchanged.
|
||||
*/
|
||||
static coroutine_fn int nbd_co_do_receive_one_chunk(
|
||||
BDRVNBDState *s, uint64_t handle, bool only_structured,
|
||||
BDRVNBDState *s, uint64_t cookie, bool only_structured,
|
||||
int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
int i = HANDLE_TO_INDEX(s, handle);
|
||||
int i = COOKIE_TO_INDEX(cookie);
|
||||
void *local_payload = NULL;
|
||||
NBDStructuredReplyChunk *chunk;
|
||||
|
||||
@ -841,14 +841,14 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
|
||||
}
|
||||
*request_ret = 0;
|
||||
|
||||
ret = nbd_receive_replies(s, handle);
|
||||
ret = nbd_receive_replies(s, cookie);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Connection closed");
|
||||
return -EIO;
|
||||
}
|
||||
assert(s->ioc);
|
||||
|
||||
assert(s->reply.handle == handle);
|
||||
assert(s->reply.cookie == cookie);
|
||||
|
||||
if (nbd_reply_is_simple(&s->reply)) {
|
||||
if (only_structured) {
|
||||
@ -918,11 +918,11 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
|
||||
* Return value is a fatal error code or normal nbd reply error code
|
||||
*/
|
||||
static coroutine_fn int nbd_co_receive_one_chunk(
|
||||
BDRVNBDState *s, uint64_t handle, bool only_structured,
|
||||
BDRVNBDState *s, uint64_t cookie, bool only_structured,
|
||||
int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
|
||||
Error **errp)
|
||||
{
|
||||
int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
|
||||
int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
|
||||
request_ret, qiov, payload, errp);
|
||||
|
||||
if (ret < 0) {
|
||||
@ -932,7 +932,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
|
||||
/* For assert at loop start in nbd_connection_entry */
|
||||
*reply = s->reply;
|
||||
}
|
||||
s->reply.handle = 0;
|
||||
s->reply.cookie = 0;
|
||||
|
||||
nbd_recv_coroutines_wake(s);
|
||||
|
||||
@ -975,10 +975,10 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
|
||||
* NBD_FOREACH_REPLY_CHUNK
|
||||
* The pointer stored in @payload requires g_free() to free it.
|
||||
*/
|
||||
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
|
||||
#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
|
||||
qiov, reply, payload) \
|
||||
for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
|
||||
nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
|
||||
nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
|
||||
|
||||
/*
|
||||
* nbd_reply_chunk_iter_receive
|
||||
@ -986,7 +986,7 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
|
||||
*/
|
||||
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
NBDReplyChunkIter *iter,
|
||||
uint64_t handle,
|
||||
uint64_t cookie,
|
||||
QEMUIOVector *qiov,
|
||||
NBDReply *reply,
|
||||
void **payload)
|
||||
@ -1005,7 +1005,7 @@ static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
reply = &local_reply;
|
||||
}
|
||||
|
||||
ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
|
||||
ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
|
||||
&request_ret, qiov, reply, payload,
|
||||
&local_err);
|
||||
if (ret < 0) {
|
||||
@ -1038,7 +1038,7 @@ static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
|
||||
break_loop:
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
|
||||
s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
|
||||
s->in_flight--;
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
@ -1046,12 +1046,13 @@ break_loop:
|
||||
return false;
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
|
||||
int *request_ret, Error **errp)
|
||||
static int coroutine_fn
|
||||
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
|
||||
int *request_ret, Error **errp)
|
||||
{
|
||||
NBDReplyChunkIter iter;
|
||||
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
|
||||
/* nbd_reply_chunk_iter_receive does all the work */
|
||||
}
|
||||
|
||||
@ -1060,16 +1061,17 @@ static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t han
|
||||
return iter.ret;
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
|
||||
uint64_t offset, QEMUIOVector *qiov,
|
||||
int *request_ret, Error **errp)
|
||||
static int coroutine_fn
|
||||
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
|
||||
uint64_t offset, QEMUIOVector *qiov,
|
||||
int *request_ret, Error **errp)
|
||||
{
|
||||
NBDReplyChunkIter iter;
|
||||
NBDReply reply;
|
||||
void *payload = NULL;
|
||||
Error *local_err = NULL;
|
||||
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
|
||||
qiov, &reply, &payload)
|
||||
{
|
||||
int ret;
|
||||
@ -1112,10 +1114,10 @@ static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t h
|
||||
return iter.ret;
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
|
||||
uint64_t handle, uint64_t length,
|
||||
NBDExtent *extent,
|
||||
int *request_ret, Error **errp)
|
||||
static int coroutine_fn
|
||||
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
|
||||
uint64_t length, NBDExtent *extent,
|
||||
int *request_ret, Error **errp)
|
||||
{
|
||||
NBDReplyChunkIter iter;
|
||||
NBDReply reply;
|
||||
@ -1124,7 +1126,7 @@ static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
|
||||
bool received = false;
|
||||
|
||||
assert(!extent->length);
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
|
||||
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
|
||||
int ret;
|
||||
NBDStructuredReplyChunk *chunk = &reply.structured;
|
||||
|
||||
@ -1194,11 +1196,11 @@ nbd_co_request(BlockDriverState *bs, NBDRequest *request,
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = nbd_co_receive_return_code(s, request->handle,
|
||||
ret = nbd_co_receive_return_code(s, request->cookie,
|
||||
&request_ret, &local_err);
|
||||
if (local_err) {
|
||||
trace_nbd_co_request_fail(request->from, request->len,
|
||||
request->handle, request->flags,
|
||||
request->cookie, request->flags,
|
||||
request->type,
|
||||
nbd_cmd_lookup(request->type),
|
||||
ret, error_get_pretty(local_err));
|
||||
@ -1253,10 +1255,10 @@ nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
|
||||
ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
|
||||
&request_ret, &local_err);
|
||||
if (local_err) {
|
||||
trace_nbd_co_request_fail(request.from, request.len, request.handle,
|
||||
trace_nbd_co_request_fail(request.from, request.len, request.cookie,
|
||||
request.flags, request.type,
|
||||
nbd_cmd_lookup(request.type),
|
||||
ret, error_get_pretty(local_err));
|
||||
@ -1411,11 +1413,11 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
|
||||
ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
|
||||
&extent, &request_ret,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
trace_nbd_co_request_fail(request.from, request.len, request.handle,
|
||||
trace_nbd_co_request_fail(request.from, request.len, request.cookie,
|
||||
request.flags, request.type,
|
||||
nbd_cmd_lookup(request.type),
|
||||
ret, error_get_pretty(local_err));
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2016-2022 Red Hat, Inc.
|
||||
* Copyright Red Hat
|
||||
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
|
||||
*
|
||||
* Network Block Device
|
||||
@ -26,24 +26,26 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/bswap.h"
|
||||
|
||||
typedef struct NBDExport NBDExport;
|
||||
typedef struct NBDClient NBDClient;
|
||||
typedef struct NBDClientConnection NBDClientConnection;
|
||||
|
||||
extern const BlockExportDriver blk_exp_nbd;
|
||||
|
||||
/* Handshake phase structs - this struct is passed on the wire */
|
||||
|
||||
struct NBDOption {
|
||||
typedef struct NBDOption {
|
||||
uint64_t magic; /* NBD_OPTS_MAGIC */
|
||||
uint32_t option; /* NBD_OPT_* */
|
||||
uint32_t length;
|
||||
} QEMU_PACKED;
|
||||
typedef struct NBDOption NBDOption;
|
||||
} QEMU_PACKED NBDOption;
|
||||
|
||||
struct NBDOptionReply {
|
||||
typedef struct NBDOptionReply {
|
||||
uint64_t magic; /* NBD_REP_MAGIC */
|
||||
uint32_t option; /* NBD_OPT_* */
|
||||
uint32_t type; /* NBD_REP_* */
|
||||
uint32_t length;
|
||||
} QEMU_PACKED;
|
||||
typedef struct NBDOptionReply NBDOptionReply;
|
||||
} QEMU_PACKED NBDOptionReply;
|
||||
|
||||
typedef struct NBDOptionReplyMetaContext {
|
||||
NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
|
||||
@ -51,24 +53,33 @@ typedef struct NBDOptionReplyMetaContext {
|
||||
/* metadata context name follows */
|
||||
} QEMU_PACKED NBDOptionReplyMetaContext;
|
||||
|
||||
/* Track results of negotiation */
|
||||
typedef enum NBDMode {
|
||||
/* Keep this list in a continuum of increasing features. */
|
||||
NBD_MODE_OLDSTYLE, /* server lacks newstyle negotiation */
|
||||
NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
|
||||
NBD_MODE_SIMPLE, /* newstyle but only simple replies */
|
||||
NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
|
||||
/* TODO add NBD_MODE_EXTENDED */
|
||||
} NBDMode;
|
||||
|
||||
/* Transmission phase structs
|
||||
*
|
||||
* Note: these are _NOT_ the same as the network representation of an NBD
|
||||
* request and reply!
|
||||
*/
|
||||
struct NBDRequest {
|
||||
uint64_t handle;
|
||||
typedef struct NBDRequest {
|
||||
uint64_t cookie;
|
||||
uint64_t from;
|
||||
uint32_t len;
|
||||
uint16_t flags; /* NBD_CMD_FLAG_* */
|
||||
uint16_t type; /* NBD_CMD_* */
|
||||
};
|
||||
typedef struct NBDRequest NBDRequest;
|
||||
} NBDRequest;
|
||||
|
||||
typedef struct NBDSimpleReply {
|
||||
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
|
||||
uint32_t error;
|
||||
uint64_t handle;
|
||||
uint64_t cookie;
|
||||
} QEMU_PACKED NBDSimpleReply;
|
||||
|
||||
/* Header of all structured replies */
|
||||
@ -76,7 +87,7 @@ typedef struct NBDStructuredReplyChunk {
|
||||
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
|
||||
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
|
||||
uint16_t type; /* NBD_REPLY_TYPE_* */
|
||||
uint64_t handle; /* request handle */
|
||||
uint64_t cookie; /* request handle */
|
||||
uint32_t length; /* length of payload */
|
||||
} QEMU_PACKED NBDStructuredReplyChunk;
|
||||
|
||||
@ -84,40 +95,41 @@ typedef union NBDReply {
|
||||
NBDSimpleReply simple;
|
||||
NBDStructuredReplyChunk structured;
|
||||
struct {
|
||||
/* @magic and @handle fields have the same offset and size both in
|
||||
/*
|
||||
* @magic and @cookie fields have the same offset and size both in
|
||||
* simple reply and structured reply chunk, so let them be accessible
|
||||
* without ".simple." or ".structured." specification
|
||||
*/
|
||||
uint32_t magic;
|
||||
uint32_t _skip;
|
||||
uint64_t handle;
|
||||
uint64_t cookie;
|
||||
} QEMU_PACKED;
|
||||
} NBDReply;
|
||||
|
||||
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
|
||||
typedef struct NBDStructuredReadData {
|
||||
NBDStructuredReplyChunk h; /* h.length >= 9 */
|
||||
/* header's .length >= 9 */
|
||||
uint64_t offset;
|
||||
/* At least one byte of data payload follows, calculated from h.length */
|
||||
} QEMU_PACKED NBDStructuredReadData;
|
||||
|
||||
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
|
||||
typedef struct NBDStructuredReadHole {
|
||||
NBDStructuredReplyChunk h; /* h.length == 12 */
|
||||
/* header's length == 12 */
|
||||
uint64_t offset;
|
||||
uint32_t length;
|
||||
} QEMU_PACKED NBDStructuredReadHole;
|
||||
|
||||
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
|
||||
typedef struct NBDStructuredError {
|
||||
NBDStructuredReplyChunk h; /* h.length >= 6 */
|
||||
/* header's length >= 6 */
|
||||
uint32_t error;
|
||||
uint16_t message_length;
|
||||
} QEMU_PACKED NBDStructuredError;
|
||||
|
||||
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
|
||||
typedef struct NBDStructuredMeta {
|
||||
NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
|
||||
/* header's length >= 12 (at least one extent) */
|
||||
uint32_t context_id;
|
||||
/* extents follows */
|
||||
} QEMU_PACKED NBDStructuredMeta;
|
||||
@ -282,7 +294,7 @@ static inline bool nbd_reply_type_is_error(int type)
|
||||
#define NBD_ESHUTDOWN 108
|
||||
|
||||
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
|
||||
struct NBDExportInfo {
|
||||
typedef struct NBDExportInfo {
|
||||
/* Set by client before nbd_receive_negotiate() */
|
||||
bool request_sizes;
|
||||
char *x_dirty_bitmap;
|
||||
@ -310,8 +322,7 @@ struct NBDExportInfo {
|
||||
char *description;
|
||||
int n_contexts;
|
||||
char **contexts;
|
||||
};
|
||||
typedef struct NBDExportInfo NBDExportInfo;
|
||||
} NBDExportInfo;
|
||||
|
||||
int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
QCryptoTLSCreds *tlscreds,
|
||||
@ -330,9 +341,6 @@ int nbd_client(int fd);
|
||||
int nbd_disconnect(int fd);
|
||||
int nbd_errno_to_system_errno(int err);
|
||||
|
||||
typedef struct NBDExport NBDExport;
|
||||
typedef struct NBDClient NBDClient;
|
||||
|
||||
void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
|
||||
|
||||
AioContext *nbd_export_aio_context(NBDExport *exp);
|
||||
@ -407,10 +415,9 @@ const char *nbd_rep_lookup(uint32_t rep);
|
||||
const char *nbd_info_lookup(uint16_t info);
|
||||
const char *nbd_cmd_lookup(uint16_t info);
|
||||
const char *nbd_err_lookup(int err);
|
||||
const char *nbd_mode_lookup(NBDMode mode);
|
||||
|
||||
/* nbd/client-connection.c */
|
||||
typedef struct NBDClientConnection NBDClientConnection;
|
||||
|
||||
void nbd_client_connection_enable_retry(NBDClientConnection *conn);
|
||||
|
||||
NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
|
||||
|
79 nbd/client.c
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2016-2019 Red Hat, Inc.
|
||||
* Copyright Red Hat
|
||||
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
|
||||
*
|
||||
* Network Block Device Client Side
|
||||
@ -650,19 +650,20 @@ static int nbd_send_meta_query(QIOChannel *ioc, uint32_t opt,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
uint32_t export_len = strlen(export);
|
||||
uint32_t export_len;
|
||||
uint32_t queries = !!query;
|
||||
uint32_t query_len = 0;
|
||||
uint32_t data_len;
|
||||
char *data;
|
||||
char *p;
|
||||
|
||||
assert(strnlen(export, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
|
||||
export_len = strlen(export);
|
||||
data_len = sizeof(export_len) + export_len + sizeof(queries);
|
||||
assert(export_len <= NBD_MAX_STRING_SIZE);
|
||||
if (query) {
|
||||
assert(strnlen(query, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
|
||||
query_len = strlen(query);
|
||||
data_len += sizeof(query_len) + query_len;
|
||||
assert(query_len <= NBD_MAX_STRING_SIZE);
|
||||
} else {
|
||||
assert(opt == NBD_OPT_LIST_META_CONTEXT);
|
||||
}
|
||||
@ -874,10 +875,7 @@ static int nbd_list_meta_contexts(QIOChannel *ioc,
|
||||
* Start the handshake to the server. After a positive return, the server
|
||||
* is ready to accept additional NBD_OPT requests.
|
||||
* Returns: negative errno: failure talking to server
|
||||
* 0: server is oldstyle, must call nbd_negotiate_finish_oldstyle
|
||||
* 1: server is newstyle, but can only accept EXPORT_NAME
|
||||
* 2: server is newstyle, but lacks structured replies
|
||||
* 3: server is newstyle and set up for structured replies
|
||||
* non-negative: enum NBDMode describing server abilities
|
||||
*/
|
||||
static int nbd_start_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
QCryptoTLSCreds *tlscreds,
|
||||
@ -968,16 +966,16 @@ static int nbd_start_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 2 + result;
|
||||
return result ? NBD_MODE_STRUCTURED : NBD_MODE_SIMPLE;
|
||||
} else {
|
||||
return 1;
|
||||
return NBD_MODE_EXPORT_NAME;
|
||||
}
|
||||
} else if (magic == NBD_CLIENT_MAGIC) {
|
||||
if (tlscreds) {
|
||||
error_setg(errp, "Server does not support STARTTLS");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
return NBD_MODE_OLDSTYLE;
|
||||
} else {
|
||||
error_setg(errp, "Bad server magic received: 0x%" PRIx64, magic);
|
||||
return -EINVAL;
|
||||
@ -1031,6 +1029,9 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
|
||||
result = nbd_start_negotiate(aio_context, ioc, tlscreds, hostname, outioc,
|
||||
info->structured_reply, &zeroes, errp);
|
||||
if (result < 0) {
|
||||
return result;
|
||||
}
|
||||
|
||||
info->structured_reply = false;
|
||||
info->base_allocation = false;
|
||||
@ -1038,8 +1039,8 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
ioc = *outioc;
|
||||
}
|
||||
|
||||
switch (result) {
|
||||
case 3: /* newstyle, with structured replies */
|
||||
switch ((NBDMode)result) {
|
||||
case NBD_MODE_STRUCTURED:
|
||||
info->structured_reply = true;
|
||||
if (base_allocation) {
|
||||
result = nbd_negotiate_simple_meta_context(ioc, info, errp);
|
||||
@ -1049,7 +1050,7 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
info->base_allocation = result == 1;
|
||||
}
|
||||
/* fall through */
|
||||
case 2: /* newstyle, try OPT_GO */
|
||||
case NBD_MODE_SIMPLE:
|
||||
/* Try NBD_OPT_GO first - if it works, we are done (it
|
||||
* also gives us a good message if the server requires
|
||||
* TLS). If it is not available, fall back to
|
||||
@ -1072,7 +1073,7 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
return -EINVAL;
|
||||
}
|
||||
/* fall through */
|
||||
case 1: /* newstyle, but limited to EXPORT_NAME */
|
||||
case NBD_MODE_EXPORT_NAME:
|
||||
/* write the export name request */
|
||||
if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, info->name,
|
||||
errp) < 0) {
|
||||
@ -1088,7 +1089,7 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case 0: /* oldstyle, parse length and flags */
|
||||
case NBD_MODE_OLDSTYLE:
|
||||
if (*info->name) {
|
||||
error_setg(errp, "Server does not support non-empty export names");
|
||||
return -EINVAL;
|
||||
@ -1098,7 +1099,7 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return result;
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
|
||||
@ -1154,10 +1155,13 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
||||
if (tlscreds && sioc) {
|
||||
ioc = sioc;
|
||||
}
|
||||
if (result < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (result) {
|
||||
case 2:
|
||||
case 3:
|
||||
switch ((NBDMode)result) {
|
||||
case NBD_MODE_SIMPLE:
|
||||
case NBD_MODE_STRUCTURED:
|
||||
/* newstyle - use NBD_OPT_LIST to populate array, then try
|
||||
* NBD_OPT_INFO on each array member. If structured replies
|
||||
* are enabled, also try NBD_OPT_LIST_META_CONTEXT. */
|
||||
@ -1178,7 +1182,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
||||
memset(&array[count - 1], 0, sizeof(*array));
|
||||
array[count - 1].name = name;
|
||||
array[count - 1].description = desc;
|
||||
array[count - 1].structured_reply = result == 3;
|
||||
array[count - 1].structured_reply = result == NBD_MODE_STRUCTURED;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
@ -1194,7 +1198,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
||||
break;
|
||||
}
|
||||
|
||||
if (result == 3 &&
|
||||
if (result == NBD_MODE_STRUCTURED &&
|
||||
nbd_list_meta_contexts(ioc, &array[i], errp) < 0) {
|
||||
goto out;
|
||||
}
|
||||
@ -1203,11 +1207,12 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
||||
/* Send NBD_OPT_ABORT as a courtesy before hanging up */
|
||||
nbd_send_opt_abort(ioc);
|
||||
break;
|
||||
case 1: /* newstyle, but limited to EXPORT_NAME */
|
||||
case NBD_MODE_EXPORT_NAME:
|
||||
error_setg(errp, "Server does not support export lists");
|
||||
/* We can't even send NBD_OPT_ABORT, so merely hang up */
|
||||
goto out;
|
||||
case 0: /* oldstyle, parse length and flags */
|
||||
case NBD_MODE_OLDSTYLE:
|
||||
/* Lone export name is implied, but we can parse length and flags */
|
||||
array = g_new0(NBDExportInfo, 1);
|
||||
array->name = g_strdup("");
|
||||
count = 1;
|
||||
@ -1225,7 +1230,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
goto out;
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
*info = array;
|
||||
@ -1349,14 +1354,14 @@ int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
|
||||
{
|
||||
uint8_t buf[NBD_REQUEST_SIZE];
|
||||
|
||||
trace_nbd_send_request(request->from, request->len, request->handle,
|
||||
trace_nbd_send_request(request->from, request->len, request->cookie,
|
||||
request->flags, request->type,
|
||||
nbd_cmd_lookup(request->type));
|
||||
|
||||
stl_be_p(buf, NBD_REQUEST_MAGIC);
|
||||
stw_be_p(buf + 4, request->flags);
|
||||
stw_be_p(buf + 6, request->type);
|
||||
stq_be_p(buf + 8, request->handle);
|
||||
stq_be_p(buf + 8, request->cookie);
|
||||
stq_be_p(buf + 16, request->from);
|
||||
stl_be_p(buf + 24, request->len);
|
||||
|
||||
@ -1382,7 +1387,7 @@ static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply,
|
||||
}
|
||||
|
||||
reply->error = be32_to_cpu(reply->error);
|
||||
reply->handle = be64_to_cpu(reply->handle);
|
||||
reply->cookie = be64_to_cpu(reply->cookie);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1409,9 +1414,21 @@ static int nbd_receive_structured_reply_chunk(QIOChannel *ioc,
|
||||
|
||||
chunk->flags = be16_to_cpu(chunk->flags);
|
||||
chunk->type = be16_to_cpu(chunk->type);
|
||||
chunk->handle = be64_to_cpu(chunk->handle);
|
||||
chunk->cookie = be64_to_cpu(chunk->cookie);
|
||||
chunk->length = be32_to_cpu(chunk->length);
|
||||
|
||||
/*
|
||||
* Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
|
||||
* at 32M, no valid server should send us payload larger than
|
||||
* this. Even if we stopped using REQ_ONE, sane servers will cap
|
||||
* the number of extents they return for block status.
|
||||
*/
|
||||
if (chunk->length > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
|
||||
error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
|
||||
chunk->type, nbd_rep_lookup(chunk->type));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1486,7 +1503,7 @@ int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
|
||||
}
|
||||
trace_nbd_receive_simple_reply(reply->simple.error,
|
||||
nbd_err_lookup(reply->simple.error),
|
||||
reply->handle);
|
||||
reply->cookie);
|
||||
break;
|
||||
case NBD_STRUCTURED_REPLY_MAGIC:
|
||||
ret = nbd_receive_structured_reply_chunk(ioc, &reply->structured, errp);
|
||||
@ -1496,7 +1513,7 @@ int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
|
||||
type = nbd_reply_type_lookup(reply->structured.type);
|
||||
trace_nbd_receive_structured_reply_chunk(reply->structured.flags,
|
||||
reply->structured.type, type,
|
||||
reply->structured.handle,
|
||||
reply->structured.cookie,
|
||||
reply->structured.length);
|
||||
break;
|
||||
default:
|
||||
|
17 nbd/common.c
@ -248,3 +248,20 @@ int nbd_errno_to_system_errno(int err)
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
const char *nbd_mode_lookup(NBDMode mode)
|
||||
{
|
||||
switch (mode) {
|
||||
case NBD_MODE_OLDSTYLE:
|
||||
return "oldstyle";
|
||||
case NBD_MODE_EXPORT_NAME:
|
||||
return "export name only";
|
||||
case NBD_MODE_SIMPLE:
|
||||
return "simple headers";
|
||||
case NBD_MODE_STRUCTURED:
|
||||
return "structured replies";
|
||||
default:
|
||||
return "<unknown>";
|
||||
}
|
||||
}
|
||||
|
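
The NBDMode enum (added to the header earlier in this series) and the nbd_mode_lookup() helper added just above in nbd/common.c replace the old magic negotiation return values 0-3 with symbolic names. Below is a hedged sketch of how a caller might use them; the enum and lookup function are copied from this diff, while the main() harness and the >= capability comparison are illustrative only and not QEMU code.

#include <stdio.h>

typedef enum NBDMode {
    /* Keep this list in a continuum of increasing features. */
    NBD_MODE_OLDSTYLE,    /* server lacks newstyle negotiation */
    NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
    NBD_MODE_SIMPLE,      /* newstyle but only simple replies */
    NBD_MODE_STRUCTURED,  /* newstyle, structured replies enabled */
} NBDMode;

static const char *nbd_mode_lookup(NBDMode mode)
{
    switch (mode) {
    case NBD_MODE_OLDSTYLE:    return "oldstyle";
    case NBD_MODE_EXPORT_NAME: return "export name only";
    case NBD_MODE_SIMPLE:      return "simple headers";
    case NBD_MODE_STRUCTURED:  return "structured replies";
    default:                   return "<unknown>";
    }
}

int main(void)
{
    /* Because the enum is ordered by capability, a caller can test for a
     * feature level with >= instead of matching individual magic values. */
    for (int m = NBD_MODE_OLDSTYLE; m <= NBD_MODE_STRUCTURED; m++) {
        printf("%-20s structured replies: %s\n", nbd_mode_lookup(m),
               m >= NBD_MODE_STRUCTURED ? "yes" : "no");
    }
    return 0;
}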
224 nbd/server.c
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2016-2022 Red Hat, Inc.
|
||||
* Copyright Red Hat
|
||||
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
|
||||
*
|
||||
* Network Block Device Server Side
|
||||
@ -1428,7 +1428,7 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque
|
||||
[ 0 .. 3] magic (NBD_REQUEST_MAGIC)
|
||||
[ 4 .. 5] flags (NBD_CMD_FLAG_FUA, ...)
|
||||
[ 6 .. 7] type (NBD_CMD_READ, ...)
|
||||
[ 8 .. 15] handle
|
||||
[ 8 .. 15] cookie
|
||||
[16 .. 23] from
|
||||
[24 .. 27] len
|
||||
*/
|
||||
@ -1436,7 +1436,7 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque
|
||||
magic = ldl_be_p(buf);
|
||||
request->flags = lduw_be_p(buf + 4);
|
||||
request->type = lduw_be_p(buf + 6);
|
||||
request->handle = ldq_be_p(buf + 8);
|
||||
request->cookie = ldq_be_p(buf + 8);
|
||||
request->from = ldq_be_p(buf + 16);
|
||||
request->len = ldl_be_p(buf + 24);
|
||||
|
||||
@ -1885,15 +1885,15 @@ static int coroutine_fn nbd_co_send_iov(NBDClient *client, struct iovec *iov,
|
||||
}
|
||||
|
||||
static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error,
|
||||
uint64_t handle)
|
||||
uint64_t cookie)
|
||||
{
|
||||
stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
|
||||
stl_be_p(&reply->error, error);
|
||||
stq_be_p(&reply->handle, handle);
|
||||
stq_be_p(&reply->cookie, cookie);
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
|
||||
uint64_t handle,
|
||||
NBDRequest *request,
|
||||
uint32_t error,
|
||||
void *data,
|
||||
size_t len,
|
||||
@ -1906,84 +1906,108 @@ static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
|
||||
{.iov_base = data, .iov_len = len}
|
||||
};
|
||||
|
||||
trace_nbd_co_send_simple_reply(handle, nbd_err, nbd_err_lookup(nbd_err),
|
||||
len);
|
||||
set_be_simple_reply(&reply, nbd_err, handle);
|
||||
assert(!len || !nbd_err);
|
||||
assert(!client->structured_reply || request->type != NBD_CMD_READ);
|
||||
trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
|
||||
nbd_err_lookup(nbd_err), len);
|
||||
set_be_simple_reply(&reply, nbd_err, request->cookie);
|
||||
|
||||
return nbd_co_send_iov(client, iov, len ? 2 : 1, errp);
|
||||
return nbd_co_send_iov(client, iov, 2, errp);
|
||||
}
|
||||
|
||||
static inline void set_be_chunk(NBDStructuredReplyChunk *chunk, uint16_t flags,
|
||||
uint16_t type, uint64_t handle, uint32_t length)
|
||||
/*
|
||||
* Prepare the header of a reply chunk for network transmission.
|
||||
*
|
||||
* On input, @iov is partially initialized: iov[0].iov_base must point
|
||||
* to an uninitialized NBDReply, while the remaining @niov elements
|
||||
* (if any) must be ready for transmission. This function then
|
||||
* populates iov[0] for transmission.
|
||||
*/
|
||||
static inline void set_be_chunk(NBDClient *client, struct iovec *iov,
|
||||
size_t niov, uint16_t flags, uint16_t type,
|
||||
NBDRequest *request)
|
||||
{
|
||||
/* TODO - handle structured vs. extended replies */
|
||||
NBDStructuredReplyChunk *chunk = iov->iov_base;
|
||||
size_t i, length = 0;
|
||||
|
||||
for (i = 1; i < niov; i++) {
|
||||
length += iov[i].iov_len;
|
||||
}
|
||||
assert(length <= NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData));
|
||||
|
||||
iov[0].iov_len = sizeof(*chunk);
|
||||
stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
|
||||
stw_be_p(&chunk->flags, flags);
|
||||
stw_be_p(&chunk->type, type);
|
||||
stq_be_p(&chunk->handle, handle);
|
||||
stq_be_p(&chunk->cookie, request->cookie);
|
||||
stl_be_p(&chunk->length, length);
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_send_structured_done(NBDClient *client,
|
||||
uint64_t handle,
|
||||
Error **errp)
|
||||
static int coroutine_fn nbd_co_send_chunk_done(NBDClient *client,
|
||||
NBDRequest *request,
|
||||
Error **errp)
|
||||
{
|
||||
NBDStructuredReplyChunk chunk;
|
||||
NBDReply hdr;
|
||||
struct iovec iov[] = {
|
||||
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
|
||||
{.iov_base = &hdr},
|
||||
};
|
||||
|
||||
trace_nbd_co_send_structured_done(handle);
|
||||
set_be_chunk(&chunk, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_NONE, handle, 0);
|
||||
|
||||
trace_nbd_co_send_chunk_done(request->cookie);
|
||||
set_be_chunk(client, iov, 1, NBD_REPLY_FLAG_DONE,
|
||||
NBD_REPLY_TYPE_NONE, request);
|
||||
return nbd_co_send_iov(client, iov, 1, errp);
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
|
||||
uint64_t handle,
|
||||
uint64_t offset,
|
||||
void *data,
|
||||
size_t size,
|
||||
bool final,
|
||||
Error **errp)
|
||||
static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
|
||||
NBDRequest *request,
|
||||
uint64_t offset,
|
||||
void *data,
|
||||
size_t size,
|
||||
bool final,
|
||||
Error **errp)
|
||||
{
|
||||
NBDReply hdr;
|
||||
NBDStructuredReadData chunk;
|
||||
struct iovec iov[] = {
|
||||
{.iov_base = &hdr},
|
||||
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
|
||||
{.iov_base = data, .iov_len = size}
|
||||
};
|
||||
|
||||
assert(size);
|
||||
trace_nbd_co_send_structured_read(handle, offset, data, size);
|
||||
set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_OFFSET_DATA, handle,
|
||||
sizeof(chunk) - sizeof(chunk.h) + size);
|
||||
trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
|
||||
set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_OFFSET_DATA, request);
|
||||
stq_be_p(&chunk.offset, offset);
|
||||
|
||||
return nbd_co_send_iov(client, iov, 2, errp);
|
||||
return nbd_co_send_iov(client, iov, 3, errp);
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
|
||||
uint64_t handle,
|
||||
uint32_t error,
|
||||
const char *msg,
|
||||
Error **errp)
|
||||
/*ebb*/
|
||||
static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
|
||||
NBDRequest *request,
|
||||
uint32_t error,
|
||||
const char *msg,
|
||||
Error **errp)
|
||||
{
|
||||
NBDReply hdr;
|
||||
NBDStructuredError chunk;
|
||||
int nbd_err = system_errno_to_nbd_errno(error);
|
||||
struct iovec iov[] = {
|
||||
{.iov_base = &hdr},
|
||||
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
|
||||
{.iov_base = (char *)msg, .iov_len = msg ? strlen(msg) : 0},
|
||||
};
|
||||
|
||||
assert(nbd_err);
|
||||
trace_nbd_co_send_structured_error(handle, nbd_err,
|
||||
nbd_err_lookup(nbd_err), msg ? msg : "");
|
||||
set_be_chunk(&chunk.h, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_ERROR, handle,
|
||||
sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
|
||||
trace_nbd_co_send_chunk_error(request->cookie, nbd_err,
|
||||
nbd_err_lookup(nbd_err), msg ? msg : "");
|
||||
set_be_chunk(client, iov, 3, NBD_REPLY_FLAG_DONE,
|
||||
NBD_REPLY_TYPE_ERROR, request);
|
||||
stl_be_p(&chunk.error, nbd_err);
|
||||
stw_be_p(&chunk.message_length, iov[1].iov_len);
|
||||
stw_be_p(&chunk.message_length, iov[2].iov_len);
|
||||
|
||||
return nbd_co_send_iov(client, iov, 1 + !!iov[1].iov_len, errp);
|
||||
return nbd_co_send_iov(client, iov, 3, errp);
|
||||
}
|
||||
|
||||
/* Do a sparse read and send the structured reply to the client.
|
||||
@ -1991,7 +2015,7 @@ static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
|
||||
* reported to the client, at which point this function succeeds.
|
||||
*/
|
||||
static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
|
||||
uint64_t handle,
|
||||
NBDRequest *request,
|
||||
uint64_t offset,
|
||||
uint8_t *data,
|
||||
size_t size,
|
||||
@ -2013,27 +2037,28 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
|
||||
char *msg = g_strdup_printf("unable to check for holes: %s",
|
||||
strerror(-status));
|
||||
|
||||
ret = nbd_co_send_structured_error(client, handle, -status, msg,
|
||||
errp);
|
||||
ret = nbd_co_send_chunk_error(client, request, -status, msg, errp);
|
||||
g_free(msg);
|
||||
return ret;
|
||||
}
|
||||
assert(pnum && pnum <= size - progress);
|
||||
final = progress + pnum == size;
|
||||
if (status & BDRV_BLOCK_ZERO) {
|
||||
NBDReply hdr;
|
||||
NBDStructuredReadHole chunk;
|
||||
struct iovec iov[] = {
|
||||
{.iov_base = &hdr},
|
||||
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
|
||||
};
|
||||
|
||||
trace_nbd_co_send_structured_read_hole(handle, offset + progress,
|
||||
pnum);
|
||||
set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_OFFSET_HOLE,
|
||||
handle, sizeof(chunk) - sizeof(chunk.h));
|
||||
trace_nbd_co_send_chunk_read_hole(request->cookie,
|
||||
offset + progress, pnum);
|
||||
set_be_chunk(client, iov, 2,
|
||||
final ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_OFFSET_HOLE, request);
|
||||
stq_be_p(&chunk.offset, offset + progress);
|
||||
stl_be_p(&chunk.length, pnum);
|
||||
ret = nbd_co_send_iov(client, iov, 1, errp);
|
||||
ret = nbd_co_send_iov(client, iov, 2, errp);
|
||||
} else {
|
||||
ret = blk_co_pread(exp->common.blk, offset + progress, pnum,
|
||||
data + progress, 0);
|
||||
@ -2041,9 +2066,8 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
|
||||
error_setg_errno(errp, -ret, "reading from file failed");
|
||||
break;
|
||||
}
|
||||
ret = nbd_co_send_structured_read(client, handle, offset + progress,
|
||||
data + progress, pnum, final,
|
||||
errp);
|
||||
ret = nbd_co_send_chunk_read(client, request, offset + progress,
|
||||
data + progress, pnum, final, errp);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
@ -2196,30 +2220,31 @@ static int coroutine_fn blockalloc_to_extents(BlockBackend *blk,
|
||||
* @last controls whether NBD_REPLY_FLAG_DONE is sent.
|
||||
*/
|
||||
static int coroutine_fn
|
||||
nbd_co_send_extents(NBDClient *client, uint64_t handle, NBDExtentArray *ea,
|
||||
nbd_co_send_extents(NBDClient *client, NBDRequest *request, NBDExtentArray *ea,
|
||||
bool last, uint32_t context_id, Error **errp)
|
||||
{
|
||||
NBDReply hdr;
|
||||
NBDStructuredMeta chunk;
|
||||
struct iovec iov[] = {
|
||||
{.iov_base = &hdr},
|
||||
{.iov_base = &chunk, .iov_len = sizeof(chunk)},
|
||||
{.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])}
|
||||
};
|
||||
|
||||
nbd_extent_array_convert_to_be(ea);
|
||||
|
||||
trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length,
|
||||
last);
|
||||
set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_BLOCK_STATUS,
|
||||
handle, sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
|
||||
trace_nbd_co_send_extents(request->cookie, ea->count, context_id,
|
||||
ea->total_length, last);
|
||||
set_be_chunk(client, iov, 3, last ? NBD_REPLY_FLAG_DONE : 0,
|
||||
NBD_REPLY_TYPE_BLOCK_STATUS, request);
|
||||
stl_be_p(&chunk.context_id, context_id);
|
||||
|
||||
return nbd_co_send_iov(client, iov, 2, errp);
|
||||
return nbd_co_send_iov(client, iov, 3, errp);
|
||||
}
|
||||
|
||||
/* Get block status from the exported device and send it to the client */
|
||||
static int
|
||||
coroutine_fn nbd_co_send_block_status(NBDClient *client, uint64_t handle,
|
||||
coroutine_fn nbd_co_send_block_status(NBDClient *client, NBDRequest *request,
|
||||
BlockBackend *blk, uint64_t offset,
|
||||
uint32_t length, bool dont_fragment,
|
||||
bool last, uint32_t context_id,
|
||||
@ -2235,11 +2260,11 @@ coroutine_fn nbd_co_send_block_status(NBDClient *client, uint64_t handle,
|
||||
ret = blockalloc_to_extents(blk, offset, length, ea);
|
||||
}
|
||||
if (ret < 0) {
|
||||
return nbd_co_send_structured_error(
|
||||
client, handle, -ret, "can't get block status", errp);
|
||||
return nbd_co_send_chunk_error(client, request, -ret,
|
||||
"can't get block status", errp);
|
||||
}
|
||||
|
||||
return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
|
||||
return nbd_co_send_extents(client, request, ea, last, context_id, errp);
|
||||
}
|
||||
|
||||
/* Populate @ea from a dirty bitmap. */
|
||||
@ -2274,17 +2299,20 @@ static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
|
||||
bdrv_dirty_bitmap_unlock(bitmap);
|
||||
}
|
||||
|
||||
static int coroutine_fn nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
|
||||
BdrvDirtyBitmap *bitmap, uint64_t offset,
|
||||
uint32_t length, bool dont_fragment, bool last,
|
||||
uint32_t context_id, Error **errp)
|
||||
static int coroutine_fn nbd_co_send_bitmap(NBDClient *client,
|
||||
NBDRequest *request,
|
||||
BdrvDirtyBitmap *bitmap,
|
||||
uint64_t offset,
|
||||
uint32_t length, bool dont_fragment,
|
||||
bool last, uint32_t context_id,
|
||||
Error **errp)
|
||||
{
|
||||
unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
|
||||
g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
|
||||
|
||||
bitmap_to_extents(bitmap, offset, length, ea);
|
||||
|
||||
return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
|
||||
return nbd_co_send_extents(client, request, ea, last, context_id, errp);
|
||||
}
|
||||
|
||||
/* nbd_co_receive_request
|
||||
@ -2308,7 +2336,7 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
|
||||
return ret;
|
||||
}
|
||||
|
||||
trace_nbd_co_receive_request_decode_type(request->handle, request->type,
|
||||
trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
|
||||
nbd_cmd_lookup(request->type));
|
||||
|
||||
if (request->type != NBD_CMD_WRITE) {
|
||||
@ -2349,7 +2377,7 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
|
||||
}
|
||||
req->complete = true;
|
||||
|
||||
trace_nbd_co_receive_request_payload_received(request->handle,
|
||||
trace_nbd_co_receive_request_payload_received(request->cookie,
|
||||
request->len);
|
||||
}
|
||||
|
||||
@ -2402,16 +2430,15 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
|
||||
* Returns 0 if connection is still live, -errno on failure to talk to client
|
||||
*/
|
||||
static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
|
||||
uint64_t handle,
|
||||
NBDRequest *request,
|
||||
int ret,
|
||||
const char *error_msg,
|
||||
Error **errp)
|
||||
{
|
||||
if (client->structured_reply && ret < 0) {
|
||||
return nbd_co_send_structured_error(client, handle, -ret, error_msg,
|
||||
errp);
|
||||
return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
|
||||
} else {
|
||||
return nbd_co_send_simple_reply(client, handle, ret < 0 ? -ret : 0,
|
||||
return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
|
||||
NULL, 0, errp);
|
||||
}
|
||||
}
|
||||
@ -2431,7 +2458,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
|
||||
if (request->flags & NBD_CMD_FLAG_FUA) {
|
||||
ret = blk_co_flush(exp->common.blk);
|
||||
if (ret < 0) {
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"flush failed", errp);
|
||||
}
|
||||
}
|
||||
@ -2439,26 +2466,25 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
|
||||
if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
|
||||
request->len)
|
||||
{
|
||||
return nbd_co_send_sparse_read(client, request->handle, request->from,
|
||||
return nbd_co_send_sparse_read(client, request, request->from,
|
||||
data, request->len, errp);
|
||||
}
|
||||
|
||||
ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0);
|
||||
if (ret < 0) {
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"reading from file failed", errp);
|
||||
}
|
||||
|
||||
if (client->structured_reply) {
|
||||
if (request->len) {
|
||||
return nbd_co_send_structured_read(client, request->handle,
|
||||
request->from, data,
|
||||
request->len, true, errp);
|
||||
return nbd_co_send_chunk_read(client, request, request->from, data,
|
||||
request->len, true, errp);
|
||||
} else {
|
||||
return nbd_co_send_structured_done(client, request->handle, errp);
|
||||
return nbd_co_send_chunk_done(client, request, errp);
|
||||
}
|
||||
} else {
|
||||
return nbd_co_send_simple_reply(client, request->handle, 0,
|
||||
return nbd_co_send_simple_reply(client, request, 0,
|
||||
data, request->len, errp);
|
||||
}
|
||||
}
|
||||
@ -2481,7 +2507,7 @@ static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
|
||||
ret = blk_co_preadv(exp->common.blk, request->from, request->len,
|
||||
NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
|
||||
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"caching data failed", errp);
|
||||
}
|
||||
|
||||
@ -2512,7 +2538,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
}
|
||||
ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
|
||||
flags);
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"writing to file failed", errp);
|
||||
|
||||
case NBD_CMD_WRITE_ZEROES:
|
||||
@ -2528,7 +2554,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
}
|
||||
ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len,
|
||||
flags);
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"writing to file failed", errp);
|
||||
|
||||
case NBD_CMD_DISC:
|
||||
@ -2537,7 +2563,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
|
||||
case NBD_CMD_FLUSH:
|
||||
ret = blk_co_flush(exp->common.blk);
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"flush failed", errp);
|
||||
|
||||
case NBD_CMD_TRIM:
|
||||
@ -2545,12 +2571,12 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
|
||||
ret = blk_co_flush(exp->common.blk);
|
||||
}
|
||||
return nbd_send_generic_reply(client, request->handle, ret,
|
||||
return nbd_send_generic_reply(client, request, ret,
|
||||
"discard failed", errp);
|
||||
|
||||
case NBD_CMD_BLOCK_STATUS:
|
||||
if (!request->len) {
|
||||
return nbd_send_generic_reply(client, request->handle, -EINVAL,
|
||||
return nbd_send_generic_reply(client, request, -EINVAL,
|
||||
"need non-zero length", errp);
|
||||
}
|
||||
if (client->export_meta.count) {
|
||||
@ -2558,7 +2584,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
int contexts_remaining = client->export_meta.count;
|
||||
|
||||
if (client->export_meta.base_allocation) {
|
||||
ret = nbd_co_send_block_status(client, request->handle,
|
||||
ret = nbd_co_send_block_status(client, request,
|
||||
exp->common.blk,
|
||||
request->from,
|
||||
request->len, dont_fragment,
|
||||
@ -2571,7 +2597,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
}
|
||||
|
||||
if (client->export_meta.allocation_depth) {
|
||||
ret = nbd_co_send_block_status(client, request->handle,
|
||||
ret = nbd_co_send_block_status(client, request,
|
||||
exp->common.blk,
|
||||
request->from, request->len,
|
||||
dont_fragment,
|
||||
@ -2587,7 +2613,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
if (!client->export_meta.bitmaps[i]) {
|
||||
continue;
|
||||
}
|
||||
ret = nbd_co_send_bitmap(client, request->handle,
|
||||
ret = nbd_co_send_bitmap(client, request,
|
||||
client->exp->export_bitmaps[i],
|
||||
request->from, request->len,
|
||||
dont_fragment, !--contexts_remaining,
|
||||
@ -2601,7 +2627,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
|
||||
return 0;
|
||||
} else {
|
||||
return nbd_send_generic_reply(client, request->handle, -EINVAL,
|
||||
return nbd_send_generic_reply(client, request, -EINVAL,
|
||||
"CMD_BLOCK_STATUS not negotiated",
|
||||
errp);
|
||||
}
|
||||
@ -2609,7 +2635,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
||||
default:
|
||||
msg = g_strdup_printf("invalid request type (%" PRIu32 ") received",
|
||||
request->type);
|
||||
ret = nbd_send_generic_reply(client, request->handle, -EINVAL, msg,
|
||||
ret = nbd_send_generic_reply(client, request, -EINVAL, msg,
|
||||
errp);
|
||||
g_free(msg);
|
||||
return ret;
|
||||
@ -2672,7 +2698,7 @@ static coroutine_fn void nbd_trip(void *opaque)
|
||||
Error *export_err = local_err;
|
||||
|
||||
local_err = NULL;
|
||||
ret = nbd_send_generic_reply(client, request.handle, -EINVAL,
|
||||
ret = nbd_send_generic_reply(client, &request, -EINVAL,
|
||||
error_get_pretty(export_err), &local_err);
|
||||
error_free(export_err);
|
||||
} else {
|
||||
|
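
One detail of the "Prepare for alternate-size headers" refactor shown above: set_be_chunk() no longer takes an explicit length; it reserves iov[0] for the reply header and derives the payload length by summing the remaining iovec entries. A simplified standalone sketch of that length computation follows; it assumes only POSIX struct iovec and is not the QEMU function itself.

#include <stdio.h>
#include <sys/uio.h>

/* Mirror of the length logic in the new set_be_chunk(): skip iov[0]
 * (the header slot) and add up every payload element. */
static size_t chunk_payload_length(const struct iovec *iov, size_t niov)
{
    size_t length = 0;

    for (size_t i = 1; i < niov; i++) {
        length += iov[i].iov_len;
    }
    return length;
}

int main(void)
{
    char hdr[32], meta[12], data[4096];
    struct iovec iov[] = {
        { .iov_base = hdr,  .iov_len = sizeof(hdr)  }, /* header, excluded */
        { .iov_base = meta, .iov_len = sizeof(meta) }, /* chunk-specific part */
        { .iov_base = data, .iov_len = sizeof(data) }, /* payload */
    };

    printf("payload length = %zu\n", chunk_payload_length(iov, 3));
    return 0;
}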
@ -31,9 +31,9 @@ nbd_client_loop(void) "Doing NBD loop"
|
||||
nbd_client_loop_ret(int ret, const char *error) "NBD loop returned %d: %s"
|
||||
nbd_client_clear_queue(void) "Clearing NBD queue"
|
||||
nbd_client_clear_socket(void) "Clearing NBD socket"
|
||||
nbd_send_request(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
|
||||
nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t handle) "Got simple reply: { .error = %" PRId32 " (%s), handle = %" PRIu64" }"
|
||||
nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t handle, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), handle = %" PRIu64 ", length = %" PRIu32 " }"
|
||||
nbd_send_request(uint64_t from, uint32_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
|
||||
nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t cookie) "Got simple reply: { .error = %" PRId32 " (%s), cookie = %" PRIu64" }"
|
||||
nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t cookie, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), cookie = %" PRIu64 ", length = %" PRIu32 " }"
|
||||
|
||||
# common.c
|
||||
nbd_unknown_error(int err) "Squashing unexpected error %d to EINVAL"
|
||||
@ -63,14 +63,14 @@ nbd_negotiate_success(void) "Negotiation succeeded"
|
||||
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint32_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu32 " }"
|
||||
nbd_blk_aio_attached(const char *name, void *ctx) "Export %s: Attaching clients to AIO context %p"
|
||||
nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients from AIO context %p"
|
||||
nbd_co_send_simple_reply(uint64_t handle, uint32_t error, const char *errname, int len) "Send simple reply: handle = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
|
||||
nbd_co_send_structured_done(uint64_t handle) "Send structured reply done: handle = %" PRIu64
|
||||
nbd_co_send_structured_read(uint64_t handle, uint64_t offset, void *data, size_t size) "Send structured read data reply: handle = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
|
||||
nbd_co_send_structured_read_hole(uint64_t handle, uint64_t offset, size_t size) "Send structured read hole reply: handle = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
|
||||
nbd_co_send_extents(uint64_t handle, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: handle = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
|
||||
nbd_co_send_structured_error(uint64_t handle, int err, const char *errname, const char *msg) "Send structured error reply: handle = %" PRIu64 ", error = %d (%s), msg = '%s'"
|
||||
nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *name) "Decoding type: handle = %" PRIu64 ", type = %" PRIu16 " (%s)"
|
||||
nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32
|
||||
nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, int len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
|
||||
nbd_co_send_chunk_done(uint64_t cookie) "Send structured reply done: cookie = %" PRIu64
|
||||
nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, size_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
|
||||
nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, size_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
|
||||
nbd_co_send_extents(uint64_t cookie, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: cookie = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
|
||||
nbd_co_send_chunk_error(uint64_t cookie, int err, const char *errname, const char *msg) "Send structured error reply: cookie = %" PRIu64 ", error = %d (%s), msg = '%s'"
|
||||
nbd_co_receive_request_decode_type(uint64_t cookie, uint16_t type, const char *name) "Decoding type: cookie = %" PRIu64 ", type = %" PRIu16 " (%s)"
|
||||
nbd_co_receive_request_payload_received(uint64_t cookie, uint32_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu32
|
||||
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
|
||||
nbd_trip(void) "Reading request"
|
||||
|
||||
|
68 qemu-nbd.c
@ -73,7 +73,6 @@
|
||||
|
||||
#define MBR_SIZE 512
|
||||
|
||||
static int verbose;
|
||||
static char *srcpath;
|
||||
static SocketAddress *saddr;
|
||||
static int persistent = 0;
|
||||
@ -272,9 +271,15 @@ static void *show_parts(void *arg)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct NbdClientOpts {
|
||||
char *device;
|
||||
bool fork_process;
|
||||
bool verbose;
|
||||
};
|
||||
|
||||
static void *nbd_client_thread(void *arg)
|
||||
{
|
||||
char *device = arg;
|
||||
struct NbdClientOpts *opts = arg;
|
||||
NBDExportInfo info = { .request_sizes = false, .name = g_strdup("") };
|
||||
QIOChannelSocket *sioc;
|
||||
int fd = -1;
|
||||
@ -298,10 +303,10 @@ static void *nbd_client_thread(void *arg)
|
||||
goto out;
|
||||
}
|
||||
|
||||
fd = open(device, O_RDWR);
|
||||
fd = open(opts->device, O_RDWR);
|
||||
if (fd < 0) {
|
||||
/* Linux-only, we can use %m in printf. */
|
||||
error_report("Failed to open %s: %m", device);
|
||||
error_report("Failed to open %s: %m", opts->device);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -311,14 +316,18 @@ static void *nbd_client_thread(void *arg)
|
||||
}
|
||||
|
||||
/* update partition table */
|
||||
pthread_create(&show_parts_thread, NULL, show_parts, device);
|
||||
pthread_create(&show_parts_thread, NULL, show_parts, opts->device);
|
||||
|
||||
if (verbose) {
|
||||
if (opts->verbose && !opts->fork_process) {
|
||||
fprintf(stderr, "NBD device %s is now connected to %s\n",
|
||||
device, srcpath);
|
||||
opts->device, srcpath);
|
||||
} else {
|
||||
/* Close stderr so that the qemu-nbd process exits. */
|
||||
dup2(STDOUT_FILENO, STDERR_FILENO);
|
||||
if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
|
||||
error_report("Could not set stderr to /dev/null: %s",
|
||||
strerror(errno));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
if (nbd_client(fd) < 0) {
|
||||
@ -573,9 +582,9 @@ int main(int argc, char **argv)
|
||||
const char *tlshostname = NULL;
|
||||
bool imageOpts = false;
|
||||
bool writethrough = false; /* Client will flush as needed. */
|
||||
bool verbose = false;
|
||||
bool fork_process = false;
|
||||
bool list = false;
|
||||
int old_stderr = -1;
|
||||
unsigned socket_activation;
|
||||
const char *pid_file_name = NULL;
|
||||
const char *selinux_label = NULL;
|
||||
@ -738,7 +747,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
break;
|
||||
case 'v':
|
||||
verbose = 1;
|
||||
verbose = true;
|
||||
break;
|
||||
case 'V':
|
||||
version(argv[0]);
|
||||
@ -928,19 +937,30 @@ int main(int argc, char **argv)
|
||||
error_report("Failed to fork: %s", strerror(errno));
|
||||
exit(EXIT_FAILURE);
|
||||
} else if (pid == 0) {
|
||||
int saved_errno;
|
||||
|
||||
close(stderr_fd[0]);
|
||||
|
||||
/* Remember parent's stderr if we will be restoring it. */
|
||||
if (fork_process) {
|
||||
old_stderr = dup(STDERR_FILENO);
|
||||
}
|
||||
|
||||
ret = qemu_daemon(1, 0);
|
||||
saved_errno = errno; /* dup2 will overwrite error below */
|
||||
|
||||
/* Temporarily redirect stderr to the parent's pipe... */
|
||||
dup2(stderr_fd[1], STDERR_FILENO);
|
||||
if (dup2(stderr_fd[1], STDERR_FILENO) < 0) {
|
||||
char str[256];
|
||||
snprintf(str, sizeof(str),
|
||||
"%s: Failed to link stderr to the pipe: %s\n",
|
||||
g_get_prgname(), strerror(errno));
|
||||
/*
|
||||
* We are unable to use error_report() here as we need to get
|
||||
* stderr pointed to the parent's pipe. Write to that pipe
|
||||
* manually.
|
||||
*/
|
||||
ret = write(stderr_fd[1], str, strlen(str));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
error_report("Failed to daemonize: %s", strerror(errno));
|
||||
error_report("Failed to daemonize: %s", strerror(saved_errno));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
@ -1125,8 +1145,13 @@ int main(int argc, char **argv)
|
||||
if (device) {
|
||||
#if HAVE_NBD_DEVICE
|
||||
int ret;
|
||||
struct NbdClientOpts opts = {
|
||||
.device = device,
|
||||
.fork_process = fork_process,
|
||||
.verbose = verbose,
|
||||
};
|
||||
|
||||
ret = pthread_create(&client_thread, NULL, nbd_client_thread, device);
|
||||
ret = pthread_create(&client_thread, NULL, nbd_client_thread, &opts);
|
||||
if (ret != 0) {
|
||||
error_report("Failed to create client thread: %s", strerror(ret));
|
||||
exit(EXIT_FAILURE);
|
||||
@ -1152,8 +1177,11 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (fork_process) {
|
||||
dup2(old_stderr, STDERR_FILENO);
|
||||
close(old_stderr);
|
||||
if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
|
||||
error_report("Could not set stderr to /dev/null: %s",
|
||||
strerror(errno));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
state = RUNNING;
|
||||
|
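
The qemu-nbd.c portion of the series replaces the bare char *device argument of nbd_client_thread() with a small NbdClientOpts structure, so the --fork and --verbose state travels with the device name. A minimal sketch of that pattern: the struct fields mirror the diff, while the thread body and main() here are placeholders rather than the real qemu-nbd code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct NbdClientOpts {
    char *device;
    bool fork_process;
    bool verbose;
};

static void *client_thread(void *arg)
{
    struct NbdClientOpts *opts = arg;

    /* As in the patched qemu-nbd, stay quiet when daemonized via --fork. */
    if (opts->verbose && !opts->fork_process) {
        fprintf(stderr, "NBD device %s is now connected\n", opts->device);
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    struct NbdClientOpts opts = {
        .device = "/dev/nbd0",   /* hypothetical device path */
        .fork_process = false,
        .verbose = true,
    };

    /* A struct pointer keeps the thread argument extensible, unlike the
     * previous plain char *device. */
    if (pthread_create(&tid, NULL, client_thread, &opts) != 0) {
        return 1;
    }
    pthread_join(tid, NULL);
    return 0;
}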