2008-07-03 17:41:03 +04:00
|
|
|
/*
|
2023-06-08 16:56:31 +03:00
|
|
|
* Copyright Red Hat
|
2008-05-28 01:13:40 +04:00
|
|
|
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
|
|
|
|
*
|
|
|
|
* Network Block Device
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; under version 2 of the License.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
2009-07-17 00:47:01 +04:00
|
|
|
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
2008-07-03 17:41:03 +04:00
|
|
|
*/
|
2008-05-28 01:13:40 +04:00
|
|
|
|
|
|
|
#ifndef NBD_H
|
|
|
|
#define NBD_H
|
|
|
|
|
2020-09-24 18:26:50 +03:00
|
|
|
#include "block/export.h"
|
2016-02-10 21:41:04 +03:00
|
|
|
#include "io/channel-socket.h"
|
2016-02-10 21:41:11 +03:00
|
|
|
#include "crypto/tlscreds.h"
|
2019-01-28 19:58:30 +03:00
|
|
|
#include "qapi/error.h"
|
2022-11-25 20:53:28 +03:00
|
|
|
#include "qemu/bswap.h"
|
2011-02-22 18:44:53 +03:00
|
|
|
|
2023-06-08 16:56:31 +03:00
|
|
|
/* Opaque types; definitions live in the NBD server/client implementation */
typedef struct NBDExport NBDExport;
typedef struct NBDClient NBDClient;
typedef struct NBDClientConnection NBDClientConnection;

/* Block export driver for serving images over NBD */
extern const BlockExportDriver blk_exp_nbd;
|
|
|
|
|
2016-10-14 21:33:10 +03:00
|
|
|
/* Handshake phase structs - this struct is passed on the wire */

/* Client option request header, sent during the handshake phase */
typedef struct NBDOption {
    uint64_t magic;  /* NBD_OPTS_MAGIC */
    uint32_t option; /* NBD_OPT_* */
    uint32_t length; /* length of the option payload that follows */
} QEMU_PACKED NBDOption;
|
2016-10-14 21:33:10 +03:00
|
|
|
|
2023-06-08 16:56:31 +03:00
|
|
|
/* Server reply header to a client option request (wire format) */
typedef struct NBDOptionReply {
    uint64_t magic;  /* NBD_REP_MAGIC */
    uint32_t option; /* NBD_OPT_* being replied to */
    uint32_t type;   /* NBD_REP_* */
    uint32_t length; /* length of the reply payload that follows */
} QEMU_PACKED NBDOptionReply;
|
2016-10-14 21:33:10 +03:00
|
|
|
|
2018-02-26 19:26:25 +03:00
|
|
|
/* Reply payload for NBD_OPT_{LIST,SET}_META_CONTEXT (wire format) */
typedef struct NBDOptionReplyMetaContext {
    NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
    uint32_t context_id;
    /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
|
|
|
|
|
2023-06-08 16:56:37 +03:00
|
|
|
/* Track results of negotiation */
typedef enum NBDMode {
    /* Keep this list in a continuum of increasing features. */
    NBD_MODE_OLDSTYLE,     /* server lacks newstyle negotiation */
    NBD_MODE_EXPORT_NAME,  /* newstyle but only OPT_EXPORT_NAME safe */
    NBD_MODE_SIMPLE,       /* newstyle but only simple replies */
    NBD_MODE_STRUCTURED,   /* newstyle, structured replies enabled */
    /* TODO add NBD_MODE_EXTENDED */
} NBDMode;
|
|
|
|
|
2016-10-14 21:33:10 +03:00
|
|
|
/* Transmission phase structs
 *
 * Note: these are _NOT_ the same as the network representation of an NBD
 * request and reply!
 */
typedef struct NBDRequest {
    uint64_t cookie; /* request handle, echoed back in the reply cookie */
    uint64_t from;   /* offset for the command; meaning depends on NBD_CMD_* */
    uint32_t len;    /* effect length of the command */
    uint16_t flags;  /* NBD_CMD_FLAG_* */
    uint16_t type;   /* NBD_CMD_* */
} NBDRequest;
|
2008-07-03 17:41:03 +04:00
|
|
|
|
2017-10-12 12:53:10 +03:00
|
|
|
/* Simple (non-structured) reply header, sent on the wire */
typedef struct NBDSimpleReply {
    uint32_t magic;  /* NBD_SIMPLE_REPLY_MAGIC */
    uint32_t error;  /* 0 on success, else an NBD_E* error value */
    uint64_t cookie; /* matches the cookie of the corresponding request */
} QEMU_PACKED NBDSimpleReply;
|
|
|
|
|
2017-10-27 13:40:28 +03:00
|
|
|
/* Header of all structured replies */
typedef struct NBDStructuredReplyChunk {
    uint32_t magic;  /* NBD_STRUCTURED_REPLY_MAGIC */
    uint16_t flags;  /* combination of NBD_REPLY_FLAG_* */
    uint16_t type;   /* NBD_REPLY_TYPE_* */
    uint64_t cookie; /* request handle */
    uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
|
|
|
|
|
2017-10-27 13:40:35 +03:00
|
|
|
/* Union over both reply header formats, plus a view of the common fields */
typedef union NBDReply {
    NBDSimpleReply simple;
    NBDStructuredReplyChunk structured;
    struct {
        /*
         * @magic and @cookie fields have the same offset and size both in
         * simple reply and structured reply chunk, so let them be accessible
         * without ".simple." or ".structured." specification
         */
        uint32_t magic;
        uint32_t _skip;  /* padding to keep @cookie at the shared offset */
        uint64_t cookie;
    } QEMU_PACKED;
} NBDReply;
|
|
|
|
|
2017-11-09 00:57:00 +03:00
|
|
|
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
    /* header's .length >= 9 */
    uint64_t offset;
    /* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;
|
|
|
|
|
|
|
|
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
    /* header's length == 12 */
    uint64_t offset;
    uint32_t length; /* length of the hole (no payload follows) */
} QEMU_PACKED NBDStructuredReadHole;
|
2017-10-27 13:40:28 +03:00
|
|
|
|
|
|
|
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
    /* header's length >= 6 */
    uint32_t error;          /* NBD_E* error value */
    uint16_t message_length; /* human-readable message of this length follows */
} QEMU_PACKED NBDStructuredError;
|
|
|
|
|
2018-02-26 19:26:25 +03:00
|
|
|
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
    /* header's length >= 12 (at least one extent) */
    uint32_t context_id; /* negotiated via NBD_OPT_SET_META_CONTEXT */
    /* extents follows */
} QEMU_PACKED NBDStructuredMeta;
|
|
|
|
|
|
|
|
/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
    uint32_t length;
    uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
|
|
|
|
|
2016-10-14 21:33:04 +03:00
|
|
|
/* Transmission (export) flags: sent from server to client during handshake,
   but describe what will happen during transmission */
enum {
    NBD_FLAG_HAS_FLAGS_BIT          =  0, /* Flags are there */
    NBD_FLAG_READ_ONLY_BIT          =  1, /* Device is read-only */
    NBD_FLAG_SEND_FLUSH_BIT         =  2, /* Send FLUSH */
    NBD_FLAG_SEND_FUA_BIT           =  3, /* Send FUA (Force Unit Access) */
    NBD_FLAG_ROTATIONAL_BIT         =  4, /* Use elevator algorithm -
                                             rotational media */
    NBD_FLAG_SEND_TRIM_BIT          =  5, /* Send TRIM (discard) */
    NBD_FLAG_SEND_WRITE_ZEROES_BIT  =  6, /* Send WRITE_ZEROES */
    NBD_FLAG_SEND_DF_BIT            =  7, /* Send DF (Do not Fragment) */
    NBD_FLAG_CAN_MULTI_CONN_BIT     =  8, /* Multi-client cache consistent */
    NBD_FLAG_SEND_RESIZE_BIT        =  9, /* Send resize */
    NBD_FLAG_SEND_CACHE_BIT         = 10, /* Send CACHE (prefetch) */
    NBD_FLAG_SEND_FAST_ZERO_BIT     = 11, /* FAST_ZERO flag for WRITE_ZEROES */
};

/* Mask forms of the bit numbers above */
#define NBD_FLAG_HAS_FLAGS         (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY         (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH        (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA          (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL        (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM         (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF           (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN    (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE       (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE        (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO    (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
|
2011-09-08 19:24:55 +04:00
|
|
|
|
2016-10-14 21:33:04 +03:00
|
|
|
/* New-style handshake (global) flags, sent from server to client, and
   control what will happen during handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE   (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES        (1 << 1) /* End handshake without zeroes. */

/* New-style client flags, sent from client to server to control what happens
   during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES      (1 << 1) /* End handshake without zeroes. */
|
2014-06-07 04:32:31 +04:00
|
|
|
|
2017-07-07 23:30:43 +03:00
|
|
|
/* Option requests. */
#define NBD_OPT_EXPORT_NAME       (1)
#define NBD_OPT_ABORT             (2)
#define NBD_OPT_LIST              (3)
/* #define NBD_OPT_PEEK_EXPORT    (4) not in use */
#define NBD_OPT_STARTTLS          (5)
#define NBD_OPT_INFO              (6)
#define NBD_OPT_GO                (7)
#define NBD_OPT_STRUCTURED_REPLY  (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT  (10)

/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value)) /* error reply */

#define NBD_REP_ACK          (1) /* Data sending finished. */
#define NBD_REP_SERVER       (2) /* Export description. */
#define NBD_REP_INFO         (3) /* NBD_OPT_INFO/GO. */
#define NBD_REP_META_CONTEXT (4) /* NBD_OPT_{LIST,SET}_META_CONTEXT */

#define NBD_REP_ERR_UNSUP           NBD_REP_ERR(1)  /* Unknown option */
#define NBD_REP_ERR_POLICY          NBD_REP_ERR(2)  /* Server denied */
#define NBD_REP_ERR_INVALID         NBD_REP_ERR(3)  /* Invalid length */
#define NBD_REP_ERR_PLATFORM        NBD_REP_ERR(4)  /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD        NBD_REP_ERR(5)  /* TLS required */
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT      0
#define NBD_INFO_NAME        1
#define NBD_INFO_DESCRIPTION 2
#define NBD_INFO_BLOCK_SIZE  3
|
2016-02-10 21:41:11 +03:00
|
|
|
|
2016-10-14 21:33:04 +03:00
|
|
|
/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA       (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE   (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF        (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE   (1 << 3) /* only one extent in BLOCK_STATUS
                                         * reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */

/* Supported request types */
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,          /* disconnect */
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4,
    NBD_CMD_CACHE = 5,
    NBD_CMD_WRITE_ZEROES = 6,
    NBD_CMD_BLOCK_STATUS = 7,
};
|
|
|
|
|
2010-08-26 00:48:33 +04:00
|
|
|
/* IANA-assigned default TCP port for NBD */
#define NBD_DEFAULT_PORT 10809

/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)

/*
 * Maximum size of a protocol string (export name, metadata context name,
 * etc.).  Use malloc rather than stack allocation for storage of a
 * string.
 */
#define NBD_MAX_STRING_SIZE 4096
|
2011-10-07 16:35:58 +04:00
|
|
|
|
2017-10-27 13:40:28 +03:00
|
|
|
/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC     0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef

/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE        (1 << 0) /* This reply-chunk is last */

/* Structured reply types; errors have bit 15 set */
#define NBD_REPLY_ERR(value)       ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_BLOCK_STATUS  5
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)

/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
#define NBD_STATE_ZERO (1 << 1)

/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)

/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */
|
|
|
|
|
2017-10-27 13:40:37 +03:00
|
|
|
/* True if @type is one of the NBD_REPLY_TYPE_ERROR* chunk types */
static inline bool nbd_reply_type_is_error(int type)
{
    /* Error chunk types are exactly those with bit 15 set */
    const int error_bit = 1 << 15;

    return (type & error_bit) != 0;
}
|
|
|
|
|
2017-10-27 13:40:27 +03:00
|
|
|
/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
 * but only a limited set of errno values is specified in the protocol.
 * Everything else is squashed to EINVAL.
 */
#define NBD_SUCCESS    0
#define NBD_EPERM      1
#define NBD_EIO        5
#define NBD_ENOMEM     12
#define NBD_EINVAL     22
#define NBD_ENOSPC     28
#define NBD_EOVERFLOW  75
#define NBD_ENOTSUP    95
#define NBD_ESHUTDOWN  108
|
|
|
|
|
2017-07-07 23:30:41 +03:00
|
|
|
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
typedef struct NBDExportInfo {
    /* Set by client before nbd_receive_negotiate() */
    bool request_sizes;   /* whether to ask the server for block sizes */
    char *x_dirty_bitmap; /* dirty bitmap context to request, if any */

    /* Set by client before nbd_receive_negotiate(), or by server results
     * during nbd_receive_export_list() */
    char *name; /* must be non-NULL */

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;
    bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */

    /* Set by server results during nbd_receive_negotiate() and
     * nbd_receive_export_list() */
    uint64_t size;      /* export size in bytes */
    uint16_t flags;     /* transmission flags (NBD_FLAG_*) */
    uint32_t min_block; /* advertised minimum block size */
    uint32_t opt_block; /* advertised preferred block size */
    uint32_t max_block; /* advertised maximum block size */

    uint32_t context_id; /* id of the negotiated metadata context */

    /* Set by server results during nbd_receive_export_list() */
    char *description;
    int n_contexts;
    char **contexts;
} NBDExportInfo;
|
2017-07-07 23:30:41 +03:00
|
|
|
|
2019-06-18 14:43:21 +03:00
|
|
|
int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
|
|
|
|
QCryptoTLSCreds *tlscreds,
|
2019-01-17 22:36:46 +03:00
|
|
|
const char *hostname, QIOChannel **outioc,
|
|
|
|
NBDExportInfo *info, Error **errp);
|
nbd/client: Add nbd_receive_export_list()
We want to be able to detect whether a given qemu NBD server is
exposing the right export(s) and dirty bitmaps, at least for
regression testing. We could use 'nbd-client -l' from the upstream
NBD project to list exports, but it's annoying to rely on
out-of-tree binaries; furthermore, nbd-client doesn't necessarily
know about all of the qemu NBD extensions. Thus, we plan on adding
a new mode to qemu-nbd that merely sniffs all possible information
from the server during handshake phase, then disconnects and dumps
the information.
This patch adds the low-level client code for grabbing the list
of exports. It benefits from the recent refactoring patches, in
order to share as much code as possible when it comes to doing
validation of server replies. The resulting information is stored
in an array of NBDExportInfo which has been expanded to include any
description string, along with a convenience function for freeing
the list.
Note: a malicious server could exhaust memory of a client by feeding
an unending loop of exports; perhaps we should place a limit on how
many we are willing to receive. But note that a server could
reasonably be serving an export for every file in a large directory,
where an arbitrary limit in the client means we can't list anything
from such a server; the same happens if we just run until the client
fails to malloc() and thus dies by an abort(), where the limit is
no longer arbitrary but determined by available memory. Since the
client is already planning on being short-lived, it's hard to call
this a denial of service attack that would starve off other uses,
so it does not appear to be a security issue.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Message-Id: <20190117193658.16413-18-eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2019-01-17 22:36:54 +03:00
|
|
|
void nbd_free_export_list(NBDExportInfo *info, int count);
|
|
|
|
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
|
|
|
const char *hostname, NBDExportInfo **info,
|
|
|
|
Error **errp);
|
2017-07-07 23:30:41 +03:00
|
|
|
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
|
2017-05-26 14:09:13 +03:00
|
|
|
Error **errp);
|
2017-08-04 18:14:27 +03:00
|
|
|
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
|
2019-02-18 16:56:01 +03:00
|
|
|
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
|
|
|
|
NBDReply *reply, Error **errp);
|
2010-08-31 11:30:33 +04:00
|
|
|
int nbd_client(int fd);
|
2008-05-28 01:13:40 +04:00
|
|
|
int nbd_disconnect(int fd);
|
2017-10-27 13:40:27 +03:00
|
|
|
int nbd_errno_to_system_errno(int err);
|
2008-05-28 01:13:40 +04:00
|
|
|
|
2020-09-24 18:26:53 +03:00
|
|
|
void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
|
2012-09-18 15:17:52 +04:00
|
|
|
|
2019-09-17 05:39:17 +03:00
|
|
|
AioContext *nbd_export_aio_context(NBDExport *exp);
|
2012-08-22 17:59:23 +04:00
|
|
|
NBDExport *nbd_export_find(const char *name);
|
|
|
|
|
2018-10-03 20:02:28 +03:00
|
|
|
void nbd_client_new(QIOChannelSocket *sioc,
|
2016-02-10 21:41:11 +03:00
|
|
|
QCryptoTLSCreds *tlscreds,
|
qemu-nbd: add support for authorization of TLS clients
Currently any client which can complete the TLS handshake is able to use
the NBD server. The server admin can turn on the 'verify-peer' option
for the x509 creds to require the client to provide a x509 certificate.
This means the client will have to acquire a certificate from the CA
before they are permitted to use the NBD server. This is still a fairly
low bar to cross.
This adds a '--tls-authz OBJECT-ID' option to the qemu-nbd command which
takes the ID of a previously added 'QAuthZ' object instance. This will
be used to validate the client's x509 distinguished name. Clients
failing the authorization check will not be permitted to use the NBD
server.
For example to setup authorization that only allows connection from a client
whose x509 certificate distinguished name is
CN=laptop.example.com,O=Example Org,L=London,ST=London,C=GB
escape the commas in the name and use:
qemu-nbd --object tls-creds-x509,id=tls0,dir=/home/berrange/qemutls,\
endpoint=server,verify-peer=yes \
--object 'authz-simple,id=auth0,identity=CN=laptop.example.com,,\
O=Example Org,,L=London,,ST=London,,C=GB' \
--tls-creds tls0 \
--tls-authz auth0 \
....other qemu-nbd args...
NB: a real shell command line would not have leading whitespace after
the line continuation, it is just included here for clarity.
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-Id: <20190227162035.18543-2-berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: split long line in --help text, tweak 233 to show that whitespace
after ,, in identity= portion is actually okay]
Signed-off-by: Eric Blake <eblake@redhat.com>
2019-02-27 19:20:33 +03:00
|
|
|
const char *tlsauthz,
|
nbd: Fix regression on resiliency to port scan
Back in qemu 2.5, qemu-nbd was immune to port probes (a transient
server would not quit, regardless of how many probe connections
came and went, until a connection actually negotiated). But we
broke that in commit ee7d7aa when removing the return value to
nbd_client_new(), although that patch also introduced a bug causing
an assertion failure on a client that fails negotiation. We then
made it worse during refactoring in commit 1a6245a (a segfault
before we could even assert); the (masked) assertion was cleaned
up in d3780c2 (still in 2.6), and just recently we finally fixed
the segfault ("nbd: Fully initialize client in case of failed
negotiation"). But that still means that ever since we added
TLS support to qemu-nbd, we have been vulnerable to an ill-timed
port-scan being able to cause a denial of service by taking down
qemu-nbd before a real client has a chance to connect.
Since negotiation is now handled asynchronously via coroutines,
we no longer have a synchronous point of return by re-adding a
return value to nbd_client_new(). So this patch instead wires
things up to pass the negotiation status through the close_fn
callback function.
Simple test across two terminals:
$ qemu-nbd -f raw -p 30001 file
$ nmap 127.0.0.1 -p 30001 && \
qemu-io -c 'r 0 512' -f raw nbd://localhost:30001
Note that this patch does not change what constitutes successful
negotiation (thus, a client must enter transmission phase before
that client can be considered as a reason to terminate the server
when the connection ends). Perhaps we may want to tweak things
in a later patch to also treat a client that uses NBD_OPT_ABORT
as being a 'successful' negotiation (the client correctly talked
the NBD protocol, and informed us it was not going to use our
export after all), but that's a discussion for another day.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1451614
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20170608222617.20376-1-eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-06-09 01:26:17 +03:00
|
|
|
void (*close_fn)(NBDClient *, bool));
|
2012-09-18 15:17:52 +04:00
|
|
|
void nbd_client_get(NBDClient *client);
|
|
|
|
void nbd_client_put(NBDClient *client);
|
2011-09-19 16:03:37 +04:00
|
|
|
|
2022-05-12 03:49:23 +03:00
|
|
|
void nbd_server_is_qemu_nbd(int max_connections);
|
2020-09-24 18:27:12 +03:00
|
|
|
bool nbd_server_is_running(void);
|
nbd/server: Allow MULTI_CONN for shared writable exports
According to the NBD spec, a server that advertises
NBD_FLAG_CAN_MULTI_CONN promises that multiple client connections will
not see any cache inconsistencies: when properly separated by a single
flush, actions performed by one client will be visible to another
client, regardless of which client did the flush.
We always satisfy these conditions in qemu - even when we support
multiple clients, ALL clients go through a single point of reference
into the block layer, with no local caching. The effect of one client
is instantly visible to the next client. Even if our backend were a
network device, we argue that any multi-path caching effects that
would cause inconsistencies in back-to-back actions not seeing the
effect of previous actions would be a bug in that backend, and not the
fault of caching in qemu. As such, it is safe to unconditionally
advertise CAN_MULTI_CONN for any qemu NBD server situation that
supports parallel clients.
Note, however, that we don't want to advertise CAN_MULTI_CONN when we
know that a second client cannot connect (for historical reasons,
qemu-nbd defaults to a single connection while nbd-server-add and QMP
commands default to unlimited connections; but we already have
existing means to let either style of NBD server creation alter those
defaults). This is visible by no longer advertising MULTI_CONN for
'qemu-nbd -r' without -e, as in the iotest nbd-qemu-allocation.
The harder part of this patch is setting up an iotest to demonstrate
behavior of multiple NBD clients to a single server. It might be
possible with parallel qemu-io processes, but I found it easier to do
in python with the help of libnbd, and help from Nir and Vladimir in
writing the test.
Signed-off-by: Eric Blake <eblake@redhat.com>
Suggested-by: Nir Soffer <nsoffer@redhat.com>
Suggested-by: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
Message-Id: <20220512004924.417153-3-eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2022-05-12 03:49:24 +03:00
|
|
|
int nbd_server_max_connections(void);
|
2017-04-26 10:36:41 +03:00
|
|
|
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
2020-09-24 18:26:54 +03:00
|
|
|
const char *tls_authz, uint32_t max_connections,
|
|
|
|
Error **errp);
|
2020-02-24 17:29:57 +03:00
|
|
|
void nbd_server_start_options(NbdServerOptions *arg, Error **errp);
|
2017-04-26 10:36:41 +03:00
|
|
|
|
2017-10-27 13:40:36 +03:00
|
|
|
/* nbd_read
|
|
|
|
* Reads @size bytes from @ioc. Returns 0 on success.
|
|
|
|
*/
|
|
|
|
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
|
2019-01-28 19:58:30 +03:00
|
|
|
const char *desc, Error **errp)
|
2017-10-27 13:40:36 +03:00
|
|
|
{
|
nbd: Use ERRP_GUARD()
If we want to check error after errp-function call, we need to
introduce local_err and then propagate it to errp. Instead, use
the ERRP_GUARD() macro, benefits are:
1. No need of explicit error_propagate call
2. No need of explicit local_err variable: use errp directly
3. ERRP_GUARD() leaves errp as is if it's not NULL or
&error_fatal, this means that we don't break error_abort
(we'll abort on error_set, not on error_propagate)
If we want to add some info to errp (by error_prepend() or
error_append_hint()), we must use the ERRP_GUARD() macro.
Otherwise, this info will not be added when errp == &error_fatal
(the program will exit prior to the error_append_hint() or
error_prepend() call). Fix several such cases, e.g. in nbd_read().
This commit is generated by command
sed -n '/^Network Block Device (NBD)$/,/^$/{s/^F: //p}' \
MAINTAINERS | \
xargs git ls-files | grep '\.[hc]$' | \
xargs spatch \
--sp-file scripts/coccinelle/errp-guard.cocci \
--macro-file scripts/cocci-macro-file.h \
--in-place --no-show-diff --max-width 80
Reported-by: Kevin Wolf <kwolf@redhat.com>
Reported-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
[Commit message tweaked]
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20200707165037.1026246-8-armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[ERRP_AUTO_PROPAGATE() renamed to ERRP_GUARD(), and
auto-propagated-errp.cocci to errp-guard.cocci. Commit message
tweaked again.]
2020-07-07 19:50:36 +03:00
|
|
|
ERRP_GUARD();
|
2019-01-28 19:58:30 +03:00
|
|
|
int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
if (desc) {
|
|
|
|
error_prepend(errp, "Failed to read %s: ", desc);
|
|
|
|
}
|
2021-01-29 10:38:59 +03:00
|
|
|
return ret;
|
2019-01-28 19:58:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * DEF_NBD_READ_N
 * Defines nbd_read<bits>(): read a big-endian <bits>-bit integer from
 * @ioc into @val and convert it to host byte order via be<bits>_to_cpu().
 * Returns 0 on success, or the (negative) result of nbd_read() on
 * failure, with *errp set.
 */
#define DEF_NBD_READ_N(bits)                                            \
static inline int nbd_read##bits(QIOChannel *ioc,                       \
                                 uint##bits##_t *val,                   \
                                 const char *desc, Error **errp)        \
{                                                                       \
    /* Read the raw wire bytes first; bail out before byte-swapping */  \
    int ret = nbd_read(ioc, val, sizeof(*val), desc, errp);             \
    if (ret < 0) {                                                      \
        return ret;                                                     \
    }                                                                   \
    *val = be##bits##_to_cpu(*val);                                     \
    return 0;                                                           \
}

DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
DEF_NBD_READ_N(64) /* Defines nbd_read64(). */

/* Helper macro is not part of the public interface */
#undef DEF_NBD_READ_N
|
|
|
|
|
2017-10-27 13:40:35 +03:00
|
|
|
/* Return true if @reply is a simple reply (magic == NBD_SIMPLE_REPLY_MAGIC). */
static inline bool nbd_reply_is_simple(NBDReply *reply)
{
    uint32_t wire_magic = reply->magic;

    return wire_magic == NBD_SIMPLE_REPLY_MAGIC;
}
|
|
|
|
|
|
|
|
/* Return true if @reply is a structured reply (magic == NBD_STRUCTURED_REPLY_MAGIC). */
static inline bool nbd_reply_is_structured(NBDReply *reply)
{
    uint32_t wire_magic = reply->magic;

    return wire_magic == NBD_STRUCTURED_REPLY_MAGIC;
}
|
|
|
|
|
2017-10-27 13:40:37 +03:00
|
|
|
const char *nbd_reply_type_lookup(uint16_t type);
|
2018-11-02 18:11:51 +03:00
|
|
|
const char *nbd_opt_lookup(uint32_t opt);
|
|
|
|
const char *nbd_rep_lookup(uint32_t rep);
|
|
|
|
const char *nbd_info_lookup(uint16_t info);
|
|
|
|
const char *nbd_cmd_lookup(uint16_t info);
|
|
|
|
const char *nbd_err_lookup(int err);
|
2023-06-08 16:56:37 +03:00
|
|
|
const char *nbd_mode_lookup(NBDMode mode);
|
2017-10-27 13:40:37 +03:00
|
|
|
|
2021-06-15 22:07:05 +03:00
|
|
|
/* nbd/client-connection.c */
|
2021-06-10 13:07:50 +03:00
|
|
|
void nbd_client_connection_enable_retry(NBDClientConnection *conn);
|
|
|
|
|
2021-06-10 13:07:49 +03:00
|
|
|
NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
|
|
|
|
bool do_negotiation,
|
|
|
|
const char *export_name,
|
|
|
|
const char *x_dirty_bitmap,
|
2022-03-04 22:36:00 +03:00
|
|
|
QCryptoTLSCreds *tlscreds,
|
|
|
|
const char *tlshostname);
|
2021-06-15 22:07:05 +03:00
|
|
|
void nbd_client_connection_release(NBDClientConnection *conn);
|
|
|
|
|
2021-06-10 13:07:56 +03:00
|
|
|
QIOChannel *coroutine_fn
|
2021-06-10 13:07:49 +03:00
|
|
|
nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
|
2021-06-10 13:07:59 +03:00
|
|
|
bool blocking, Error **errp);
|
2021-06-15 22:07:05 +03:00
|
|
|
|
2022-09-22 11:49:02 +03:00
|
|
|
void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
|
2021-06-15 22:07:05 +03:00
|
|
|
|
2008-05-28 01:13:40 +04:00
|
|
|
#endif
|