2008-07-03 17:41:03 +04:00
|
|
|
/*
|
2017-07-07 23:30:43 +03:00
|
|
|
* Copyright (C) 2016-2017 Red Hat, Inc.
|
2008-05-28 01:13:40 +04:00
|
|
|
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
|
|
|
|
*
|
|
|
|
* Network Block Device
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; under version 2 of the License.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
2009-07-17 00:47:01 +04:00
|
|
|
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
2008-07-03 17:41:03 +04:00
|
|
|
*/
|
2008-05-28 01:13:40 +04:00
|
|
|
|
|
|
|
#ifndef NBD_H
|
|
|
|
#define NBD_H
|
|
|
|
|
|
|
|
|
2011-09-08 19:55:32 +04:00
|
|
|
#include "qemu-common.h"
|
2013-03-15 14:55:29 +04:00
|
|
|
#include "qemu/option.h"
|
2016-02-10 21:41:04 +03:00
|
|
|
#include "io/channel-socket.h"
|
2016-02-10 21:41:11 +03:00
|
|
|
#include "crypto/tlscreds.h"
|
2011-02-22 18:44:53 +03:00
|
|
|
|
2016-10-14 21:33:10 +03:00
|
|
|
/* Handshake phase structs - this struct is passed on the wire */
|
|
|
|
|
|
|
|
struct nbd_option {
|
|
|
|
uint64_t magic; /* NBD_OPTS_MAGIC */
|
|
|
|
uint32_t option; /* NBD_OPT_* */
|
|
|
|
uint32_t length;
|
|
|
|
} QEMU_PACKED;
|
|
|
|
typedef struct nbd_option nbd_option;
|
|
|
|
|
|
|
|
struct nbd_opt_reply {
|
|
|
|
uint64_t magic; /* NBD_REP_MAGIC */
|
|
|
|
uint32_t option; /* NBD_OPT_* */
|
|
|
|
uint32_t type; /* NBD_REP_* */
|
|
|
|
uint32_t length;
|
|
|
|
} QEMU_PACKED;
|
|
|
|
typedef struct nbd_opt_reply nbd_opt_reply;
|
|
|
|
|
|
|
|
/* Transmission phase structs
 *
 * Note: these are _NOT_ the same as the network representation of an NBD
 * request and reply!
 */
struct NBDRequest {
    uint64_t handle; /* Opaque identifier echoed back in the reply */
    uint64_t from;   /* Offset touched by the request */
    uint32_t len;    /* Effect length; capped by NBD_MAX_BUFFER_SIZE for
                      * reads/writes */
    uint16_t flags;  /* NBD_CMD_FLAG_* */
    uint16_t type;   /* NBD_CMD_* */
};
typedef struct NBDRequest NBDRequest;
|
2008-07-03 17:41:03 +04:00
|
|
|
|
2017-10-12 12:53:10 +03:00
|
|
|
typedef struct NBDSimpleReply {
|
|
|
|
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
|
|
|
|
uint32_t error;
|
|
|
|
uint64_t handle;
|
|
|
|
} QEMU_PACKED NBDSimpleReply;
|
|
|
|
|
2017-10-27 13:40:28 +03:00
|
|
|
/* Header of all structured replies */
|
|
|
|
typedef struct NBDStructuredReplyChunk {
|
|
|
|
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
|
|
|
|
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
|
|
|
|
uint16_t type; /* NBD_REPLY_TYPE_* */
|
|
|
|
uint64_t handle; /* request handle */
|
|
|
|
uint32_t length; /* length of payload */
|
|
|
|
} QEMU_PACKED NBDStructuredReplyChunk;
|
|
|
|
|
2017-10-27 13:40:35 +03:00
|
|
|
typedef union NBDReply {
|
|
|
|
NBDSimpleReply simple;
|
|
|
|
NBDStructuredReplyChunk structured;
|
|
|
|
struct {
|
|
|
|
/* @magic and @handle fields have the same offset and size both in
|
|
|
|
* simple reply and structured reply chunk, so let them be accessible
|
|
|
|
* without ".simple." or ".structured." specification
|
|
|
|
*/
|
|
|
|
uint32_t magic;
|
|
|
|
uint32_t _skip;
|
|
|
|
uint64_t handle;
|
|
|
|
} QEMU_PACKED;
|
|
|
|
} NBDReply;
|
|
|
|
|
2017-11-09 00:57:00 +03:00
|
|
|
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
|
|
|
|
typedef struct NBDStructuredReadData {
|
|
|
|
NBDStructuredReplyChunk h; /* h.length >= 9 */
|
2017-10-27 13:40:28 +03:00
|
|
|
uint64_t offset;
|
2017-11-09 00:57:00 +03:00
|
|
|
/* At least one byte of data payload follows, calculated from h.length */
|
|
|
|
} QEMU_PACKED NBDStructuredReadData;
|
|
|
|
|
|
|
|
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
|
|
|
|
typedef struct NBDStructuredReadHole {
|
|
|
|
NBDStructuredReplyChunk h; /* h.length == 12 */
|
|
|
|
uint64_t offset;
|
|
|
|
uint32_t length;
|
|
|
|
} QEMU_PACKED NBDStructuredReadHole;
|
2017-10-27 13:40:28 +03:00
|
|
|
|
|
|
|
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
|
|
|
|
typedef struct NBDStructuredError {
|
2017-11-09 00:57:00 +03:00
|
|
|
NBDStructuredReplyChunk h; /* h.length >= 6 */
|
2017-10-27 13:40:28 +03:00
|
|
|
uint32_t error;
|
|
|
|
uint16_t message_length;
|
|
|
|
} QEMU_PACKED NBDStructuredError;
|
|
|
|
|
2016-10-14 21:33:04 +03:00
|
|
|
/* Transmission (export) flags: sent from server to client during handshake,
   but describe what will happen during transmission */
#define NBD_FLAG_HAS_FLAGS         (1 << 0) /* Flags are there */
#define NBD_FLAG_READ_ONLY         (1 << 1) /* Device is read-only */
#define NBD_FLAG_SEND_FLUSH        (1 << 2) /* Send FLUSH */
#define NBD_FLAG_SEND_FUA          (1 << 3) /* Send FUA (Force Unit Access) */
#define NBD_FLAG_ROTATIONAL        (1 << 4) /* Use elevator algorithm -
                                               rotational media */
#define NBD_FLAG_SEND_TRIM         (1 << 5) /* Send TRIM (discard) */
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* Send WRITE_ZEROES */
#define NBD_FLAG_SEND_DF           (1 << 7) /* Send DF (Do not Fragment) */

/* New-style handshake (global) flags, sent from server to client, and
   control what will happen during handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE    (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES         (1 << 1) /* End handshake without zeroes. */

/* New-style client flags, sent from client to server to control what happens
   during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE  (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES       (1 << 1) /* End handshake without zeroes. */
|
2014-06-07 04:32:31 +04:00
|
|
|
|
2017-07-07 23:30:43 +03:00
|
|
|
/* Option requests. */
#define NBD_OPT_EXPORT_NAME      (1)
#define NBD_OPT_ABORT            (2)
#define NBD_OPT_LIST             (3)
/* #define NBD_OPT_PEEK_EXPORT   (4) not in use */
#define NBD_OPT_STARTTLS         (5)
#define NBD_OPT_INFO             (6)
#define NBD_OPT_GO               (7)
#define NBD_OPT_STRUCTURED_REPLY (8)

/* Option reply types. */
/* Error replies have the top bit set; UINT32_C avoids UB of 1 << 31. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))

#define NBD_REP_ACK          (1)    /* Data sending finished. */
#define NBD_REP_SERVER       (2)    /* Export description. */
#define NBD_REP_INFO         (3)    /* NBD_OPT_INFO/GO. */

#define NBD_REP_ERR_UNSUP           NBD_REP_ERR(1)  /* Unknown option */
#define NBD_REP_ERR_POLICY          NBD_REP_ERR(2)  /* Server denied */
#define NBD_REP_ERR_INVALID         NBD_REP_ERR(3)  /* Invalid length */
#define NBD_REP_ERR_PLATFORM        NBD_REP_ERR(4)  /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD        NBD_REP_ERR(5)  /* TLS required */
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT         0
#define NBD_INFO_NAME           1
#define NBD_INFO_DESCRIPTION    2
#define NBD_INFO_BLOCK_SIZE     3
|
2016-02-10 21:41:11 +03:00
|
|
|
|
2016-10-14 21:33:04 +03:00
|
|
|
/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA     (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF      (1 << 2) /* don't fragment structured read */

/* Supported request types */
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,   /* disconnect */
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4,
    /* 5 reserved for failed experiment NBD_CMD_CACHE */
    NBD_CMD_WRITE_ZEROES = 6,
};
|
|
|
|
|
2010-08-26 00:48:33 +04:00
|
|
|
/* IANA-registered port for new-style NBD */
#define NBD_DEFAULT_PORT 10809

/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)

/* Maximum size of an export name. The NBD spec requires 256 and
 * suggests that servers support up to 4096, but we stick to only the
 * required size so that we can stack-allocate the names, and because
 * going larger would require an audit of more code to make sure we
 * aren't overflowing some other buffer. */
#define NBD_MAX_NAME_SIZE 256
|
2011-10-07 16:35:58 +04:00
|
|
|
|
2017-10-27 13:40:28 +03:00
|
|
|
/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC      0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC  0x668e33ef

/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE          (1 << 0) /* This reply-chunk is last */

/* Structured reply types */
/* Error chunk types have bit 15 set; see nbd_reply_type_is_error() */
#define NBD_REPLY_ERR(value)         ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)
|
|
|
|
|
2017-10-27 13:40:37 +03:00
|
|
|
/* Return true if @type is an NBD_REPLY_TYPE_ERROR* structured chunk type
 * (bit 15 set), false for successful chunk types. */
static inline bool nbd_reply_type_is_error(int type)
{
    return (type & (1 << 15)) != 0;
}
|
|
|
|
|
2017-10-27 13:40:27 +03:00
|
|
|
/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
 * but only a limited set of errno values is specified in the protocol.
 * Everything else is squashed to EINVAL.
 */
#define NBD_SUCCESS    0
#define NBD_EPERM      1
#define NBD_EIO        5
#define NBD_ENOMEM    12
#define NBD_EINVAL    22
#define NBD_ENOSPC    28
#define NBD_EOVERFLOW 75
#define NBD_ESHUTDOWN 108
|
|
|
|
|
2017-07-07 23:30:41 +03:00
|
|
|
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
struct NBDExportInfo {
    /* Set by client before nbd_receive_negotiate() */
    bool request_sizes; /* whether to ask the server for block sizes */

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;

    /* Set by server results during nbd_receive_negotiate() */
    uint64_t size;      /* export size in bytes */
    uint16_t flags;     /* NBD_FLAG_* transmission flags */
    uint32_t min_block; /* block size constraints, via NBD_INFO_BLOCK_SIZE */
    uint32_t opt_block;
    uint32_t max_block;
};
typedef struct NBDExportInfo NBDExportInfo;
|
|
|
|
|
|
|
|
int nbd_receive_negotiate(QIOChannel *ioc, const char *name,
|
2016-02-10 21:41:11 +03:00
|
|
|
QCryptoTLSCreds *tlscreds, const char *hostname,
|
2017-07-07 23:30:41 +03:00
|
|
|
QIOChannel **outioc, NBDExportInfo *info,
|
|
|
|
Error **errp);
|
|
|
|
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
|
2017-05-26 14:09:13 +03:00
|
|
|
Error **errp);
|
2017-08-04 18:14:27 +03:00
|
|
|
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
|
2017-08-04 18:14:26 +03:00
|
|
|
int nbd_receive_reply(QIOChannel *ioc, NBDReply *reply, Error **errp);
|
2010-08-31 11:30:33 +04:00
|
|
|
int nbd_client(int fd);
|
2008-05-28 01:13:40 +04:00
|
|
|
int nbd_disconnect(int fd);
|
2017-10-27 13:40:27 +03:00
|
|
|
int nbd_errno_to_system_errno(int err);
|
2008-05-28 01:13:40 +04:00
|
|
|
|
2011-09-19 16:03:37 +04:00
|
|
|
typedef struct NBDExport NBDExport;
|
2011-09-19 16:33:23 +04:00
|
|
|
typedef struct NBDClient NBDClient;
|
2011-09-19 16:03:37 +04:00
|
|
|
|
2016-07-06 12:22:39 +03:00
|
|
|
NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size,
|
2016-07-21 22:34:46 +03:00
|
|
|
uint16_t nbdflags, void (*close)(NBDExport *),
|
2016-07-06 12:22:39 +03:00
|
|
|
bool writethrough, BlockBackend *on_eject_blk,
|
2015-02-25 21:08:21 +03:00
|
|
|
Error **errp);
|
2011-09-19 16:03:37 +04:00
|
|
|
void nbd_export_close(NBDExport *exp);
|
2012-09-18 15:26:25 +04:00
|
|
|
void nbd_export_get(NBDExport *exp);
|
|
|
|
void nbd_export_put(NBDExport *exp);
|
2012-09-18 15:17:52 +04:00
|
|
|
|
2014-11-18 14:21:17 +03:00
|
|
|
BlockBackend *nbd_export_get_blockdev(NBDExport *exp);
|
2012-09-18 16:31:44 +04:00
|
|
|
|
2012-08-22 17:59:23 +04:00
|
|
|
NBDExport *nbd_export_find(const char *name);
|
|
|
|
void nbd_export_set_name(NBDExport *exp, const char *name);
|
2016-10-14 21:33:03 +03:00
|
|
|
void nbd_export_set_description(NBDExport *exp, const char *description);
|
2012-08-22 17:59:23 +04:00
|
|
|
void nbd_export_close_all(void);
|
|
|
|
|
2016-02-10 21:41:04 +03:00
|
|
|
void nbd_client_new(NBDExport *exp,
|
|
|
|
QIOChannelSocket *sioc,
|
2016-02-10 21:41:11 +03:00
|
|
|
QCryptoTLSCreds *tlscreds,
|
|
|
|
const char *tlsaclname,
|
nbd: Fix regression on resiliency to port scan
Back in qemu 2.5, qemu-nbd was immune to port probes (a transient
server would not quit, regardless of how many probe connections
came and went, until a connection actually negotiated). But we
broke that in commit ee7d7aa when removing the return value to
nbd_client_new(), although that patch also introduced a bug causing
an assertion failure on a client that fails negotiation. We then
made it worse during refactoring in commit 1a6245a (a segfault
before we could even assert); the (masked) assertion was cleaned
up in d3780c2 (still in 2.6), and just recently we finally fixed
the segfault ("nbd: Fully intialize client in case of failed
negotiation"). But that still means that ever since we added
TLS support to qemu-nbd, we have been vulnerable to an ill-timed
port-scan being able to cause a denial of service by taking down
qemu-nbd before a real client has a chance to connect.
Since negotiation is now handled asynchronously via coroutines,
we no longer have a synchronous point of return by re-adding a
return value to nbd_client_new(). So this patch instead wires
things up to pass the negotiation status through the close_fn
callback function.
Simple test across two terminals:
$ qemu-nbd -f raw -p 30001 file
$ nmap 127.0.0.1 -p 30001 && \
qemu-io -c 'r 0 512' -f raw nbd://localhost:30001
Note that this patch does not change what constitutes successful
negotiation (thus, a client must enter transmission phase before
that client can be considered as a reason to terminate the server
when the connection ends). Perhaps we may want to tweak things
in a later patch to also treat a client that uses NBD_OPT_ABORT
as being a 'successful' negotiation (the client correctly talked
the NBD protocol, and informed us it was not going to use our
export after all), but that's a discussion for another day.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1451614
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20170608222617.20376-1-eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-06-09 01:26:17 +03:00
|
|
|
void (*close_fn)(NBDClient *, bool));
|
2012-09-18 15:17:52 +04:00
|
|
|
void nbd_client_get(NBDClient *client);
|
|
|
|
void nbd_client_put(NBDClient *client);
|
2011-09-19 16:03:37 +04:00
|
|
|
|
2017-04-26 10:36:41 +03:00
|
|
|
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
|
|
|
Error **errp);
|
|
|
|
|
2017-10-27 13:40:36 +03:00
|
|
|
|
|
|
|
/* nbd_read
|
|
|
|
* Reads @size bytes from @ioc. Returns 0 on success.
|
|
|
|
*/
|
|
|
|
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
return qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
|
|
|
|
}
|
|
|
|
|
2017-10-27 13:40:35 +03:00
|
|
|
/* True if @reply is a simple reply (discriminated by the shared @magic
 * field of the NBDReply union). */
static inline bool nbd_reply_is_simple(NBDReply *reply)
{
    return reply->magic == NBD_SIMPLE_REPLY_MAGIC;
}

/* True if @reply is a structured reply chunk. */
static inline bool nbd_reply_is_structured(NBDReply *reply)
{
    return reply->magic == NBD_STRUCTURED_REPLY_MAGIC;
}

/* Human-readable name of a structured reply chunk @type, for tracing. */
const char *nbd_reply_type_lookup(uint16_t type);
|
|
|
|
|
2008-05-28 01:13:40 +04:00
|
|
|
#endif
|