Allowing an unlimited number of clients to connect to any network
service is a recipe for a rudimentary denial of service attack: the
client merely needs to open lots of sockets without closing them,
until qemu no longer has any fds available to allocate.
For qemu-nbd, we default to allowing only 1 connection unless more are
explicitly asked for (-e or --shared); this was historically picked as
a nice default (without an explicit -t, a non-persistent qemu-nbd goes
away after a client disconnects, without needing any additional
follow-up commands), and we are not going to change that interface now
(besides, someday we want to point people towards qemu-storage-daemon
instead of qemu-nbd).
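(For illustration, with a hypothetical image path, a persistent
qemu-nbd that accepts up to 16 simultaneous clients could be started
along the lines of:
  # -t keeps the server running after a client disconnects;
  # --shared raises the limit above the default of 1 (path is a placeholder)
  qemu-nbd -t --shared=16 /path/to/disk.qcow2
Omitting --shared keeps the historical single-client behavior.)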
But for qemu proper, and the newer qemu-storage-daemon, the QMP
nbd-server-start command has historically defaulted to an unlimited
number of connections, in part because unlike qemu-nbd it is
inherently persistent until nbd-server-stop. Allowing multiple client
sockets is particularly useful for clients that can take advantage of
MULTI_CONN (creating parallel sockets to increase throughput),
although known clients that do so (such as libnbd's nbdcopy) typically
use only 8 or 16 connections (the benefits of scaling diminish once
more sockets are competing for kernel attention). Picking a number
large enough for typical use cases, but not unlimited, makes it
slightly harder for a malicious client to perform a denial of service
merely by opening lots of connections without progressing through the
handshake.
This change does not eliminate CVE-2024-7409 on its own, but reduces
the chance for fd exhaustion or unlimited memory usage as an attack
surface. On the other hand, by itself, it makes it more obvious that
with a finite limit, we have the problem of an unauthenticated client
holding 100 fds open as a way to block out a legitimate client from
being able to connect; thus, later patches will further add timeouts
to reject clients that are not making progress.
This is an INTENTIONAL change in behavior, and will break any client
of nbd-server-start that was not passing an explicit max-connections
parameter, yet expects more than 100 simultaneous connections. We are
not aware of any such client (as stated above, most clients aware of
MULTI_CONN get by just fine on 8 or 16 connections, and probably cope
with later connections failing by relying on the earlier connections;
libvirt has not yet been passing max-connections, but generally
creates NBD servers with the intent for a single client for the sake
of live storage migration; meanwhile, the KubeSAN project anticipates
a large cluster sharing multiple clients [up to 8 per node, and up to
100 nodes in a cluster], but it currently uses qemu-nbd with an
explicit --shared=0 rather than qemu-storage-daemon with
nbd-server-start).
We considered using a deprecation period (declare that omitting
max-connections is deprecated, and make it mandatory in 3 releases -
then we don't need to pick an arbitrary default); that has zero risk
of breaking any apps that accidentally depended on more than 100
connections, and where such breakage might not be noticed under unit
testing but only under the larger loads of production usage. But it
does not close the denial-of-service hole until far into the future,
and requires all apps to change to add the parameter even if 100 was
good enough. It also has the drawback that any app (like libvirt) that
is accidentally relying on an unlimited default should seriously
consider its own CVE now, at which point it will change to pass an
explicit max-connections sooner than waiting for 3 qemu releases.
Finally, if our changed default breaks an app, that app can always
pass in an explicit max-connections with a larger value.
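For example (the host and port here are placeholders), such an app can
opt back in to a higher limit when starting the server:
  { "execute": "nbd-server-start",
    "arguments": { "addr": { "type": "inet",
                             "data": { "host": "0.0.0.0", "port": "10809" } },
                   "max-connections": 1000 } }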
It is also intentional that the HMP interface to nbd-server-start is
not changed to expose max-connections (any client needing to fine-tune
things should be using QMP).
Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-ID: <20240807174943.771624-12-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
[ericb: Expand commit message to summarize Dan's argument for why we
break corner-case back-compat behavior without a deprecation period]
Signed-off-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit c8a76dbd90)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
(Mjt: minor fixups in qapi/block-export.json)
/*
 * Serving QEMU block devices via NBD
 *
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "hw/block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-block-export.h"
#include "qapi/qapi-commands-block-export.h"
#include "block/nbd.h"
#include "io/channel-socket.h"
#include "io/net-listener.h"

typedef struct NBDServerData {
    QIONetListener *listener;
    QCryptoTLSCreds *tlscreds;
    char *tlsauthz;
    uint32_t max_connections;
    uint32_t connections;
} NBDServerData;

static NBDServerData *nbd_server;
static int qemu_nbd_connections = -1; /* Non-negative if this is qemu-nbd */

static void nbd_update_server_watch(NBDServerData *s);

void nbd_server_is_qemu_nbd(int max_connections)
{
    qemu_nbd_connections = max_connections;
}

bool nbd_server_is_running(void)
{
    return nbd_server || qemu_nbd_connections >= 0;
}

int nbd_server_max_connections(void)
{
    return nbd_server ? nbd_server->max_connections : qemu_nbd_connections;
}

static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
{
    nbd_client_put(client);
    assert(nbd_server->connections > 0);
    nbd_server->connections--;
    nbd_update_server_watch(nbd_server);
}

static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
                       gpointer opaque)
{
    nbd_server->connections++;
    nbd_update_server_watch(nbd_server);

    qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
    /* TODO - expose handshake timeout as QMP option */
    nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
                   nbd_server->tlscreds, nbd_server->tlsauthz,
                   nbd_blockdev_client_closed, NULL);
}

static void nbd_update_server_watch(NBDServerData *s)
{
    if (!s->max_connections || s->connections < s->max_connections) {
        qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL);
    } else {
        qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
    }
}

static void nbd_server_free(NBDServerData *server)
{
    if (!server) {
        return;
    }

    qio_net_listener_disconnect(server->listener);
    object_unref(OBJECT(server->listener));
    if (server->tlscreds) {
        object_unref(OBJECT(server->tlscreds));
    }
    g_free(server->tlsauthz);

    g_free(server);
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_SERVER,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}


void nbd_server_start(SocketAddress *addr, const char *tls_creds,
                      const char *tls_authz, uint32_t max_connections,
                      Error **errp)
{
    if (nbd_server) {
        error_setg(errp, "NBD server already running");
        return;
    }

    nbd_server = g_new0(NBDServerData, 1);
    nbd_server->max_connections = max_connections;
    nbd_server->listener = qio_net_listener_new();

    qio_net_listener_set_name(nbd_server->listener,
                              "nbd-listener");

    /*
     * Because this server is persistent, a backlog of SOMAXCONN is
     * better than trying to size it to max_connections.
     */
    if (qio_net_listener_open_sync(nbd_server->listener, addr, SOMAXCONN,
                                   errp) < 0) {
        goto error;
    }

    if (tls_creds) {
        nbd_server->tlscreds = nbd_get_tls_creds(tls_creds, errp);
        if (!nbd_server->tlscreds) {
            goto error;
        }
    }

    nbd_server->tlsauthz = g_strdup(tls_authz);

    nbd_update_server_watch(nbd_server);

    return;

 error:
    nbd_server_free(nbd_server);
    nbd_server = NULL;
}

void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
{
    if (!arg->has_max_connections) {
        arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
    }

    nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz,
                     arg->max_connections, errp);
}

void qmp_nbd_server_start(SocketAddressLegacy *addr,
                          bool has_tls_creds, const char *tls_creds,
                          bool has_tls_authz, const char *tls_authz,
                          bool has_max_connections, uint32_t max_connections,
                          Error **errp)
{
    SocketAddress *addr_flat = socket_address_flatten(addr);

    if (!has_max_connections) {
        max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
    }

    nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
    qapi_free_SocketAddress(addr_flat);
}

void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
{
    BlockExport *export;
    BlockDriverState *bs;
    BlockBackend *on_eject_blk;
    BlockExportOptions *export_opts;

    bs = bdrv_lookup_bs(arg->device, arg->device, errp);
    if (!bs) {
        return;
    }

    /*
     * block-export-add would default to the node-name, but we may have to use
     * the device name as a default here for compatibility.
     */
    if (!arg->has_name) {
        arg->has_name = true;
        arg->name = g_strdup(arg->device);
    }

    export_opts = g_new(BlockExportOptions, 1);
    *export_opts = (BlockExportOptions) {
        .type = BLOCK_EXPORT_TYPE_NBD,
        .id = g_strdup(arg->name),
        .node_name = g_strdup(bdrv_get_node_name(bs)),
        .has_writable = arg->has_writable,
        .writable = arg->writable,
    };
    QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd,
                       qapi_NbdServerAddOptions_base(arg));
    if (arg->has_bitmap) {
        BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1);

        *el = (BlockDirtyBitmapOrStr) {
            .type = QTYPE_QSTRING,
            .u.local = g_strdup(arg->bitmap),
        };
        export_opts->u.nbd.has_bitmaps = true;
        QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, el);
    }

    /*
     * nbd-server-add doesn't complain when a read-only device should be
     * exported as writable, but simply downgrades it. This is an error with
     * block-export-add.
     */
    if (bdrv_is_read_only(bs)) {
        export_opts->has_writable = true;
        export_opts->writable = false;
    }

    export = blk_exp_add(export_opts, errp);
    if (!export) {
        goto fail;
    }

    /*
     * nbd-server-add removes the export when the named BlockBackend used for
     * @device goes away.
     */
    on_eject_blk = blk_by_name(arg->device);
    if (on_eject_blk) {
        nbd_export_set_on_eject_blk(export, on_eject_blk);
    }

 fail:
    qapi_free_BlockExportOptions(export_opts);
}

void qmp_nbd_server_remove(const char *name,
                           bool has_mode, BlockExportRemoveMode mode,
                           Error **errp)
{
    BlockExport *exp;

    exp = blk_exp_find(name);
    if (exp && exp->drv->type != BLOCK_EXPORT_TYPE_NBD) {
        error_setg(errp, "Block export '%s' is not an NBD export", name);
        return;
    }

    qmp_block_export_del(name, has_mode, mode, errp);
}

void qmp_nbd_server_stop(Error **errp)
{
    if (!nbd_server) {
        error_setg(errp, "NBD server not running");
        return;
    }

    blk_exp_close_all_type(BLOCK_EXPORT_TYPE_NBD);

    nbd_server_free(nbd_server);
    nbd_server = NULL;
}