/*
 * Serving QEMU block devices via NBD
 *
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "hw/block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-block-export.h"
#include "qapi/qapi-commands-block-export.h"
#include "block/nbd.h"
#include "io/channel-socket.h"
#include "io/net-listener.h"

nbd/server: CVE-2024-7409: Close stray clients at server-stop

A malicious client can attempt to connect to an NBD server, and then
intentionally delay progress in the handshake, including if it does
not know the TLS secrets. Although the previous two patches reduce
this behavior by capping the default max-connections parameter and
killing slow clients, they did not eliminate the possibility of a
client waiting to close the socket until after the QMP nbd-server-stop
command is executed, at which point qemu would SEGV when trying to
dereference the NULL nbd_server global which is no longer present.
This amounts to a denial of service attack. Worse, if another NBD
server is started before the malicious client disconnects, I cannot
rule out additional adverse effects when the old client interferes
with the connection count of the new server (although the most likely
is a crash due to an assertion failure when checking
nbd_server->connections > 0).

For environments without this patch, the CVE can be mitigated by
ensuring (such as via a firewall) that only trusted clients can
connect to an NBD server. Note that using frameworks like libvirt
that ensure that TLS is used and that nbd-server-stop is not executed
while any trusted clients are still connected will only help if there
is also no possibility for an untrusted client to open a connection
but then stall on the NBD handshake.

Given the previous patches, it would be possible to guarantee that no
clients remain connected by having nbd-server-stop sleep for longer
than the default handshake deadline before finally freeing the global
nbd_server object, but that could make QMP non-responsive for a long
time. So instead, this patch fixes the problem by tracking all client
sockets opened while the server is running, and forcefully closing any
such sockets remaining without a completed handshake at the time of
nbd-server-stop, then waiting until the coroutines servicing those
sockets notice the state change. nbd-server-stop now has a second
AIO_WAIT_WHILE_UNLOCKED (the first is indirectly through the
blk_exp_close_all_type() that disconnects all clients that completed
handshakes), but forced socket shutdown is enough to progress the
coroutines and quickly tear down all clients before the server is
freed, thus finally fixing the CVE.

This patch relies heavily on the fact that nbd/server.c guarantees
that it only calls nbd_blockdev_client_closed() from the main loop
(see the assertion in nbd_client_put() and the hoops used in
nbd_client_put_nonzero() to achieve that); if we did not have that
guarantee, we would also need a mutex protecting our accesses of the
list of connections to survive re-entrancy from independent iothreads.

Although I did not actually try to test old builds, it looks like this
problem has existed since at least commit 862172f45c (v2.12.0, 2017) -
even back when that patch started using a QIONetListener to handle
listening on multiple sockets, nbd_server_free() was already unaware
that the nbd_blockdev_client_closed callback can be reached later by a
client thread that has not completed handshakes (and therefore the
client's socket never got added to the list closed in
nbd_export_close_all), despite that patch intentionally tearing down
the QIONetListener to prevent new clients.

Reported-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
Fixes: CVE-2024-7409
CC: qemu-stable@nongnu.org
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-ID: <20240807174943.771624-14-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>

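/*
 * Per-client bookkeeping added for CVE-2024-7409: one NBDConn is kept for
 * every socket accepted while the server is running, so that
 * nbd-server-stop can forcefully shut down connections whose clients
 * never completed the NBD handshake.
 */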
typedef struct NBDConn {
    QIOChannelSocket *cioc;
    QLIST_ENTRY(NBDConn) next;
} NBDConn;

nbd: allow authorization with nbd-server-start QMP command

As with the previous patch to qemu-nbd, the nbd-server-start QMP command
also needs to be able to specify authorization when enabling TLS encryption.

First the client must create a QAuthZ object instance using the
'object-add' command:

    {
      'execute': 'object-add',
      'arguments': {
        'qom-type': 'authz-list',
        'id': 'authz0',
        'parameters': {
          'policy': 'deny',
          'rules': [
            {
              'match': '*CN=fred',
              'policy': 'allow'
            }
          ]
        }
      }
    }

They can then reference this in the new 'tls-authz' parameter when
executing the 'nbd-server-start' command:

    {
      'execute': 'nbd-server-start',
      'arguments': {
        'addr': {
          'type': 'inet',
          'host': '127.0.0.1',
          'port': '9000'
        },
        'tls-creds': 'tls0',
        'tls-authz': 'authz0'
      }
    }

Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-Id: <20190227162035.18543-3-berrange@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>

typedef struct NBDServerData {
    QIONetListener *listener;
    QCryptoTLSCreds *tlscreds;
    char *tlsauthz;
    uint32_t max_connections;
    uint32_t connections;
    QLIST_HEAD(, NBDConn) conns;
} NBDServerData;

static NBDServerData *nbd_server;
static int qemu_nbd_connections = -1; /* Non-negative if this is qemu-nbd */

static void nbd_update_server_watch(NBDServerData *s);

void nbd_server_is_qemu_nbd(int max_connections)
{
    qemu_nbd_connections = max_connections;
}

bool nbd_server_is_running(void)
{
    return nbd_server || qemu_nbd_connections >= 0;
}

nbd/server: Allow MULTI_CONN for shared writable exports

According to the NBD spec, a server that advertises
NBD_FLAG_CAN_MULTI_CONN promises that multiple client connections will
not see any cache inconsistencies: when properly separated by a single
flush, actions performed by one client will be visible to another
client, regardless of which client did the flush.

We always satisfy these conditions in qemu - even when we support
multiple clients, ALL clients go through a single point of reference
into the block layer, with no local caching. The effect of one client
is instantly visible to the next client. Even if our backend were a
network device, we argue that any multi-path caching effects that
would cause inconsistencies in back-to-back actions not seeing the
effect of previous actions would be a bug in that backend, and not the
fault of caching in qemu. As such, it is safe to unconditionally
advertise CAN_MULTI_CONN for any qemu NBD server situation that
supports parallel clients.

Note, however, that we don't want to advertise CAN_MULTI_CONN when we
know that a second client cannot connect (for historical reasons,
qemu-nbd defaults to a single connection while nbd-server-add and QMP
commands default to unlimited connections; but we already have
existing means to let either style of NBD server creation alter those
defaults). This is visible by no longer advertising MULTI_CONN for
'qemu-nbd -r' without -e, as in the iotest nbd-qemu-allocation.

The harder part of this patch is setting up an iotest to demonstrate
behavior of multiple NBD clients to a single server. It might be
possible with parallel qemu-io processes, but I found it easier to do
in python with the help of libnbd, and help from Nir and Vladimir in
writing the test.

Signed-off-by: Eric Blake <eblake@redhat.com>
Suggested-by: Nir Soffer <nsoffer@redhat.com>
Suggested-by: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
Message-Id: <20220512004924.417153-3-eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

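To see the advertised flag from a client's point of view, something like the
following minimal sketch can be used; it is not part of this patch, and it
assumes libnbd's C API is available and that a server is listening at the
placeholder URI:

    #include <stdio.h>
    #include <libnbd.h>

    int main(void)
    {
        struct nbd_handle *nbd = nbd_create();

        /* Placeholder URI; point it at whatever export is being tested. */
        if (!nbd || nbd_connect_uri(nbd, "nbd://localhost:10809") == -1) {
            fprintf(stderr, "%s\n", nbd_get_error());
            return 1;
        }
        /* Prints 1 if the server advertised NBD_FLAG_CAN_MULTI_CONN. */
        printf("can_multi_conn: %d\n", nbd_can_multi_conn(nbd));
        nbd_shutdown(nbd, 0);
        nbd_close(nbd);
        return 0;
    }
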
int nbd_server_max_connections(void)
{
    return nbd_server ? nbd_server->max_connections : qemu_nbd_connections;
}

nbd: Fix regression on resiliency to port scan

Back in qemu 2.5, qemu-nbd was immune to port probes (a transient
server would not quit, regardless of how many probe connections
came and went, until a connection actually negotiated). But we
broke that in commit ee7d7aa when removing the return value to
nbd_client_new(), although that patch also introduced a bug causing
an assertion failure on a client that fails negotiation. We then
made it worse during refactoring in commit 1a6245a (a segfault
before we could even assert); the (masked) assertion was cleaned
up in d3780c2 (still in 2.6), and just recently we finally fixed
the segfault ("nbd: Fully initialize client in case of failed
negotiation"). But that still means that ever since we added
TLS support to qemu-nbd, we have been vulnerable to an ill-timed
port-scan being able to cause a denial of service by taking down
qemu-nbd before a real client has a chance to connect.

Since negotiation is now handled asynchronously via coroutines,
we no longer have a synchronous point of return by re-adding a
return value to nbd_client_new(). So this patch instead wires
things up to pass the negotiation status through the close_fn
callback function.

Simple test across two terminals:

    $ qemu-nbd -f raw -p 30001 file
    $ nmap 127.0.0.1 -p 30001 && \
      qemu-io -c 'r 0 512' -f raw nbd://localhost:30001

Note that this patch does not change what constitutes successful
negotiation (thus, a client must enter transmission phase before
that client can be considered as a reason to terminate the server
when the connection ends). Perhaps we may want to tweak things
in a later patch to also treat a client that uses NBD_OPT_ABORT
as being a 'successful' negotiation (the client correctly talked
the NBD protocol, and informed us it was not going to use our
export after all), but that's a discussion for another day.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1451614
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20170608222617.20376-1-eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

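/*
 * Called (only from the main loop) whenever a client connection goes away,
 * whether or not that client ever finished the NBD handshake; the
 * negotiation status passed by nbd/server.c is deliberately ignored here.
 */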
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
{
    NBDConn *conn = nbd_client_owner(client);

    assert(qemu_in_main_thread() && nbd_server);

    object_unref(OBJECT(conn->cioc));
    QLIST_REMOVE(conn, next);
    g_free(conn);

    nbd_client_put(client);
    assert(nbd_server->connections > 0);
    nbd_server->connections--;
    nbd_update_server_watch(nbd_server);
}

static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
                       gpointer opaque)
{
    NBDConn *conn = g_new0(NBDConn, 1);

    assert(qemu_in_main_thread() && nbd_server);
    nbd_server->connections++;
    object_ref(OBJECT(cioc));
    conn->cioc = cioc;
    QLIST_INSERT_HEAD(&nbd_server->conns, conn, next);
    nbd_update_server_watch(nbd_server);

    qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
    /* TODO - expose handshake timeout as QMP option */
    nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
                   nbd_server->tlscreds, nbd_server->tlsauthz,
                   nbd_blockdev_client_closed, conn);
}

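/*
 * (Un)register the accept handler: stop taking new clients once
 * max_connections is reached, and start again when
 * nbd_blockdev_client_closed() frees up a slot.
 */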
static void nbd_update_server_watch(NBDServerData *s)
{
    if (!s->max_connections || s->connections < s->max_connections) {
        qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL);
    } else {
        qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
    }
}

static void nbd_server_free(NBDServerData *server)
{
    NBDConn *conn, *tmp;

    if (!server) {
        return;
    }

    /*
     * Forcefully close the listener socket, and any clients that have
     * not yet disconnected on their own.
     */
    qio_net_listener_disconnect(server->listener);
    object_unref(OBJECT(server->listener));
    QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) {
        qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH,
                             NULL);
    }

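    /*
     * Wait until every remaining client has finished tearing down,
     * including clients still stuck in the handshake whose sockets were
     * forcefully shut down just above (CVE-2024-7409).
     */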
    AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0);

    if (server->tlscreds) {
        object_unref(OBJECT(server->tlscreds));
    }
    g_free(server->tlsauthz);

    g_free(server);
}

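/*
 * Resolve the QOM object registered under @id (for example via object-add)
 * and return it as server-side TLS credentials, or set @errp and return
 * NULL if no such object exists or it is not usable as a TLS server.
 */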
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_SERVER,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}

void nbd_server_start(SocketAddress *addr, const char *tls_creds,
                      const char *tls_authz, uint32_t max_connections,
                      Error **errp)
{
    if (nbd_server) {
        error_setg(errp, "NBD server already running");
        return;
    }

    nbd_server = g_new0(NBDServerData, 1);
    nbd_server->max_connections = max_connections;
    nbd_server->listener = qio_net_listener_new();

    qio_net_listener_set_name(nbd_server->listener,
                              "nbd-listener");

    /*
     * Because this server is persistent, a backlog of SOMAXCONN is
     * better than trying to size it to max_connections.
     */
    if (qio_net_listener_open_sync(nbd_server->listener, addr, SOMAXCONN,
                                   errp) < 0) {
        goto error;
    }

    if (tls_creds) {
        nbd_server->tlscreds = nbd_get_tls_creds(tls_creds, errp);
        if (!nbd_server->tlscreds) {
            goto error;
        }
    }

    nbd_server->tlsauthz = g_strdup(tls_authz);

    nbd_update_server_watch(nbd_server);

    return;

 error:
    nbd_server_free(nbd_server);
    nbd_server = NULL;
}

nbd/server: CVE-2024-7409: Cap default max-connections to 100

Allowing an unlimited number of clients to any web service is a recipe
for a rudimentary denial of service attack: the client merely needs to
open lots of sockets without closing them, until qemu no longer has
any more fds available to allocate.

For qemu-nbd, we default to allowing only 1 connection unless more are
explicitly asked for (-e or --shared); this was historically picked as
a nice default (without an explicit -t, a non-persistent qemu-nbd goes
away after a client disconnects, without needing any additional
follow-up commands), and we are not going to change that interface now
(besides, someday we want to point people towards qemu-storage-daemon
instead of qemu-nbd).

But for qemu proper, and the newer qemu-storage-daemon, the QMP
nbd-server-start command has historically had a default of unlimited
number of connections, in part because unlike qemu-nbd it is
inherently persistent until nbd-server-stop. Allowing multiple client
sockets is particularly useful for clients that can take advantage of
MULTI_CONN (creating parallel sockets to increase throughput),
although known clients that do so (such as libnbd's nbdcopy) typically
use only 8 or 16 connections (the benefits of scaling diminish once
more sockets are competing for kernel attention). Picking a number
large enough for typical use cases, but not unlimited, makes it
slightly harder for a malicious client to perform a denial of service
merely by opening lots of connections without progressing through the
handshake.

This change does not eliminate CVE-2024-7409 on its own, but reduces
the chance for fd exhaustion or unlimited memory usage as an attack
surface. On the other hand, by itself, it makes it more obvious that
with a finite limit, we have the problem of an unauthenticated client
holding 100 fds opened as a way to block out a legitimate client from
being able to connect; thus, later patches will further add timeouts
to reject clients that are not making progress.

This is an INTENTIONAL change in behavior, and will break any client
of nbd-server-start that was not passing an explicit max-connections
parameter, yet expects more than 100 simultaneous connections. We are
not aware of any such client (as stated above, most clients aware of
MULTI_CONN get by just fine on 8 or 16 connections, and probably cope
with later connections failing by relying on the earlier connections;
libvirt has not yet been passing max-connections, but generally
creates NBD servers with the intent for a single client for the sake
of live storage migration; meanwhile, the KubeSAN project anticipates
a large cluster sharing multiple clients [up to 8 per node, and up to
100 nodes in a cluster], but it currently uses qemu-nbd with an
explicit --shared=0 rather than qemu-storage-daemon with
nbd-server-start).

We considered using a deprecation period (declare that omitting
max-connections is deprecated, and make it mandatory in 3 releases -
then we don't need to pick an arbitrary default); that has zero risk
of breaking any apps that accidentally depended on more than 100
connections, and where such breakage might not be noticed under unit
testing but only under the larger loads of production usage. But it
does not close the denial-of-service hole until far into the future,
and requires all apps to change to add the parameter even if 100 was
good enough. It also has a drawback that any app (like libvirt) that
is accidentally relying on an unlimited default should seriously
consider their own CVE now, at which point they are going to change to
pass explicit max-connections sooner than waiting for 3 qemu releases.
Finally, if our changed default breaks an app, that app can always
pass in an explicit max-connections with a larger value.

It is also intentional that the HMP interface to nbd-server-start is
not changed to expose max-connections (any client needing to fine-tune
things should be using QMP).

Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-ID: <20240807174943.771624-12-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
[ericb: Expand commit message to summarize Dan's argument for why we
break corner-case back-compat behavior without a deprecation period]
Signed-off-by: Eric Blake <eblake@redhat.com>

void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
{
    if (!arg->has_max_connections) {
        arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
    }

    nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz,
                     arg->max_connections, errp);
}

void qmp_nbd_server_start(SocketAddressLegacy *addr,
                          const char *tls_creds,
                          const char *tls_authz,
                          bool has_max_connections, uint32_t max_connections,
                          Error **errp)
{
    SocketAddress *addr_flat = socket_address_flatten(addr);

nbd/server: CVE-2024-7409: Cap default max-connections to 100
Allowing an unlimited number of clients to any web service is a recipe
for a rudimentary denial of service attack: the client merely needs to
open lots of sockets without closing them, until qemu no longer has
any more fds available to allocate.
For qemu-nbd, we default to allowing only 1 connection unless more are
explicitly asked for (-e or --shared); this was historically picked as
a nice default (without an explicit -t, a non-persistent qemu-nbd goes
away after a client disconnects, without needing any additional
follow-up commands), and we are not going to change that interface now
(besides, someday we want to point people towards qemu-storage-daemon
instead of qemu-nbd).
But for qemu proper, and the newer qemu-storage-daemon, the QMP
nbd-server-start command has historically had a default of an unlimited
number of connections, in part because unlike qemu-nbd it is
inherently persistent until nbd-server-stop. Allowing multiple client
sockets is particularly useful for clients that can take advantage of
MULTI_CONN (creating parallel sockets to increase throughput),
although known clients that do so (such as libnbd's nbdcopy) typically
use only 8 or 16 connections (the benefits of scaling diminish once
more sockets are competing for kernel attention). Picking a number
large enough for typical use cases, but not unlimited, makes it
slightly harder for a malicious client to perform a denial of service
merely by opening lots of connections without progressing through the
handshake.
This change does not eliminate CVE-2024-7409 on its own, but reduces
the chance for fd exhaustion or unlimited memory usage as an attack
surface. On the other hand, by itself, it makes it more obvious that
with a finite limit, we have the problem of an unauthenticated client
holding 100 fds open as a way to block out a legitimate client from
being able to connect; thus, later patches will further add timeouts
to reject clients that are not making progress.
This is an INTENTIONAL change in behavior, and will break any client
of nbd-server-start that was not passing an explicit max-connections
parameter, yet expects more than 100 simultaneous connections. We are
not aware of any such client (as stated above, most clients aware of
MULTI_CONN get by just fine on 8 or 16 connections, and probably cope
with later connections failing by relying on the earlier connections;
libvirt has not yet been passing max-connections, but generally
creates NBD servers with the intent for a single client for the sake
of live storage migration; meanwhile, the KubeSAN project anticipates
a large cluster sharing multiple clients [up to 8 per node, and up to
100 nodes in a cluster], but it currently uses qemu-nbd with an
explicit --shared=0 rather than qemu-storage-daemon with
nbd-server-start).
We considered using a deprecation period (declare that omitting
max-connections is deprecated, and make it mandatory in 3 releases -
then we don't need to pick an arbitrary default); that has zero risk
of breaking any apps that accidentally depended on more than 100
connections, where such breakage might not be noticed under unit
testing but only under the larger loads of production usage. But it
does not close the denial-of-service hole until far into the future,
and requires all apps to change to add the parameter even if 100 was
good enough. It also has the drawback that any app (like libvirt) that
is accidentally relying on an unlimited default should seriously
consider their own CVE now, at which point they are going to change to
pass explicit max-connections sooner than waiting for 3 qemu releases.
Finally, if our changed default breaks an app, that app can always
pass in an explicit max-connections with a larger value.
It is also intentional that the HMP interface to nbd-server-start is
not changed to expose max-connections (any client needing to fine-tune
things should be using QMP).
Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-ID: <20240807174943.771624-12-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
[ericb: Expand commit message to summarize Dan's argument for why we
break corner-case back-compat behavior without a deprecation period]
Signed-off-by: Eric Blake <eblake@redhat.com>
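As a minimal illustration of the defaulting rule described above (not part
of this file): an omitted max-connections now resolves to a finite default
instead of "unlimited". The helper below is hypothetical, and the value 100
is taken from the commit title; the real constant lives in QEMU's NBD
headers.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch only: 100 is the cap named in the commit title. */
#define NBD_DEFAULT_MAX_CONNECTIONS 100

/* Hypothetical helper mirroring the defaulting applied in both
 * qmp_nbd_server_start() variants in this file: when the QMP caller omits
 * max-connections, fall back to the finite default. */
static inline uint32_t nbd_effective_max_connections(bool has_max,
                                                     uint32_t max)
{
    return has_max ? max : NBD_DEFAULT_MAX_CONNECTIONS;
}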
2024-08-06 21:53:00 +03:00
|
|
|
if (!has_max_connections) {
|
|
|
|
max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
|
|
|
|
}
|
|
|
|
|
2020-09-24 18:26:54 +03:00
|
|
|
nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
|
2017-04-26 10:36:41 +03:00
|
|
|
qapi_free_SocketAddress(addr_flat);
|
|
|
|
}
|
|
|
|
|
2020-09-24 18:27:01 +03:00
|
|
|
void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
|
2020-09-24 18:26:50 +03:00
|
|
|
{
|
2020-09-24 18:26:53 +03:00
|
|
|
BlockExport *export;
|
|
|
|
BlockDriverState *bs;
|
|
|
|
BlockBackend *on_eject_blk;
|
2020-09-24 18:27:01 +03:00
|
|
|
BlockExportOptions *export_opts;
|
2020-09-24 18:26:53 +03:00
|
|
|
|
|
|
|
bs = bdrv_lookup_bs(arg->device, arg->device, errp);
|
|
|
|
if (!bs) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-09-24 18:27:01 +03:00
|
|
|
/*
|
|
|
|
* block-export-add would default to the node-name, but we may have to use
|
|
|
|
* the device name as a default here for compatibility.
|
|
|
|
*/
|
2022-11-04 19:06:52 +03:00
|
|
|
if (!arg->name) {
|
2020-10-27 08:05:48 +03:00
|
|
|
arg->name = g_strdup(arg->device);
|
2020-09-24 18:27:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
export_opts = g_new(BlockExportOptions, 1);
|
|
|
|
*export_opts = (BlockExportOptions) {
|
|
|
|
.type = BLOCK_EXPORT_TYPE_NBD,
|
2020-09-24 18:27:04 +03:00
|
|
|
.id = g_strdup(arg->name),
|
2020-09-24 18:27:01 +03:00
|
|
|
.node_name = g_strdup(bdrv_get_node_name(bs)),
|
2020-09-24 18:27:11 +03:00
|
|
|
.has_writable = arg->has_writable,
|
|
|
|
.writable = arg->writable,
|
2020-09-24 18:26:50 +03:00
|
|
|
};
|
2020-10-27 08:05:49 +03:00
|
|
|
QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd,
|
2020-10-27 08:05:48 +03:00
|
|
|
qapi_NbdServerAddOptions_base(arg));
|
2022-11-04 19:06:52 +03:00
|
|
|
if (arg->bitmap) {
|
qapi: nbd-export: allow select bitmaps by node/name pair
The current logic of relying on a search through the backing chain is
neither safe nor convenient. Sometimes it leads to the necessity of
extra bitmap copying. Also, we are going to add a "snapshot-access"
driver to access some snapshot state through NBD. That driver is not
formally a filter, and of course it's not a COW format driver, so
searching through the backing chain will not work. Instead of widening
the workaround of bitmap searching, let's extend the interface so that
the user can select a bitmap precisely.
Note that checking for bitmap active status is not copied to the new
API; I don't see a reason for it, and the user should understand the
risks. In any case, a bitmap from another node is unrelated to this
export being read-only or read-write.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@openvz.org>
Message-Id: <20220314213226.362217-3-v.sementsov-og@mail.ru>
[eblake: Adjust S-o-b to Vladimir's new email, with permission]
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2022-03-15 00:32:25 +03:00
|
|
|
BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1);
|
|
|
|
|
|
|
|
*el = (BlockDirtyBitmapOrStr) {
|
|
|
|
.type = QTYPE_QSTRING,
|
|
|
|
.u.local = g_strdup(arg->bitmap),
|
|
|
|
};
|
2020-10-27 08:05:49 +03:00
|
|
|
export_opts->u.nbd.has_bitmaps = true;
|
qapi: nbd-export: allow select bitmaps by node/name pair
The current logic of relying on a search through the backing chain is
neither safe nor convenient. Sometimes it leads to the necessity of
extra bitmap copying. Also, we are going to add a "snapshot-access"
driver to access some snapshot state through NBD. That driver is not
formally a filter, and of course it's not a COW format driver, so
searching through the backing chain will not work. Instead of widening
the workaround of bitmap searching, let's extend the interface so that
the user can select a bitmap precisely.
Note that checking for bitmap active status is not copied to the new
API; I don't see a reason for it, and the user should understand the
risks. In any case, a bitmap from another node is unrelated to this
export being read-only or read-write.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@openvz.org>
Message-Id: <20220314213226.362217-3-v.sementsov-og@mail.ru>
[eblake: Adjust S-o-b to Vladimir's new email, with permission]
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2022-03-15 00:32:25 +03:00
|
|
|
QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, el);
|
2020-10-27 08:05:49 +03:00
|
|
|
}
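The block above wraps a plain bitmap name in the string ('local') branch of
the BlockDirtyBitmapOrStr alternate. As a hedged sketch of the node/name
selection the commit message describes (the field names and the QTYPE_QDICT
discriminator are assumptions about the generated QAPI type, and
"node0"/"bitmap0" are placeholder values), the other branch could be
populated roughly like this:

/* Sketch only: select a bitmap by an explicit node/name pair instead of
 * searching the backing chain for a plain string name. */
BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1);

*el = (BlockDirtyBitmapOrStr) {
    .type = QTYPE_QDICT,
    .u.external.node = g_strdup("node0"),    /* placeholder node name */
    .u.external.name = g_strdup("bitmap0"),  /* placeholder bitmap name */
};

export_opts->u.nbd.has_bitmaps = true;
QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, el);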
|
2020-09-24 18:26:53 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* nbd-server-add doesn't complain when a read-only device should be
|
|
|
|
* exported as writable, but simply downgrades it. This is an error with
|
|
|
|
* block-export-add.
|
|
|
|
*/
|
|
|
|
if (bdrv_is_read_only(bs)) {
|
2020-09-24 18:27:11 +03:00
|
|
|
export_opts->has_writable = true;
|
|
|
|
export_opts->writable = false;
|
2020-09-24 18:26:53 +03:00
|
|
|
}
|
|
|
|
|
2020-09-24 18:27:01 +03:00
|
|
|
export = blk_exp_add(export_opts, errp);
|
2020-09-24 18:26:53 +03:00
|
|
|
if (!export) {
|
2020-09-24 18:27:01 +03:00
|
|
|
goto fail;
|
2020-09-24 18:26:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nbd-server-add removes the export when the named BlockBackend used for
|
|
|
|
* @device goes away.
|
|
|
|
*/
|
|
|
|
on_eject_blk = blk_by_name(arg->device);
|
|
|
|
if (on_eject_blk) {
|
|
|
|
nbd_export_set_on_eject_blk(export, on_eject_blk);
|
|
|
|
}
|
2020-09-24 18:27:01 +03:00
|
|
|
|
|
|
|
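/* Note: reached on both the success and failure paths; the temporary
 * export options are freed either way. */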
fail:
|
|
|
|
qapi_free_BlockExportOptions(export_opts);
|
2012-08-22 18:43:07 +04:00
|
|
|
}
|
|
|
|
|
2018-01-19 16:57:16 +03:00
|
|
|
void qmp_nbd_server_remove(const char *name,
|
2020-09-24 18:27:06 +03:00
|
|
|
bool has_mode, BlockExportRemoveMode mode,
|
2018-01-19 16:57:16 +03:00
|
|
|
Error **errp)
|
|
|
|
{
|
2020-09-24 18:27:06 +03:00
|
|
|
BlockExport *exp;
|
2018-01-19 16:57:16 +03:00
|
|
|
|
2020-09-24 18:27:06 +03:00
|
|
|
exp = blk_exp_find(name);
|
|
|
|
if (exp && exp->drv->type != BLOCK_EXPORT_TYPE_NBD) {
|
|
|
|
error_setg(errp, "Block export '%s' is not an NBD export", name);
|
2018-01-19 16:57:16 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-09-24 18:27:06 +03:00
|
|
|
qmp_block_export_del(name, has_mode, mode, errp);
|
2018-01-19 16:57:16 +03:00
|
|
|
}
|
|
|
|
|
2012-08-22 18:43:07 +04:00
|
|
|
void qmp_nbd_server_stop(Error **errp)
|
|
|
|
{
|
2019-01-11 22:47:14 +03:00
|
|
|
if (!nbd_server) {
|
|
|
|
error_setg(errp, "NBD server not running");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-09-24 18:27:03 +03:00
|
|
|
blk_exp_close_all_type(BLOCK_EXPORT_TYPE_NBD);
|
2012-08-22 18:43:07 +04:00
|
|
|
|
2016-02-10 21:41:14 +03:00
|
|
|
nbd_server_free(nbd_server);
|
|
|
|
nbd_server = NULL;
|
2012-08-22 18:43:07 +04:00
|
|
|
}
|