qemu/chardev/char-socket.c

1560 lines
47 KiB
C
Raw Normal View History

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "chardev/char.h"
#include "io/channel-socket.h"
#include "io/channel-websock.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-sockets.h"
#include "qemu/yank.h"
#include "chardev/char-io.h"
#include "chardev/char-socket.h"

static gboolean socket_reconnect_timeout(gpointer opaque);
static void tcp_chr_telnet_init(Chardev *chr);
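
/*
 * Chardev socket state machine: DISCONNECTED -> CONNECTING -> CONNECTED,
 * then back to DISCONNECTED on any close or error.  The asserts below
 * catch any transition that tries to skip a step.
 */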
static void tcp_chr_change_state(SocketChardev *s, TCPChardevState state)
{
    switch (state) {
    case TCP_CHARDEV_STATE_DISCONNECTED:
        break;
    case TCP_CHARDEV_STATE_CONNECTING:
        assert(s->state == TCP_CHARDEV_STATE_DISCONNECTED);
        break;
    case TCP_CHARDEV_STATE_CONNECTED:
        assert(s->state == TCP_CHARDEV_STATE_CONNECTING);
        break;
    }
    s->state = state;
}

static void tcp_chr_reconn_timer_cancel(SocketChardev *s)
{
    if (s->reconnect_timer) {
        g_source_destroy(s->reconnect_timer);
        g_source_unref(s->reconnect_timer);
        s->reconnect_timer = NULL;
    }
}

static void qemu_chr_socket_restart_timer(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    char *name;

    assert(s->state == TCP_CHARDEV_STATE_DISCONNECTED);
    assert(!s->reconnect_timer);
    name = g_strdup_printf("chardev-socket-reconnect-%s", chr->label);
    s->reconnect_timer = qemu_chr_timeout_add_ms(chr,
                                                 s->reconnect_time * 1000,
                                                 socket_reconnect_timeout,
                                                 chr);
    g_source_set_name(s->reconnect_timer, name);
    g_free(name);
}

static void check_report_connect_error(Chardev *chr,
                                       Error *err)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);

    if (!s->connect_err_reported) {
        error_reportf_err(err,
                          "Unable to connect character device %s: ",
                          chr->label);
        s->connect_err_reported = true;
    } else {
        error_free(err);
    }
    qemu_chr_socket_restart_timer(chr);
}

static void tcp_chr_accept(QIONetListener *listener,
                           QIOChannelSocket *cioc,
                           void *opaque);

static int tcp_chr_read_poll(void *opaque);

static void tcp_chr_disconnect_locked(Chardev *chr);

/* Called with chr_write_lock held. */
static int tcp_chr_write(Chardev *chr, const uint8_t *buf, int len)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);

    if (s->state == TCP_CHARDEV_STATE_CONNECTED) {
        int ret = io_channel_send_full(s->ioc, buf, len,
                                       s->write_msgfds,
                                       s->write_msgfds_num);

        /* free the written msgfds in any case
         * other than ret < 0 && errno == EAGAIN
         */
        if (!(ret < 0 && errno == EAGAIN)
            && s->write_msgfds_num) {
            g_free(s->write_msgfds);
            s->write_msgfds = NULL;
            s->write_msgfds_num = 0;
        }

        if (ret < 0 && errno != EAGAIN) {
            if (tcp_chr_read_poll(chr) <= 0) {
                /* Perform disconnect and return error. */
                tcp_chr_disconnect_locked(chr);
            } /* else let the read handler finish it properly */
        }

        return ret;
    } else {
        /* Indicate an error. */
        errno = EIO;
        return -1;
    }
}

static int tcp_chr_read_poll(void *opaque)
{
    Chardev *chr = CHARDEV(opaque);
    SocketChardev *s = SOCKET_CHARDEV(opaque);

    if (s->state != TCP_CHARDEV_STATE_CONNECTED) {
        return 0;
    }
    s->max_size = qemu_chr_be_can_write(chr);
    return s->max_size;
}

static void tcp_chr_process_IAC_bytes(Chardev *chr,
                                      SocketChardev *s,
                                      uint8_t *buf, int *size)
{
    /* Handle any telnet or tn3270 client's basic IAC options.
     * For telnet options, it satisfies char by char mode with no echo.
     * For tn3270 options, it satisfies binary mode with EOR.
     * All IAC options will be removed from the buf and the do_opt
     * pointer will be used to track the state of the width of the
     * IAC information.
     *
     * RFC854: "All TELNET commands consist of at least a two byte sequence.
     * The commands dealing with option negotiation are three byte sequences,
     * the third byte being the code for the option referenced."
     * "IAC BREAK", "IAC IP", "IAC NOP" and the double IAC are two bytes.
     * "IAC SB", "IAC SE" and "IAC EOR" are saved to split up data boundary
     * for tn3270.
     * NOP, Break and Interrupt Process (IP) might be encountered during a
     * TN3270 session, and NOP and IP need to be done later.
     */
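    /*
     * For example, a literal 0xff data byte arrives as the two-byte
     * escape { IAC, IAC } and is collapsed back to a single 0xff here,
     * while { IAC, BREAK } is consumed entirely and surfaces as a
     * CHR_EVENT_BREAK event instead of data.
     */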
    int i;
    int j = 0;

    for (i = 0; i < *size; i++) {
        if (s->do_telnetopt > 1) {
            if ((unsigned char)buf[i] == IAC && s->do_telnetopt == 2) {
                /* Double IAC means send an IAC */
                if (j != i) {
                    buf[j] = buf[i];
                }
                j++;
                s->do_telnetopt = 1;
            } else {
                if ((unsigned char)buf[i] == IAC_BREAK
                    && s->do_telnetopt == 2) {
                    /* Handle IAC break commands by sending a serial break */
                    qemu_chr_be_event(chr, CHR_EVENT_BREAK);
                    s->do_telnetopt++;
                } else if (s->is_tn3270 && ((unsigned char)buf[i] == IAC_EOR
                           || (unsigned char)buf[i] == IAC_SB
                           || (unsigned char)buf[i] == IAC_SE)
                           && s->do_telnetopt == 2) {
                    buf[j++] = IAC;
                    buf[j++] = buf[i];
                    s->do_telnetopt++;
                } else if (s->is_tn3270 && ((unsigned char)buf[i] == IAC_IP
                           || (unsigned char)buf[i] == IAC_NOP)
                           && s->do_telnetopt == 2) {
                    /* TODO: IP and NOP need to be implemented later. */
                    s->do_telnetopt++;
                }
                s->do_telnetopt++;
            }
            if (s->do_telnetopt >= 4) {
                s->do_telnetopt = 1;
            }
        } else {
            if ((unsigned char)buf[i] == IAC) {
                s->do_telnetopt = 2;
            } else {
                if (j != i) {
                    buf[j] = buf[i];
                }
                j++;
            }
        }
    }
    *size = j;
}
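
/*
 * Hand over up to @num file descriptors received via SCM_RIGHTS to the
 * caller.  When descriptors are handed over, any extras beyond @num are
 * closed rather than leaked, and the pending set is cleared.
 */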
static int tcp_get_msgfds(Chardev *chr, int *fds, int num)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    int to_copy = (s->read_msgfds_num < num) ? s->read_msgfds_num : num;

    assert(num <= TCP_MAX_FDS);

    if (to_copy) {
        int i;

        memcpy(fds, s->read_msgfds, to_copy * sizeof(int));

        /* Close unused fds */
        for (i = to_copy; i < s->read_msgfds_num; i++) {
            close(s->read_msgfds[i]);
        }

        g_free(s->read_msgfds);
        s->read_msgfds = NULL;
        s->read_msgfds_num = 0;
    }

    return to_copy;
}

static int tcp_set_msgfds(Chardev *chr, int *fds, int num)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);

    /* clear old pending fd array */
    g_free(s->write_msgfds);
    s->write_msgfds = NULL;
    s->write_msgfds_num = 0;

    if ((s->state != TCP_CHARDEV_STATE_CONNECTED) ||
        !qio_channel_has_feature(s->ioc,
                                 QIO_CHANNEL_FEATURE_FD_PASS)) {
        return -1;
    }

    if (num) {
        s->write_msgfds = g_new(int, num);
        memcpy(s->write_msgfds, fds, num * sizeof(int));
    }

    s->write_msgfds_num = num;

    return 0;
}
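
/*
 * Read payload bytes and, when the channel supports FD passing, any
 * SCM_RIGHTS file descriptors.  Newly received descriptors replace the
 * pending set; the previous ones are closed so they cannot leak.
 */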
static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    int ret;
    size_t i;
    int *msgfds = NULL;
    size_t msgfds_num = 0;

    if (qio_channel_has_feature(s->ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
        ret = qio_channel_readv_full(s->ioc, &iov, 1,
                                     &msgfds, &msgfds_num,
                                     NULL);
    } else {
        ret = qio_channel_readv_full(s->ioc, &iov, 1,
                                     NULL, NULL,
                                     NULL);
    }

    if (ret == QIO_CHANNEL_ERR_BLOCK) {
        errno = EAGAIN;
        ret = -1;
    } else if (ret == -1) {
        errno = EIO;
    }

    if (msgfds_num) {
        /* close and clean read_msgfds */
        for (i = 0; i < s->read_msgfds_num; i++) {
            close(s->read_msgfds[i]);
        }

        if (s->read_msgfds_num) {
            g_free(s->read_msgfds);
        }

        s->read_msgfds = msgfds;
        s->read_msgfds_num = msgfds_num;
    }

    for (i = 0; i < s->read_msgfds_num; i++) {
        int fd = s->read_msgfds[i];
        if (fd < 0) {
            continue;
        }

        /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */
        qemu_set_block(fd);

#ifndef MSG_CMSG_CLOEXEC
        qemu_set_cloexec(fd);
#endif
    }

    return ret;
}

static GSource *tcp_chr_add_watch(Chardev *chr, GIOCondition cond)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    if (!s->ioc) {
        return NULL;
    }
    return qio_channel_create_watch(s->ioc, cond);
}

static void remove_hup_source(SocketChardev *s)
{
    if (s->hup_source != NULL) {
        g_source_destroy(s->hup_source);
        g_source_unref(s->hup_source);
        s->hup_source = NULL;
    }
}
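
/*
 * Yank callback: forcibly shut down the socket so that I/O blocked on
 * an unresponsive peer can be recovered, e.g. via the QMP 'yank'
 * command.
 */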
static void char_socket_yank_iochannel(void *opaque)
{
    QIOChannel *ioc = QIO_CHANNEL(opaque);

    qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}

static void tcp_chr_free_connection(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    int i;

    if (s->read_msgfds_num) {
        for (i = 0; i < s->read_msgfds_num; i++) {
            close(s->read_msgfds[i]);
        }
        g_free(s->read_msgfds);
        s->read_msgfds = NULL;
        s->read_msgfds_num = 0;
    }

    remove_hup_source(s);
    tcp_set_msgfds(chr, NULL, 0);
    remove_fd_in_watch(chr);
    if (s->registered_yank &&
        (s->state == TCP_CHARDEV_STATE_CONNECTING
         || s->state == TCP_CHARDEV_STATE_CONNECTED)) {
        yank_unregister_function(CHARDEV_YANK_INSTANCE(chr->label),
                                 char_socket_yank_iochannel,
                                 QIO_CHANNEL(s->sioc));
    }
    object_unref(OBJECT(s->sioc));
    s->sioc = NULL;
    object_unref(OBJECT(s->ioc));
    s->ioc = NULL;

    g_free(chr->filename);
    chr->filename = NULL;

    tcp_chr_change_state(s, TCP_CHARDEV_STATE_DISCONNECTED);
}

static const char *qemu_chr_socket_protocol(SocketChardev *s)
{
    if (s->is_telnet) {
        return "telnet";
    }
    return s->is_websock ? "websocket" : "tcp";
}

static char *qemu_chr_socket_address(SocketChardev *s, const char *prefix)
{
    switch (s->addr->type) {
    case SOCKET_ADDRESS_TYPE_INET:
        return g_strdup_printf("%s%s:%s:%s%s", prefix,
                               qemu_chr_socket_protocol(s),
                               s->addr->u.inet.host,
                               s->addr->u.inet.port,
                               s->is_listen ? ",server=on" : "");
        break;
    case SOCKET_ADDRESS_TYPE_UNIX:
    {
        const char *tight = "", *abstract = "";
        UnixSocketAddress *sa = &s->addr->u.q_unix;

#ifdef CONFIG_LINUX
        if (sa->has_abstract && sa->abstract) {
            abstract = ",abstract=on";
            if (sa->has_tight && sa->tight) {
                tight = ",tight=on";
            }
        }
#endif
        return g_strdup_printf("%sunix:%s%s%s%s", prefix, sa->path,
                               abstract, tight,
                               s->is_listen ? ",server=on" : "");
        break;
    }
    case SOCKET_ADDRESS_TYPE_FD:
        return g_strdup_printf("%sfd:%s%s", prefix, s->addr->u.fd.str,
                               s->is_listen ? ",server=on" : "");
        break;
    case SOCKET_ADDRESS_TYPE_VSOCK:
        return g_strdup_printf("%svsock:%s:%s", prefix,
                               s->addr->u.vsock.cid,
                               s->addr->u.vsock.port);
    default:
        abort();
    }
}
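
/*
 * For example (illustrative values), an INET telnet listener renders as
 * "telnet:127.0.0.1:4444,server=on"; while no client is attached,
 * update_disconnected_filename() below prefixes this with
 * "disconnected:".
 */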

static void update_disconnected_filename(SocketChardev *s)
{
    Chardev *chr = CHARDEV(s);

    g_free(chr->filename);
    if (s->addr) {
        chr->filename = qemu_chr_socket_address(s, "disconnected:");
    } else {
        chr->filename = g_strdup("disconnected:socket");
    }
}

/* NB may be called even if tcp_chr_connect has not been
 * reached, due to TLS or telnet initialization failure,
 * so can *not* assume s->state == TCP_CHARDEV_STATE_CONNECTED.
 * This must be called with chr->chr_write_lock held.
 */
static void tcp_chr_disconnect_locked(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    bool emit_close = s->state == TCP_CHARDEV_STATE_CONNECTED;

    tcp_chr_free_connection(chr);

    if (s->listener) {
        qio_net_listener_set_client_func_full(s->listener, tcp_chr_accept,
                                              chr, NULL, chr->gcontext);
    }
    update_disconnected_filename(s);
    if (emit_close) {
        qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
    }
    if (s->reconnect_time && !s->reconnect_timer) {
        qemu_chr_socket_restart_timer(chr);
    }
}

static void tcp_chr_disconnect(Chardev *chr)
{
    qemu_mutex_lock(&chr->chr_write_lock);
    tcp_chr_disconnect_locked(chr);
    qemu_mutex_unlock(&chr->chr_write_lock);
}

static gboolean tcp_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque)
{
    Chardev *chr = CHARDEV(opaque);
    SocketChardev *s = SOCKET_CHARDEV(opaque);
    uint8_t buf[CHR_READ_BUF_LEN];
    int len, size;

    if ((s->state != TCP_CHARDEV_STATE_CONNECTED) ||
        s->max_size <= 0) {
        return TRUE;
    }
    len = sizeof(buf);
    if (len > s->max_size) {
        len = s->max_size;
    }
    size = tcp_chr_recv(chr, (void *)buf, len);
    if (size == 0 || (size == -1 && errno != EAGAIN)) {
        /* connection closed */
        tcp_chr_disconnect(chr);
    } else if (size > 0) {
        if (s->do_telnetopt) {
            tcp_chr_process_IAC_bytes(chr, s, buf, &size);
        }
        if (size > 0) {
            qemu_chr_be_write(chr, buf, size);
        }
    }

    return TRUE;
}

static gboolean tcp_chr_hup(QIOChannel *channel,
                            GIOCondition cond,
                            void *opaque)
{
    Chardev *chr = CHARDEV(opaque);
    tcp_chr_disconnect(chr);
    return G_SOURCE_REMOVE;
}

static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    int size;

    if (s->state != TCP_CHARDEV_STATE_CONNECTED) {
        return 0;
    }

    qio_channel_set_blocking(s->ioc, true, NULL);
    size = tcp_chr_recv(chr, (void *) buf, len);
    if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
        qio_channel_set_blocking(s->ioc, false, NULL);
    }
    if (size == 0) {
        /* connection closed */
        tcp_chr_disconnect(chr);
    }

    return size;
}

static char *qemu_chr_compute_filename(SocketChardev *s)
{
    struct sockaddr_storage *ss = &s->sioc->localAddr;
    struct sockaddr_storage *ps = &s->sioc->remoteAddr;
    socklen_t ss_len = s->sioc->localAddrLen;
    socklen_t ps_len = s->sioc->remoteAddrLen;
    char shost[NI_MAXHOST], sserv[NI_MAXSERV];
    char phost[NI_MAXHOST], pserv[NI_MAXSERV];
    const char *left = "", *right = "";

    switch (ss->ss_family) {
#ifndef _WIN32
    case AF_UNIX:
        return g_strdup_printf("unix:%s%s",
                               ((struct sockaddr_un *)(ss))->sun_path,
                               s->is_listen ? ",server=on" : "");
#endif
    case AF_INET6:
        left = "[";
        right = "]";
        /* fall through */
    case AF_INET:
        getnameinfo((struct sockaddr *) ss, ss_len, shost, sizeof(shost),
                    sserv, sizeof(sserv), NI_NUMERICHOST | NI_NUMERICSERV);
        getnameinfo((struct sockaddr *) ps, ps_len, phost, sizeof(phost),
                    pserv, sizeof(pserv), NI_NUMERICHOST | NI_NUMERICSERV);
        return g_strdup_printf("%s:%s%s%s:%s%s <-> %s%s%s:%s",
                               qemu_chr_socket_protocol(s),
                               left, shost, right, sserv,
                               s->is_listen ? ",server=on" : "",
                               left, phost, right, pserv);
    default:
        return g_strdup_printf("unknown");
    }
}
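
/*
 * (Re)attach the read/poll watch and a dedicated G_IO_HUP watch on the
 * current channel.  The separate hangup source ensures a peer
 * disconnect is noticed even while the frontend cannot accept data and
 * the read watch is therefore idle.
 */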
static void update_ioc_handlers(SocketChardev *s)
{
    Chardev *chr = CHARDEV(s);

    if (s->state != TCP_CHARDEV_STATE_CONNECTED) {
        return;
    }

    remove_fd_in_watch(chr);
    chr->gsource = io_add_watch_poll(chr, s->ioc,
                                     tcp_chr_read_poll,
                                     tcp_chr_read, chr,
                                     chr->gcontext);

    remove_hup_source(s);
    s->hup_source = qio_channel_create_watch(s->ioc, G_IO_HUP);
    g_source_set_callback(s->hup_source, (GSourceFunc)tcp_chr_hup,
                          chr, NULL);
    g_source_attach(s->hup_source, chr->gcontext);
}

static void tcp_chr_connect(void *opaque)
{
    Chardev *chr = CHARDEV(opaque);
    SocketChardev *s = SOCKET_CHARDEV(opaque);

    g_free(chr->filename);
    chr->filename = qemu_chr_compute_filename(s);

    tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTED);
    update_ioc_handlers(s);
    qemu_chr_be_event(chr, CHR_EVENT_OPENED);
}

static void tcp_chr_telnet_destroy(SocketChardev *s)
{
    if (s->telnet_source) {
        g_source_destroy(s->telnet_source);
        g_source_unref(s->telnet_source);
        s->telnet_source = NULL;
    }
}

static void tcp_chr_update_read_handler(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);

    if (s->listener && s->state == TCP_CHARDEV_STATE_DISCONNECTED) {
        /*
         * It's possible that the chardev context was changed in
         * qemu_chr_be_update_read_handlers().  Reset it for the QIO net
         * listener if there is one.
         */
        qio_net_listener_set_client_func_full(s->listener, tcp_chr_accept,
                                              chr, NULL, chr->gcontext);
    }

    if (s->telnet_source) {
        tcp_chr_telnet_init(CHARDEV(s));
    }

    update_ioc_handlers(s);
}

static gboolean tcp_chr_telnet_init_io(QIOChannel *ioc,
                                       GIOCondition cond G_GNUC_UNUSED,
                                       gpointer user_data)
{
    SocketChardev *s = user_data;
    Chardev *chr = CHARDEV(s);
    TCPChardevTelnetInit *init = s->telnet_init;
    ssize_t ret;

    assert(init);

    ret = qio_channel_write(ioc, init->buf, init->buflen, NULL);
    if (ret < 0) {
        if (ret == QIO_CHANNEL_ERR_BLOCK) {
            ret = 0;
        } else {
            tcp_chr_disconnect(chr);
            goto end;
        }
    }
    init->buflen -= ret;

    if (init->buflen == 0) {
        tcp_chr_connect(chr);
        goto end;
    }

    memmove(init->buf, init->buf + ret, init->buflen);

    return G_SOURCE_CONTINUE;

end:
    g_free(s->telnet_init);
    s->telnet_init = NULL;
    g_source_unref(s->telnet_source);
    s->telnet_source = NULL;
    return G_SOURCE_REMOVE;
}

static void tcp_chr_telnet_init(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    TCPChardevTelnetInit *init;
    size_t n = 0;

    /* Destroy existing task */
    tcp_chr_telnet_destroy(s);

    if (s->telnet_init) {
        /* We are possibly during a handshake already */
        goto cont;
    }

    s->telnet_init = g_new0(TCPChardevTelnetInit, 1);
    init = s->telnet_init;

#define IACSET(x, a, b, c)  \
    do {                    \
        x[n++] = a;         \
        x[n++] = b;         \
        x[n++] = c;         \
    } while (0)

    if (!s->is_tn3270) {
        init->buflen = 12;
        /* Prep the telnet negotiation to put telnet in binary,
         * no echo, single char mode */
        IACSET(init->buf, 0xff, 0xfb, 0x01);  /* IAC WILL ECHO */
        IACSET(init->buf, 0xff, 0xfb, 0x03);  /* IAC WILL Suppress go ahead */
        IACSET(init->buf, 0xff, 0xfb, 0x00);  /* IAC WILL Binary */
        IACSET(init->buf, 0xff, 0xfd, 0x00);  /* IAC DO Binary */
    } else {
        init->buflen = 21;
        /* Prep the TN3270 negotiation based on RFC 1576 */
        IACSET(init->buf, 0xff, 0xfd, 0x19);  /* IAC DO EOR */
        IACSET(init->buf, 0xff, 0xfb, 0x19);  /* IAC WILL EOR */
        IACSET(init->buf, 0xff, 0xfd, 0x00);  /* IAC DO BINARY */
        IACSET(init->buf, 0xff, 0xfb, 0x00);  /* IAC WILL BINARY */
        IACSET(init->buf, 0xff, 0xfd, 0x18);  /* IAC DO TERMINAL TYPE */
        IACSET(init->buf, 0xff, 0xfa, 0x18);  /* IAC SB TERMINAL TYPE */
        IACSET(init->buf, 0x01, 0xff, 0xf0);  /* SEND IAC SE */
    }

#undef IACSET

cont:
    s->telnet_source = qio_channel_add_watch_source(s->ioc, G_IO_OUT,
                                                    tcp_chr_telnet_init_io,
                                                    s, NULL,
                                                    chr->gcontext);
}
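
/*
 * A typical way to exercise the telnet path (illustrative command
 * line):
 *
 *   qemu-system-x86_64 \
 *     -chardev socket,id=mon0,host=127.0.0.1,port=4444,server=on,wait=off,telnet=on \
 *     -mon chardev=mon0
 *
 * The negotiation above is written to each new client before the
 * chardev reports CHR_EVENT_OPENED.
 */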

static void tcp_chr_websock_handshake(QIOTask *task, gpointer user_data)
{
    Chardev *chr = user_data;
    SocketChardev *s = user_data;

    if (qio_task_propagate_error(task, NULL)) {
        tcp_chr_disconnect(chr);
    } else {
        if (s->do_telnetopt) {
            tcp_chr_telnet_init(chr);
        } else {
            tcp_chr_connect(chr);
        }
    }
}

static void tcp_chr_websock_init(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    QIOChannelWebsock *wioc = NULL;
    gchar *name;

    wioc = qio_channel_websock_new_server(s->ioc);

    name = g_strdup_printf("chardev-websocket-server-%s", chr->label);
    qio_channel_set_name(QIO_CHANNEL(wioc), name);
    g_free(name);

    object_unref(OBJECT(s->ioc));
    s->ioc = QIO_CHANNEL(wioc);

    qio_channel_websock_handshake(wioc, tcp_chr_websock_handshake, chr, NULL);
}

static void tcp_chr_tls_handshake(QIOTask *task,
                                  gpointer user_data)
{
    Chardev *chr = user_data;
    SocketChardev *s = user_data;

    if (qio_task_propagate_error(task, NULL)) {
        tcp_chr_disconnect(chr);
    } else {
        if (s->is_websock) {
            tcp_chr_websock_init(chr);
        } else if (s->do_telnetopt) {
            tcp_chr_telnet_init(chr);
        } else {
            tcp_chr_connect(chr);
        }
    }
}

static void tcp_chr_tls_init(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    QIOChannelTLS *tioc;
    gchar *name;

    if (s->is_listen) {
        tioc = qio_channel_tls_new_server(
            s->ioc, s->tls_creds,
            s->tls_authz,
            NULL);
    } else {
        tioc = qio_channel_tls_new_client(
            s->ioc, s->tls_creds,
            s->addr->u.inet.host,
            NULL);
    }
    if (tioc == NULL) {
        tcp_chr_disconnect(chr);
        return;
    }
    name = g_strdup_printf("chardev-tls-%s-%s",
                           s->is_listen ? "server" : "client",
                           chr->label);
    qio_channel_set_name(QIO_CHANNEL(tioc), name);
    g_free(name);
    object_unref(OBJECT(s->ioc));
    s->ioc = QIO_CHANNEL(tioc);

    qio_channel_tls_handshake(tioc,
                              tcp_chr_tls_handshake,
                              chr,
                              NULL,
                              chr->gcontext);
}

static void tcp_chr_set_client_ioc_name(Chardev *chr,
                                        QIOChannelSocket *sioc)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    char *name;

    name = g_strdup_printf("chardev-tcp-%s-%s",
                           s->is_listen ? "server" : "client",
                           chr->label);
    qio_channel_set_name(QIO_CHANNEL(sioc), name);
    g_free(name);
}

static int tcp_chr_new_client(Chardev *chr, QIOChannelSocket *sioc)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);

    if (s->state != TCP_CHARDEV_STATE_CONNECTING) {
        return -1;
    }

    s->ioc = QIO_CHANNEL(sioc);
    object_ref(OBJECT(sioc));
    s->sioc = sioc;
    object_ref(OBJECT(sioc));

    qio_channel_set_blocking(s->ioc, false, NULL);

    if (s->do_nodelay) {
        qio_channel_set_delay(s->ioc, false);
    }
    if (s->listener) {
        qio_net_listener_set_client_func_full(s->listener, NULL, NULL,
                                              NULL, chr->gcontext);
    }

    if (s->tls_creds) {
        tcp_chr_tls_init(chr);
    } else if (s->is_websock) {
        tcp_chr_websock_init(chr);
    } else if (s->do_telnetopt) {
        tcp_chr_telnet_init(chr);
    } else {
        tcp_chr_connect(chr);
    }

    return 0;
}
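
/*
 * Entry point for externally supplied connections (e.g. the QMP
 * 'add_client' command): wrap the raw fd in a QIOChannelSocket and run
 * it through the same setup path as connections we accepted ourselves.
 */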
static int tcp_chr_add_client(Chardev *chr, int fd)
{
    int ret;
    QIOChannelSocket *sioc;
    SocketChardev *s = SOCKET_CHARDEV(chr);

    if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
        return -1;
    }

    sioc = qio_channel_socket_new_fd(fd, NULL);
    if (!sioc) {
        return -1;
    }
    tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
    tcp_chr_set_client_ioc_name(chr, sioc);
    if (s->registered_yank) {
        yank_register_function(CHARDEV_YANK_INSTANCE(chr->label),
                               char_socket_yank_iochannel,
                               QIO_CHANNEL(sioc));
    }
    ret = tcp_chr_new_client(chr, sioc);
    object_unref(OBJECT(sioc));
    return ret;
}

static void tcp_chr_accept(QIONetListener *listener,
                           QIOChannelSocket *cioc,
                           void *opaque)
{
    Chardev *chr = CHARDEV(opaque);
    SocketChardev *s = SOCKET_CHARDEV(chr);

    tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
    tcp_chr_set_client_ioc_name(chr, cioc);
    if (s->registered_yank) {
        yank_register_function(CHARDEV_YANK_INSTANCE(chr->label),
                               char_socket_yank_iochannel,
                               QIO_CHANNEL(cioc));
    }
    tcp_chr_new_client(chr, cioc);
}

static int tcp_chr_connect_client_sync(Chardev *chr, Error **errp)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    QIOChannelSocket *sioc = qio_channel_socket_new();

    tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
    tcp_chr_set_client_ioc_name(chr, sioc);
    if (qio_channel_socket_connect_sync(sioc, s->addr, errp) < 0) {
        tcp_chr_change_state(s, TCP_CHARDEV_STATE_DISCONNECTED);
        object_unref(OBJECT(sioc));
        return -1;
    }
    if (s->registered_yank) {
        yank_register_function(CHARDEV_YANK_INSTANCE(chr->label),
                               char_socket_yank_iochannel,
                               QIO_CHANNEL(sioc));
    }
    tcp_chr_new_client(chr, sioc);
    object_unref(OBJECT(sioc));
    return 0;
}

static void tcp_chr_accept_server_sync(Chardev *chr)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    QIOChannelSocket *sioc;

    info_report("QEMU waiting for connection on: %s",
                chr->filename);
    tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
    sioc = qio_net_listener_wait_client(s->listener);
    tcp_chr_set_client_ioc_name(chr, sioc);
    if (s->registered_yank) {
        yank_register_function(CHARDEV_YANK_INSTANCE(chr->label),
                               char_socket_yank_iochannel,
                               QIO_CHANNEL(sioc));
    }
    tcp_chr_new_client(chr, sioc);
    object_unref(OBJECT(sioc));
}

static int tcp_chr_wait_connected(Chardev *chr, Error **errp)
{
    SocketChardev *s = SOCKET_CHARDEV(chr);
    const char *opts[] = { "telnet", "tn3270", "websock", "tls-creds" };
    bool optset[] = { s->is_telnet, s->is_tn3270, s->is_websock, s->tls_creds };
    size_t i;

    QEMU_BUILD_BUG_ON(G_N_ELEMENTS(opts) != G_N_ELEMENTS(optset));
    for (i = 0; i < G_N_ELEMENTS(opts); i++) {
        if (optset[i]) {
            error_setg(errp,
                       "'%s' option is incompatible with waiting for "
                       "connection completion", opts[i]);
            return -1;
        }
    }
    tcp_chr_reconn_timer_cancel(s);

    /*
     * We expect states to be as follows:
     *
     * - server
     *    - wait   -> CONNECTED
     *    - nowait -> DISCONNECTED
     * - client
     *    - reconnect == 0 -> CONNECTED
     *    - reconnect != 0 -> CONNECTING
     */
    if (s->state == TCP_CHARDEV_STATE_CONNECTING) {
        if (!s->connect_task) {
            error_setg(errp,
                       "Unexpected 'connecting' state without connect task "
                       "while waiting for connection completion");
            return -1;
        }

        /*
         * tcp_chr_wait_connected should only ever be run from the
         * main loop thread associated with chr->gcontext, otherwise
         * qio_task_wait_thread has a dangerous race condition with
         * the freeing of the s->connect_task object.
         *
         * Acquiring the main context doesn't 100% prove we're in
         * the main loop thread, but it does at least guarantee
         * that the main loop won't be executed by another thread,
         * avoiding the race condition with the task idle callback.
         */
        g_main_context_acquire(chr->gcontext);
        qio_task_wait_thread(s->connect_task);
        g_main_context_release(chr->gcontext);

        /*
         * The completion callback (qemu_chr_socket_connected) for
         * s->connect_task should have set this to NULL by the time
         * qio_task_wait_thread has returned.
         */
        assert(!s->connect_task);

        /*
         * NB we are *not* guaranteed to have "s->state == ..CONNECTED"
         * at this point, as this first connect may have failed, so
         * allow the next loop to run regardless.
         */
    }

    while (s->state != TCP_CHARDEV_STATE_CONNECTED) {
        if (s->is_listen) {
            tcp_chr_accept_server_sync(chr);
        } else {
            Error *err = NULL;
            if (tcp_chr_connect_client_sync(chr, &err) < 0) {
                if (s->reconnect_time) {
                    error_free(err);
                    g_usleep(s->reconnect_time * 1000ULL * 1000ULL);
                } else {
                    error_propagate(errp, err);
                    return -1;
                }
            }
        }
    }

    return 0;
}

static void char_socket_finalize(Object *obj)
{
    Chardev *chr = CHARDEV(obj);
    SocketChardev *s = SOCKET_CHARDEV(obj);

    tcp_chr_free_connection(chr);
    tcp_chr_reconn_timer_cancel(s);
    qapi_free_SocketAddress(s->addr);
    tcp_chr_telnet_destroy(s);
    g_free(s->telnet_init);
    if (s->listener) {
        qio_net_listener_set_client_func_full(s->listener, NULL, NULL,
                                              NULL, chr->gcontext);
        object_unref(OBJECT(s->listener));
    }
    if (s->tls_creds) {
        object_unref(OBJECT(s->tls_creds));
    }
    g_free(s->tls_authz);
    if (s->registered_yank) {
        /*
         * In the chardev-change special-case, we shouldn't unregister
         * the yank instance, as it still may be needed.
         */
        if (!chr->handover_yank_instance) {
            yank_unregister_instance(CHARDEV_YANK_INSTANCE(chr->label));
        }
    }

    qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
}

static void qemu_chr_socket_connected(QIOTask *task, void *opaque)
{
    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
    Chardev *chr = CHARDEV(opaque);
    SocketChardev *s = SOCKET_CHARDEV(chr);
    Error *err = NULL;
s->connect_task = NULL;
if (qio_task_propagate_error(task, &err)) {
tcp_chr_change_state(s, TCP_CHARDEV_STATE_DISCONNECTED);
if (s->registered_yank) {
yank_unregister_function(CHARDEV_YANK_INSTANCE(chr->label),
char_socket_yank_iochannel,
QIO_CHANNEL(sioc));
}
check_report_connect_error(chr, err);
goto cleanup;
}
s->connect_err_reported = false;
tcp_chr_new_client(chr, sioc);
cleanup:
object_unref(OBJECT(sioc));
}
static void tcp_chr_connect_client_task(QIOTask *task,
gpointer opaque)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
SocketAddress *addr = opaque;
Error *err = NULL;
qio_channel_socket_connect_sync(ioc, addr, &err);
qio_task_set_error(task, err);
}
static void tcp_chr_connect_client_async(Chardev *chr)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
QIOChannelSocket *sioc;
tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
sioc = qio_channel_socket_new();
tcp_chr_set_client_ioc_name(chr, sioc);
if (s->registered_yank) {
yank_register_function(CHARDEV_YANK_INSTANCE(chr->label),
char_socket_yank_iochannel,
QIO_CHANNEL(sioc));
}
/*
* Normally code would use the qio_channel_socket_connect_async
* method which uses a QIOTask + qio_task_set_error internally
* to avoid blocking. The tcp_chr_wait_connected method, however,
* needs a way to synchronize with completion of the background
* connect task which can't be done with the QIOChannelSocket
* async APIs. Thus we must use QIOTask directly to implement
* the non-blocking concept locally.
*/
s->connect_task = qio_task_new(OBJECT(sioc),
qemu_chr_socket_connected,
object_ref(OBJECT(chr)),
(GDestroyNotify)object_unref);
qio_task_run_in_thread(s->connect_task,
tcp_chr_connect_client_task,
s->addr,
NULL,
chr->gcontext);
}
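/*
 * A sketch of the waiting side that this arrangement enables
 * (illustrative; the actual synchronization is performed by
 * tcp_chr_wait_connected() above, using qio_task_wait_thread()
 * from io/task.h):
 *
 *     if (s->state == TCP_CHARDEV_STATE_CONNECTING) {
 *         qio_task_wait_thread(s->connect_task);
 *         (qemu_chr_socket_connected() has now run and cleared
 *          s->connect_task)
 *     }
 */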
static gboolean socket_reconnect_timeout(gpointer opaque)
{
Chardev *chr = CHARDEV(opaque);
SocketChardev *s = SOCKET_CHARDEV(opaque);
qemu_mutex_lock(&chr->chr_write_lock);
g_source_unref(s->reconnect_timer);
s->reconnect_timer = NULL;
qemu_mutex_unlock(&chr->chr_write_lock);
if (chr->be_open) {
return false;
}
tcp_chr_connect_client_async(chr);
return false;
}
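/*
 * Note: returning false from a GLib timeout callback means
 * G_SOURCE_REMOVE, so both exit paths above make this a one-shot
 * source; a new timer must be armed explicitly when a later
 * disconnect schedules another reconnect attempt.
 */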
static int qmp_chardev_open_socket_server(Chardev *chr,
bool is_telnet,
bool is_waitconnect,
Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
char *name;
if (is_telnet) {
s->do_telnetopt = 1;
}
s->listener = qio_net_listener_new();
name = g_strdup_printf("chardev-tcp-listener-%s", chr->label);
qio_net_listener_set_name(s->listener, name);
g_free(name);
if (s->addr->type == SOCKET_ADDRESS_TYPE_FD && !*s->addr->u.fd.str) {
goto skip_listen;
}
if (qio_net_listener_open_sync(s->listener, s->addr, 1, errp) < 0) {
object_unref(OBJECT(s->listener));
s->listener = NULL;
return -1;
}
qapi_free_SocketAddress(s->addr);
s->addr = socket_local_address(s->listener->sioc[0]->fd, errp);
skip_listen:
update_disconnected_filename(s);
if (is_waitconnect) {
tcp_chr_accept_server_sync(chr);
} else {
qio_net_listener_set_client_func_full(s->listener,
tcp_chr_accept,
chr, NULL,
chr->gcontext);
}
return 0;
}
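/*
 * Example command line for the server path (illustrative only):
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off
 *     -mon chardev=mon0,mode=control
 *
 * With wait=on (is_waitconnect) chardev creation blocks in
 * tcp_chr_accept_server_sync() until the first client connects; with
 * wait=off the listener callback accepts clients from the main loop.
 */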
static int qmp_chardev_open_socket_client(Chardev *chr,
int64_t reconnect,
Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
if (reconnect > 0) {
s->reconnect_time = reconnect;
tcp_chr_connect_client_async(chr);
return 0;
} else {
return tcp_chr_connect_client_sync(chr, errp);
}
}
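/*
 * Example command line for the client path (illustrative only):
 *
 *     -chardev socket,id=ch0,host=127.0.0.1,port=4444,reconnect=5
 *
 * reconnect=5 takes the asynchronous branch above and retries failed
 * connections every five seconds; without 'reconnect' the connection
 * is attempted once, synchronously, and failure fails chardev
 * creation.
 */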
static bool qmp_chardev_validate_socket(ChardevSocket *sock,
SocketAddress *addr,
Error **errp)
{
/* Validate any options which have a dependency on address type */
switch (addr->type) {
case SOCKET_ADDRESS_TYPE_FD:
if (sock->has_reconnect) {
error_setg(errp,
"'reconnect' option is incompatible with "
"'fd' address type");
return false;
}
if (sock->has_tls_creds &&
!(sock->has_server && sock->server)) {
error_setg(errp,
"'tls_creds' option is incompatible with "
"'fd' address type as client");
return false;
}
break;
case SOCKET_ADDRESS_TYPE_UNIX:
if (sock->has_tls_creds) {
error_setg(errp,
"'tls_creds' option is incompatible with "
"'unix' address type");
return false;
}
break;
case SOCKET_ADDRESS_TYPE_INET:
break;
case SOCKET_ADDRESS_TYPE_VSOCK:
if (sock->has_tls_creds) {
error_setg(errp,
"'tls_creds' option is incompatible with "
"'vsock' address type");
return false;
        }
        break;
    default:
break;
}
if (sock->has_tls_authz && !sock->has_tls_creds) {
error_setg(errp, "'tls_authz' option requires 'tls_creds' option");
return false;
}
    /* Validate any options which have a dependency on client vs server */
if (!sock->has_server || sock->server) {
if (sock->has_reconnect) {
error_setg(errp,
"'reconnect' option is incompatible with "
"socket in server listen mode");
return false;
}
} else {
if (sock->has_websocket && sock->websocket) {
error_setg(errp, "%s", "Websocket client is not implemented");
return false;
}
if (sock->has_wait) {
error_setg(errp, "%s",
"'wait' option is incompatible with "
"socket in client connect mode");
return false;
}
}
return true;
}
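/*
 * For illustration, a QMP request that would trip the 'unix' +
 * tls-creds check above (a sketch; member names per the QAPI schema
 * in qapi/char.json):
 *
 *     { "execute": "chardev-add",
 *       "arguments": { "id": "ch0",
 *         "backend": { "type": "socket",
 *           "data": { "tls-creds": "tls0",
 *             "addr": { "type": "unix",
 *               "data": { "path": "/tmp/ch0.sock" } } } } } }
 */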
static void qmp_chardev_open_socket(Chardev *chr,
ChardevBackend *backend,
bool *be_opened,
Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
ChardevSocket *sock = backend->u.socket.data;
bool do_nodelay = sock->has_nodelay ? sock->nodelay : false;
bool is_listen = sock->has_server ? sock->server : true;
bool is_telnet = sock->has_telnet ? sock->telnet : false;
bool is_tn3270 = sock->has_tn3270 ? sock->tn3270 : false;
bool is_waitconnect = sock->has_wait ? sock->wait : false;
bool is_websock = sock->has_websocket ? sock->websocket : false;
int64_t reconnect = sock->has_reconnect ? sock->reconnect : 0;
SocketAddress *addr;
s->is_listen = is_listen;
s->is_telnet = is_telnet;
s->is_tn3270 = is_tn3270;
s->is_websock = is_websock;
s->do_nodelay = do_nodelay;
if (sock->tls_creds) {
Object *creds;
creds = object_resolve_path_component(
object_get_objects_root(), sock->tls_creds);
if (!creds) {
error_setg(errp, "No TLS credentials with id '%s'",
sock->tls_creds);
return;
}
s->tls_creds = (QCryptoTLSCreds *)
object_dynamic_cast(creds,
TYPE_QCRYPTO_TLS_CREDS);
if (!s->tls_creds) {
error_setg(errp, "Object with id '%s' is not TLS credentials",
sock->tls_creds);
return;
}
object_ref(OBJECT(s->tls_creds));
if (!qcrypto_tls_creds_check_endpoint(s->tls_creds,
is_listen
? QCRYPTO_TLS_CREDS_ENDPOINT_SERVER
: QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
errp)) {
return;
}
}
s->tls_authz = g_strdup(sock->tls_authz);
s->addr = addr = socket_address_flatten(sock->addr);
if (!qmp_chardev_validate_socket(sock, addr, errp)) {
return;
}
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE);
/* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */
if (addr->type == SOCKET_ADDRESS_TYPE_UNIX) {
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS);
}
/*
* In the chardev-change special-case, we shouldn't register a new yank
* instance, as there already may be one.
*/
if (!chr->handover_yank_instance) {
if (!yank_register_instance(CHARDEV_YANK_INSTANCE(chr->label), errp)) {
return;
}
}
s->registered_yank = true;
/* be isn't opened until we get a connection */
*be_opened = false;
update_disconnected_filename(s);
if (s->is_listen) {
if (qmp_chardev_open_socket_server(chr, is_telnet || is_tn3270,
is_waitconnect, errp) < 0) {
return;
}
} else {
if (qmp_chardev_open_socket_client(chr, reconnect, errp) < 0) {
return;
}
}
}
static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
Error **errp)
{
const char *path = qemu_opt_get(opts, "path");
const char *host = qemu_opt_get(opts, "host");
const char *port = qemu_opt_get(opts, "port");
const char *fd = qemu_opt_get(opts, "fd");
#ifdef CONFIG_LINUX
bool tight = qemu_opt_get_bool(opts, "tight", true);
bool abstract = qemu_opt_get_bool(opts, "abstract", false);
#endif
SocketAddressLegacy *addr;
ChardevSocket *sock;
if ((!!path + !!fd + !!host) > 1) {
        error_setg(errp,
                   "At most one of 'path', 'fd' or 'host' may be specified");
return;
}
if (host && !port) {
error_setg(errp, "chardev: socket: no port given");
return;
}
backend->type = CHARDEV_BACKEND_KIND_SOCKET;
sock = backend->u.socket.data = g_new0(ChardevSocket, 1);
qemu_chr_parse_common(opts, qapi_ChardevSocket_base(sock));
if (qemu_opt_get(opts, "delay") && qemu_opt_get(opts, "nodelay")) {
error_setg(errp, "'delay' and 'nodelay' are mutually exclusive");
return;
}
sock->has_nodelay =
qemu_opt_get(opts, "delay") ||
qemu_opt_get(opts, "nodelay");
sock->nodelay =
!qemu_opt_get_bool(opts, "delay", true) ||
qemu_opt_get_bool(opts, "nodelay", false);
    /*
     * We have a different default from QMP for 'server', hence
     * we can't just check for the existence of 'server'
     */
sock->has_server = true;
sock->server = qemu_opt_get_bool(opts, "server", false);
sock->has_telnet = qemu_opt_get(opts, "telnet");
sock->telnet = qemu_opt_get_bool(opts, "telnet", false);
sock->has_tn3270 = qemu_opt_get(opts, "tn3270");
sock->tn3270 = qemu_opt_get_bool(opts, "tn3270", false);
sock->has_websocket = qemu_opt_get(opts, "websocket");
sock->websocket = qemu_opt_get_bool(opts, "websocket", false);
    /*
     * We have a different default from QMP for 'wait' when 'server'
     * is set, hence we can't just check for the existence of 'wait'
     */
sock->has_wait = qemu_opt_find(opts, "wait") || sock->server;
sock->wait = qemu_opt_get_bool(opts, "wait", true);
sock->has_reconnect = qemu_opt_find(opts, "reconnect");
sock->reconnect = qemu_opt_get_number(opts, "reconnect", 0);
sock->has_tls_creds = qemu_opt_get(opts, "tls-creds");
sock->tls_creds = g_strdup(qemu_opt_get(opts, "tls-creds"));
sock->has_tls_authz = qemu_opt_get(opts, "tls-authz");
sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz"));
addr = g_new0(SocketAddressLegacy, 1);
if (path) {
UnixSocketAddress *q_unix;
addr->type = SOCKET_ADDRESS_TYPE_UNIX;
q_unix = addr->u.q_unix.data = g_new0(UnixSocketAddress, 1);
q_unix->path = g_strdup(path);
#ifdef CONFIG_LINUX
q_unix->has_tight = true;
q_unix->tight = tight;
q_unix->has_abstract = true;
q_unix->abstract = abstract;
#endif
} else if (host) {
addr->type = SOCKET_ADDRESS_TYPE_INET;
addr->u.inet.data = g_new(InetSocketAddress, 1);
*addr->u.inet.data = (InetSocketAddress) {
.host = g_strdup(host),
.port = g_strdup(port),
.has_to = qemu_opt_get(opts, "to"),
.to = qemu_opt_get_number(opts, "to", 0),
.has_ipv4 = qemu_opt_get(opts, "ipv4"),
.ipv4 = qemu_opt_get_bool(opts, "ipv4", 0),
.has_ipv6 = qemu_opt_get(opts, "ipv6"),
.ipv6 = qemu_opt_get_bool(opts, "ipv6", 0),
};
} else {
addr->type = SOCKET_ADDRESS_TYPE_FD;
addr->u.fd.data = g_new(String, 1);
addr->u.fd.data->str = g_strdup(fd);
}
sock->addr = addr;
}
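/*
 * The three mutually exclusive address forms parsed above, as they
 * appear on the command line (illustrative):
 *
 *     -chardev socket,id=c0,path=/tmp/serial.sock        (unix)
 *     -chardev socket,id=c1,host=localhost,port=7777     (inet)
 *     -chardev socket,id=c2,fd=3                         (pre-opened fd)
 */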
static void
char_socket_get_addr(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(obj);
visit_type_SocketAddress(v, name, &s->addr, errp);
}
static bool
char_socket_get_connected(Object *obj, Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(obj);
return s->state == TCP_CHARDEV_STATE_CONNECTED;
}
static void char_socket_class_init(ObjectClass *oc, void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
cc->supports_yank = true;
cc->parse = qemu_chr_parse_socket;
cc->open = qmp_chardev_open_socket;
cc->chr_wait_connected = tcp_chr_wait_connected;
cc->chr_write = tcp_chr_write;
cc->chr_sync_read = tcp_chr_sync_read;
cc->chr_disconnect = tcp_chr_disconnect;
cc->get_msgfds = tcp_get_msgfds;
cc->set_msgfds = tcp_set_msgfds;
cc->chr_add_client = tcp_chr_add_client;
cc->chr_add_watch = tcp_chr_add_watch;
cc->chr_update_read_handler = tcp_chr_update_read_handler;
object_class_property_add(oc, "addr", "SocketAddress",
char_socket_get_addr, NULL,
NULL, NULL);
object_class_property_add_bool(oc, "connected", char_socket_get_connected,
NULL);
}
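/*
 * The class properties registered above are visible through QOM, e.g.
 * (a sketch, assuming a chardev created with id "ch0"; chardev objects
 * are placed under the /chardevs container):
 *
 *     { "execute": "qom-get",
 *       "arguments": { "path": "/chardevs/ch0",
 *                      "property": "connected" } }
 */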
static const TypeInfo char_socket_type_info = {
.name = TYPE_CHARDEV_SOCKET,
.parent = TYPE_CHARDEV,
.instance_size = sizeof(SocketChardev),
.instance_finalize = char_socket_finalize,
.class_init = char_socket_class_init,
};
static void register_types(void)
{
type_register_static(&char_socket_type_info);
}
type_init(register_types);