/*
 * QEMU VNC display driver
 *
 * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2009 Red Hat, Inc
 * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "vnc.h"
#include "vnc-jobs.h"
#include "qemu/sockets.h"
#include "qemu/main-loop.h"
#include "block/aio.h"
#include "trace.h"

/*
 * Locking:
 *
 * There are three levels of locking:
 * - jobs queue lock: for each operation on the queue (push, pop, isEmpty?)
 * - VncDisplay global lock: mainly used for framebuffer updates to avoid
 *                           screen corruption if the framebuffer is updated
 *                           while the worker is doing something.
 * - VncState::output lock: used to make sure the output buffer is not corrupted
 *                          if two threads try to write to it at the same time
 *
 * While the VNC worker thread is working, the VncDisplay global lock is held
 * to avoid screen corruption (this does not block vnc_refresh() because it
 * uses trylock()), but the output lock is not held because the thread works on
 * its own output buffer.
 * When the encoding job is done, the worker thread will hold the output lock
 * and copy its output buffer into vs->output.
 */
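
/*
 * Typical flow on the display side (illustrative sketch only):
 *
 *     VncJob *job = vnc_job_new(vs);
 *     vnc_job_add_rect(job, x, y, w, h);   (once per dirty rectangle)
 *     vnc_job_push(job);                   (encoded asynchronously by the worker)
 *     ...
 *     vnc_jobs_join(vs);                   (wait for the worker and consume its output)
 */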
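
/* Job queue shared by all VNC clients and drained by the worker thread */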
struct VncJobQueue {
    QemuCond cond;
    QemuMutex mutex;
    QemuThread thread;
    bool exit;
    QTAILQ_HEAD(, VncJob) jobs;
};

typedef struct VncJobQueue VncJobQueue;

/*
 * We use a single global queue, but most of the functions are
 * already reentrant, so we can easily add more than one encoding thread
 */
static VncJobQueue *queue;

static void vnc_lock_queue(VncJobQueue *queue)
{
    qemu_mutex_lock(&queue->mutex);
}

static void vnc_unlock_queue(VncJobQueue *queue)
{
    qemu_mutex_unlock(&queue->mutex);
}
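
/*
 * Allocate an empty job for this client.  Rectangles are attached with
 * vnc_job_add_rect() and the job is handed to the worker thread with
 * vnc_job_push().
 */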
VncJob *vnc_job_new(VncState *vs)
{
    VncJob *job = g_new0(VncJob, 1);

    assert(vs->magic == VNC_MAGIC);
    job->vs = vs;
    vnc_lock_queue(queue);
    QLIST_INIT(&job->rectangles);
    vnc_unlock_queue(queue);
    return job;
}
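
/* Attach a dirty rectangle to the job; it is clamped and encoded by the worker */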
int vnc_job_add_rect(VncJob *job, int x, int y, int w, int h)
{
    VncRectEntry *entry = g_new0(VncRectEntry, 1);

    trace_vnc_job_add_rect(job->vs, job, x, y, w, h);

    entry->rect.x = x;
    entry->rect.y = y;
    entry->rect.w = w;
    entry->rect.h = h;

    vnc_lock_queue(queue);
    QLIST_INSERT_HEAD(&job->rectangles, entry, next);
    vnc_unlock_queue(queue);
    return 1;
}
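
/*
 * Queue the job for the worker thread, or free it right away if the
 * queue is shutting down or the job has no rectangles.
 */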
void vnc_job_push(VncJob *job)
{
    vnc_lock_queue(queue);
    if (queue->exit || QLIST_EMPTY(&job->rectangles)) {
        g_free(job);
    } else {
        QTAILQ_INSERT_TAIL(&queue->jobs, job, next);
        qemu_cond_broadcast(&queue->cond);
    }
    vnc_unlock_queue(queue);
}
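
/*
 * Return true if the queue holds a job for @vs, or any job at all when
 * @vs is NULL.  The caller must hold the queue lock.
 */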
static bool vnc_has_job_locked(VncState *vs)
{
    VncJob *job;

    QTAILQ_FOREACH(job, &queue->jobs, next) {
        if (job->vs == vs || !vs) {
            return true;
        }
    }
    return false;
}
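
/*
 * Wait until the worker thread has finished every queued job for @vs,
 * then move the finished output into vs->output.
 */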
void vnc_jobs_join(VncState *vs)
{
    vnc_lock_queue(queue);
    while (vnc_has_job_locked(vs)) {
        qemu_cond_wait(&queue->cond, &queue->mutex);
    }
    vnc_unlock_queue(queue);
    vnc_jobs_consume_buffer(vs);
}
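
/*
 * Move data produced by the worker thread from vs->jobs_buffer into
 * vs->output, re-arm the client I/O watch (now also waiting for
 * writability), and record the amount of pending output if the job
 * was a forced update.
 */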
void vnc_jobs_consume_buffer(VncState *vs)
{
    bool flush;

    vnc_lock_output(vs);
    if (vs->jobs_buffer.offset) {
        if (vs->ioc != NULL && buffer_empty(&vs->output)) {
            if (vs->ioc_tag) {
                g_source_remove(vs->ioc_tag);
            }
            if (vs->disconnecting == FALSE) {
                vs->ioc_tag = qio_channel_add_watch(
                    vs->ioc, G_IO_IN | G_IO_HUP | G_IO_ERR | G_IO_OUT,
                    vnc_client_io, vs, NULL);
            }
        }
        buffer_move(&vs->output, &vs->jobs_buffer);

        if (vs->job_update == VNC_STATE_UPDATE_FORCE) {
            vs->force_update_offset = vs->output.offset;
        }
        vs->job_update = VNC_STATE_UPDATE_NONE;
    }
    flush = vs->ioc != NULL && vs->abort != true;
    vnc_unlock_output(vs);

    if (flush) {
        vnc_flush(vs);
    }
}

/*
 * Copy data for local use
 */
static void vnc_async_encoding_start(VncState *orig, VncState *local)
{
    buffer_init(&local->output, "vnc-worker-output");
    local->sioc = NULL; /* Don't do any network work on this thread */
    local->ioc = NULL; /* Don't do any network work on this thread */

    local->vnc_encoding = orig->vnc_encoding;
    local->features = orig->features;
    local->vd = orig->vd;
    local->lossy_rect = orig->lossy_rect;
    local->write_pixels = orig->write_pixels;
    local->client_pf = orig->client_pf;
    local->client_be = orig->client_be;
    local->tight = orig->tight;
    local->zlib = orig->zlib;
    local->hextile = orig->hextile;
    local->zrle = orig->zrle;
    local->client_width = orig->client_width;
    local->client_height = orig->client_height;
}
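
/*
 * Free the worker's local output buffer and copy the persistent encoder
 * state (tight, zlib, hextile, zrle, lossy_rect) back into the client's
 * VncState.
 */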
static void vnc_async_encoding_end(VncState *orig, VncState *local)
{
    buffer_free(&local->output);
    orig->tight = local->tight;
    orig->zlib = local->zlib;
    orig->hextile = local->hextile;
    orig->zrle = local->zrle;
    orig->lossy_rect = local->lossy_rect;
}
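
/*
 * Clamp the rectangle to the client's current width and height.
 * Returns false if nothing is left to send.
 */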
static bool vnc_worker_clamp_rect(VncState *vs, VncJob *job, VncRect *rect)
{
    trace_vnc_job_clamp_rect(vs, job, rect->x, rect->y, rect->w, rect->h);

    if (rect->x >= vs->client_width) {
        goto discard;
    }
    rect->w = MIN(vs->client_width - rect->x, rect->w);
    if (rect->w == 0) {
        goto discard;
    }

    if (rect->y >= vs->client_height) {
        goto discard;
    }
    rect->h = MIN(vs->client_height - rect->y, rect->h);
    if (rect->h == 0) {
        goto discard;
    }

    trace_vnc_job_clamped_rect(vs, job, rect->x, rect->y, rect->w, rect->h);
    return true;

discard:
    trace_vnc_job_discard_rect(vs, job, rect->x, rect->y, rect->w, rect->h);
    return false;
}
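
/*
 * Take one job off the queue, encode its rectangles into a framebuffer
 * update message using a local copy of the client state, and hand the
 * result back to the client via vs->jobs_buffer.  Returns -1 when the
 * queue is shutting down, 0 otherwise.
 */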
static int vnc_worker_thread_loop(VncJobQueue *queue)
{
    VncJob *job;
    VncRectEntry *entry, *tmp;
    VncState vs = {};
    int n_rectangles;
    int saved_offset;

    vnc_lock_queue(queue);
    while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
        qemu_cond_wait(&queue->cond, &queue->mutex);
    }
    /* Here job can only be NULL if queue->exit is true */
    job = QTAILQ_FIRST(&queue->jobs);
    vnc_unlock_queue(queue);

    if (queue->exit) {
        return -1;
    }

    assert(job->vs->magic == VNC_MAGIC);

    vnc_lock_output(job->vs);
    if (job->vs->ioc == NULL || job->vs->abort == true) {
        vnc_unlock_output(job->vs);
        goto disconnected;
    }
    if (buffer_empty(&job->vs->output)) {
        /*
         * Looks like a NOP as it obviously moves no data. But it
         * moves the empty buffer, so we don't have to malloc a new
         * one for vs.output
         */
        buffer_move_empty(&vs.output, &job->vs->output);
    }
    vnc_unlock_output(job->vs);

    /* Make a local copy of vs and switch output buffers */
    vnc_async_encoding_start(job->vs, &vs);
    vs.magic = VNC_MAGIC;

    /* Start sending rectangles */
    n_rectangles = 0;
    vnc_write_u8(&vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
    vnc_write_u8(&vs, 0);
    saved_offset = vs.output.offset;
    vnc_write_u16(&vs, 0);

    vnc_lock_display(job->vs->vd);
    QLIST_FOREACH_SAFE(entry, &job->rectangles, next, tmp) {
        int n;

        if (job->vs->ioc == NULL) {
            vnc_unlock_display(job->vs->vd);
            /* Copy persistent encoding data */
            vnc_async_encoding_end(job->vs, &vs);
            goto disconnected;
        }

        if (vnc_worker_clamp_rect(&vs, job, &entry->rect)) {
            n = vnc_send_framebuffer_update(&vs, entry->rect.x, entry->rect.y,
                                            entry->rect.w, entry->rect.h);

            if (n >= 0) {
                n_rectangles += n;
            }
        }
        g_free(entry);
    }
    trace_vnc_job_nrects(&vs, job, n_rectangles);
    vnc_unlock_display(job->vs->vd);

    /* Put n_rectangles at the beginning of the message */
    vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
    vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;

    vnc_lock_output(job->vs);
    if (job->vs->ioc != NULL) {
        buffer_move(&job->vs->jobs_buffer, &vs.output);
        /* Copy persistent encoding data */
        vnc_async_encoding_end(job->vs, &vs);

        qemu_bh_schedule(job->vs->bh);
    } else {
        buffer_reset(&vs.output);
        /* Copy persistent encoding data */
        vnc_async_encoding_end(job->vs, &vs);
    }
    vnc_unlock_output(job->vs);

disconnected:
    vnc_lock_queue(queue);
    QTAILQ_REMOVE(&queue->jobs, job, next);
    vnc_unlock_queue(queue);
    qemu_cond_broadcast(&queue->cond);
    g_free(job);
    vs.magic = 0;
    return 0;
}
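
/* Allocate a job queue and initialise its condition variable, mutex and job list */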
static VncJobQueue *vnc_queue_init(void)
{
    VncJobQueue *queue = g_new0(VncJobQueue, 1);

    qemu_cond_init(&queue->cond);
    qemu_mutex_init(&queue->mutex);
    QTAILQ_INIT(&queue->jobs);
    return queue;
}
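
/* Destroy the queue's synchronisation primitives, free it and clear the global pointer */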
static void vnc_queue_clear(VncJobQueue *q)
{
    qemu_cond_destroy(&queue->cond);
    qemu_mutex_destroy(&queue->mutex);
    g_free(q);
    queue = NULL; /* Unset global queue */
}
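
/* Worker thread entry point: run jobs until the loop signals exit, then tear down the queue */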
static void *vnc_worker_thread(void *arg)
{
    VncJobQueue *queue = arg;

    qemu_thread_get_self(&queue->thread);

    while (!vnc_worker_thread_loop(queue)) ;
    vnc_queue_clear(queue);
    return NULL;
}

static bool vnc_worker_thread_running(void)
{
    return queue; /* Check global queue */
}
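
/* Create the global job queue and start the detached worker thread (no-op if already running) */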
void vnc_start_worker_thread(void)
{
    VncJobQueue *q;

    if (vnc_worker_thread_running()) {
        return;
    }

    q = vnc_queue_init();
    qemu_thread_create(&q->thread, "vnc_worker", vnc_worker_thread, q,
                       QEMU_THREAD_DETACHED);
    queue = q; /* Set global queue */
}