Check return values from g_poll and select
The current implementation of os_host_main_loop_wait() on Windows returns 1 only when a g_poll() event occurs, because the return value of select() is overridden. This is wrong, as we may skip a socket event, as shown in this example:

1. select() returns 0
2. g_poll() returns 1 (socket event occurs)
3. os_host_main_loop_wait() returns 1
4. qemu_iohandler_poll() sees no socket event because select() returned before the event occurred
5. select() returns 1
6. g_poll() returns 0 (g_poll() overrides select()'s return value)
7. os_host_main_loop_wait() returns 0
8. qemu_iohandler_poll() doesn't check for socket events because the return value of os_host_main_loop_wait() is zero
9. goto 5

This patch uses one variable for each of these return values, so we no longer miss a select() event. It also moves the call to select() after g_poll(), which improves latency: we no longer have to go through two os_host_main_loop_wait() calls to detect a socket event.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabien Chouteau <chouteau@adacore.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
parent 7cd5da7eef
commit 5e3bc735d9

1 changed file: main-loop.c (27 changed lines)
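To make the failure mode described in the commit message concrete, here is a small standalone C sketch, not the actual QEMU code: fake_select(), fake_g_poll(), old_wait() and new_wait() are hypothetical names, and the stubs model the problem case where a socket becomes ready but no glib event fires.

/* Hypothetical stubs modelling the problem case: select() reports a ready
 * socket (1) while g_poll() reports no event (0). */
#include <stdio.h>

static int fake_select(void) { return 1; }
static int fake_g_poll(void) { return 0; }

/* Old flow: one shared variable, so g_poll()'s 0 overwrites select()'s 1. */
static int old_wait(void)
{
    int ret;
    ret = fake_select();   /* ret = 1: socket event detected           */
    ret = fake_g_poll();   /* ret = 0: the socket event is overwritten */
    return ret;            /* caller skips its fd handlers             */
}

/* New flow: separate variables, and select() runs after g_poll(), so a
 * socket that became ready while we waited in g_poll() is reported in the
 * same iteration. */
static int new_wait(void)
{
    int g_poll_ret = fake_g_poll();
    int select_ret = fake_select();
    return select_ret || g_poll_ret;   /* nonzero if either source fired */
}

int main(void)
{
    printf("old flow returns %d (socket event lost)\n", old_wait());
    printf("new flow returns %d (socket event reported)\n", new_wait());
    return 0;
}

Either flow returns an int that the caller uses to decide whether to scan the fd sets; with the fix, that value is nonzero whenever either select() or g_poll() saw activity.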
--- a/main-loop.c
+++ b/main-loop.c
@@ -330,7 +330,7 @@ void qemu_fd_register(int fd)
 static int os_host_main_loop_wait(uint32_t timeout)
 {
     GMainContext *context = g_main_context_default();
-    int ret, i;
+    int select_ret, g_poll_ret, ret, i;
     PollingEntry *pe;
     WaitObjects *w = &wait_objects;
     gint poll_timeout;
@@ -345,13 +345,6 @@ static int os_host_main_loop_wait(uint32_t timeout)
         return ret;
     }
 
-    if (nfds >= 0) {
-        ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
-        if (ret != 0) {
-            timeout = 0;
-        }
-    }
-
     g_main_context_prepare(context, &max_priority);
     n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                       poll_fds, ARRAY_SIZE(poll_fds));
@@ -367,9 +360,9 @@ static int os_host_main_loop_wait(uint32_t timeout)
     }
 
     qemu_mutex_unlock_iothread();
-    ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
+    g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
     qemu_mutex_lock_iothread();
-    if (ret > 0) {
+    if (g_poll_ret > 0) {
         for (i = 0; i < w->num; i++) {
             w->revents[i] = poll_fds[n_poll_fds + i].revents;
         }
@@ -384,12 +377,18 @@ static int os_host_main_loop_wait(uint32_t timeout)
         g_main_context_dispatch(context);
     }
 
-    /* If an edge-triggered socket event occurred, select will return a
-     * positive result on the next iteration. We do not need to do anything
-     * here.
-     */
+    /* Call select after g_poll to avoid a useless iteration and therefore
+     * improve socket latency.
+     */
 
-    return ret;
+    if (nfds >= 0) {
+        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
+        if (select_ret != 0) {
+            timeout = 0;
+        }
+    }
+
+    return select_ret || g_poll_ret;
 }
 #endif
 