linux-user: Queue synchronous signals separately

If a synchronous signal and an asynchronous signal arrive near-simultaneously,
and the signal number of the asynchronous signal is lower than that of the
synchronous signal, then the handler for the asynchronous signal would be called
first, and the handler for the synchronous signal would then be called within or
after the first handler with an incorrect context.
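
For illustration only (this is not QEMU code; SIGHUP and SIGSEGV are just
example signal numbers): a minimal model of the problematic behaviour, where a
single pending table scanned in ascending signal-number order delivers a
lower-numbered asynchronous signal ahead of a pending synchronous one.

    #include <signal.h>
    #include <stdio.h>

    #define NSIG_DEMO 32

    static int pending[NSIG_DEMO + 1];   /* one pending flag per signal number */

    /* Deliver pending signals in ascending signal-number order, as a
     * single-table scan effectively does.
     */
    static void dispatch_lowest_first(void)
    {
        for (int sig = 1; sig <= NSIG_DEMO; sig++) {
            if (pending[sig]) {
                pending[sig] = 0;
                printf("delivering signal %d\n", sig);
            }
        }
    }

    int main(void)
    {
        pending[SIGSEGV] = 1;       /* synchronous: raised by the current instruction */
        pending[SIGHUP] = 1;        /* asynchronous: arrived at about the same time */
        dispatch_lowest_first();    /* prints signal 1 (SIGHUP) before 11 (SIGSEGV) */
        return 0;
    }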

This is fixed by queuing synchronous signals separately. Note that this does
risk delaying an asynchronous signal until the synchronous signal handler
returns rather than handling the signal on another thread, but this seems
unlikely to cause problems for real guest programs and is unavoidable unless
we could guarantee to roll back and reexecute whatever guest instruction
caused the synchronous signal (which would be a bit odd if we've already
logged its execution, for instance, and would require careful analysis of
all guest CPUs to check it was possible in all cases).
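
Again purely as a sketch (demo_sigtable, sync_slot and process_pending are
invented names for illustration, not the actual QEMU structures): keeping the
synchronous signal in its own slot and delivering it before scanning the
ordinary per-signal table restores the expected ordering.

    #include <signal.h>
    #include <stdio.h>

    #define NSIG_DEMO 32

    struct demo_sigtable {
        int pending;            /* signal number, or 0 if the slot is empty */
    };

    static struct demo_sigtable sync_slot;              /* the one synchronous signal */
    static struct demo_sigtable table[NSIG_DEMO + 1];   /* ordinary (asynchronous) signals */

    static void process_pending(void)
    {
        /* The synchronous signal belongs to the instruction that just trapped,
         * so deliver it first, regardless of its signal number.
         */
        if (sync_slot.pending) {
            printf("delivering synchronous signal %d\n", sync_slot.pending);
            sync_slot.pending = 0;
        }
        for (int sig = 1; sig <= NSIG_DEMO; sig++) {
            if (table[sig].pending) {
                printf("delivering asynchronous signal %d\n", sig);
                table[sig].pending = 0;
            }
        }
    }

    int main(void)
    {
        sync_slot.pending = SIGSEGV;     /* synchronous, signal number 11 */
        table[SIGHUP].pending = SIGHUP;  /* asynchronous, signal number 1 */
        process_pending();               /* SIGSEGV is now delivered first */
        return 0;
    }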

Signed-off-by: Timothy Edward Baldwin <T.E.Baldwin99@members.leeds.ac.uk>
Message-id: 1441497448-32489-24-git-send-email-T.E.Baldwin99@members.leeds.ac.uk
[PMM: added a comment]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Riku Voipio <riku.voipio@linaro.org>
Timothy E Baldwin 2016-05-27 15:51:53 +01:00 committed by Riku Voipio
parent 907f5fddaa
commit 655ed67c2a
2 changed files with 43 additions and 32 deletions

linux-user/qemu.h

@@ -119,6 +119,7 @@ typedef struct TaskState {
     struct image_info *info;
     struct linux_binprm *bprm;
 
+    struct emulated_sigtable sync_signal;
     struct emulated_sigtable sigtab[TARGET_NSIG];
     /* This thread's signal mask, as requested by the guest program.
      * The actual signal mask of this thread may differ:

linux-user/signal.c

@@ -502,18 +502,11 @@ int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     TaskState *ts = cpu->opaque;
-    struct emulated_sigtable *k;
 
     trace_user_queue_signal(env, sig);
 
-    k = &ts->sigtab[sig - 1];
-    /* we queue exactly one signal */
-    if (k->pending) {
-        return 0;
-    }
-
-    k->info = *info;
-    k->pending = 1;
+    ts->sync_signal.info = *info;
+    ts->sync_signal.pending = sig;
     /* signal that a new signal is pending */
     atomic_set(&ts->signal_pending, 1);
     return 1; /* indicates that the signal was queued */
@@ -530,9 +523,13 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
                                 void *puc)
 {
     CPUArchState *env = thread_cpu->env_ptr;
+    CPUState *cpu = ENV_GET_CPU(env);
+    TaskState *ts = cpu->opaque;
+
     int sig;
     target_siginfo_t tinfo;
     ucontext_t *uc = puc;
+    struct emulated_sigtable *k;
 
     /* the CPU emulator uses some host signals to detect exceptions,
        we forward to it some signals */
@@ -551,7 +548,11 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
     rewind_if_in_safe_syscall(puc);
 
     host_to_target_siginfo_noswap(&tinfo, info);
-    if (queue_signal(env, sig, &tinfo) == 1) {
+    k = &ts->sigtab[sig - 1];
+    k->info = tinfo;
+    k->pending = sig;
+    ts->signal_pending = 1;
+
     /* Block host signals until target signal handler entered. We
      * can't block SIGSEGV or SIGBUS while we're executing guest
      * code in case the guest code provokes one in the window between
@@ -564,7 +565,6 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
 
     /* interrupt the virtual CPU as soon as possible */
     cpu_exit(thread_cpu);
-    }
 }
 
 /* do_sigaltstack() returns target values and errnos. */
@@ -5761,14 +5761,6 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig)
         handler = sa->_sa_handler;
     }
 
-    if (sig == TARGET_SIGSEGV && sigismember(&ts->signal_mask, SIGSEGV)) {
-        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
-         * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
-         * because it got a real MMU fault), and treat as if default handler.
-         */
-        handler = TARGET_SIG_DFL;
-    }
-
     if (handler == TARGET_SIG_DFL) {
         /* default handler : ignore some signal. The other are job control or fatal */
         if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
@@ -5841,14 +5833,32 @@ void process_pending_signals(CPUArchState *cpu_env)
     sigfillset(&set);
     sigprocmask(SIG_SETMASK, &set, 0);
 
+    sig = ts->sync_signal.pending;
+    if (sig) {
+        /* Synchronous signals are forced,
+         * see force_sig_info() and callers in Linux
+         * Note that not all of our queue_signal() calls in QEMU correspond
+         * to force_sig_info() calls in Linux (some are send_sig_info()).
+         * However it seems like a kernel bug to me to allow the process
+         * to block a synchronous signal since it could then just end up
+         * looping round and round indefinitely.
+         */
+        if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
+            || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
+            sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
+            sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
+        }
+
+        handle_pending_signal(cpu_env, sig);
+    }
+
     for (sig = 1; sig <= TARGET_NSIG; sig++) {
         blocked_set = ts->in_sigsuspend ?
             &ts->sigsuspend_mask : &ts->signal_mask;
 
         if (ts->sigtab[sig - 1].pending &&
             (!sigismember(blocked_set,
-                          target_to_host_signal_table[sig])
-             || sig == TARGET_SIGSEGV)) {
+                          target_to_host_signal_table[sig]))) {
             handle_pending_signal(cpu_env, sig);
             /* Restart scan from the beginning */
             sig = 1;