kvm: Unconditionally reenter kernel after IO exits
KVM requires us to reenter the kernel after IO exits in order to complete instruction emulation. Failing to do so will leave the kernel state behind in an inconsistent state. To ensure that we get back ASAP, we issue a self-signal that will cause KVM_RUN to return once the pending operations are completed. We can move kvm_arch_process_irqchip_events out of the inner VCPU loop. The only state that mattered at its old place was a pending INIT request. Catch it in kvm_arch_pre_run and also trigger a self-signal to process the request on the next kvm_cpu_exec. This patch also fixes the missing exit_request check in kvm_cpu_exec in the CONFIG_IOTHREAD case. Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> CC: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent
46d62fac8a
commit
9ccfac9ea4
31
kvm-all.c
31
kvm-all.c
@ -199,7 +199,6 @@ int kvm_pit_in_kernel(void)
|
||||
return kvm_state->pit_in_kernel;
|
||||
}
|
||||
|
||||
|
||||
int kvm_init_vcpu(CPUState *env)
|
||||
{
|
||||
KVMState *s = kvm_state;
|
||||
@ -896,29 +895,33 @@ int kvm_cpu_exec(CPUState *env)
|
||||
|
||||
DPRINTF("kvm_cpu_exec()\n");
|
||||
|
||||
if (kvm_arch_process_irqchip_events(env)) {
|
||||
env->exit_request = 0;
|
||||
env->exception_index = EXCP_HLT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
do {
|
||||
#ifndef CONFIG_IOTHREAD
|
||||
if (env->exit_request) {
|
||||
DPRINTF("interrupt exit requested\n");
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (kvm_arch_process_irqchip_events(env)) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (env->kvm_vcpu_dirty) {
|
||||
kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
|
||||
env->kvm_vcpu_dirty = 0;
|
||||
}
|
||||
|
||||
kvm_arch_pre_run(env, run);
|
||||
if (env->exit_request) {
|
||||
DPRINTF("interrupt exit requested\n");
|
||||
/*
|
||||
* KVM requires us to reenter the kernel after IO exits to complete
|
||||
* instruction emulation. This self-signal will ensure that we
|
||||
* leave ASAP again.
|
||||
*/
|
||||
qemu_cpu_kick_self();
|
||||
}
|
||||
cpu_single_env = NULL;
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
||||
ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
cpu_single_env = env;
|
||||
kvm_arch_post_run(env, run);
|
||||
|
@ -1426,6 +1426,11 @@ int kvm_arch_get_registers(CPUState *env)
|
||||
|
||||
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
|
||||
{
|
||||
/* Force the VCPU out of its inner loop to process the INIT request */
|
||||
if (env->interrupt_request & CPU_INTERRUPT_INIT) {
|
||||
env->exit_request = 1;
|
||||
}
|
||||
|
||||
/* Inject NMI */
|
||||
if (env->interrupt_request & CPU_INTERRUPT_NMI) {
|
||||
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
|
||||
|
Loading…
Reference in New Issue
Block a user