i386/xen: implement HVMOP_set_evtchn_upcall_vector

The HVMOP_set_evtchn_upcall_vector hypercall sets the per-vCPU upcall
vector, to be delivered to the local APIC just like an MSI (with an EOI).

This takes precedence over the system-wide delivery method set by the
HVMOP_set_param hypercall with HVM_PARAM_CALLBACK_IRQ. It's used by
Windows and Xen (PV shim) guests but normally not by Linux.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
[dwmw2: Rework for upstream kernel changes and split from HVMOP_set_param]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Author: Ankur Arora, 2022-12-06 11:14:07 +00:00
Committed-by: David Woodhouse
commit 105b47fdf2 (parent 3b06f29b24)
4 changed files with 84 additions and 3 deletions
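
For reference, the guest programs this with a single hypercall whose argument
(struct xen_hvm_evtchn_upcall_vector, from Xen's public hvm/hvm_op.h) names the
target vCPU and the vector to inject; padding brings it to the 8 bytes that the
build assert in the handler below depends on. A minimal guest-side sketch,
assuming the usual HYPERVISOR_hvm_op() hypercall wrapper from the Xen guest
headers:

    /* Argument layout, as in Xen's public hvm/hvm_op.h */
    struct xen_hvm_evtchn_upcall_vector {
        uint32_t vcpu;
        uint8_t vector;
    };

    /* Register 'vector' as the per-vCPU upcall vector for 'vcpu'.
     * Vectors below 0x10 are rejected with -EINVAL by the handler below.
     */
    static int set_evtchn_upcall_vector(uint32_t vcpu, uint8_t vector)
    {
        struct xen_hvm_evtchn_upcall_vector up = {
            .vcpu = vcpu,
            .vector = vector,
        };

        return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &up);
    }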

diff --git a/target/i386/cpu.h b/target/i386/cpu.h

@@ -1803,6 +1803,7 @@ typedef struct CPUArchState {
     uint64_t xen_vcpu_info_default_gpa;
     uint64_t xen_vcpu_time_info_gpa;
     uint64_t xen_vcpu_runstate_gpa;
+    uint8_t xen_vcpu_callback_vector;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;

diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events

@@ -11,3 +11,4 @@ kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1
 kvm_xen_soft_reset(void) ""
 kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
 kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
+kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d"

diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c

@@ -27,6 +27,7 @@
 #include "hw/xen/interface/sched.h"
 #include "hw/xen/interface/memory.h"
 #include "hw/xen/interface/hvm/hvm_op.h"
+#include "hw/xen/interface/hvm/params.h"
 #include "hw/xen/interface/vcpu.h"
 #include "hw/xen/interface/event_channel.h"
@@ -193,7 +194,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
         fi.submap |= 1 << XENFEAT_writable_page_tables |
                      1 << XENFEAT_writable_descriptor_tables |
                      1 << XENFEAT_auto_translated_physmap |
-                     1 << XENFEAT_supervisor_mode_kernel;
+                     1 << XENFEAT_supervisor_mode_kernel |
+                     1 << XENFEAT_hvm_callback_vector;
     }

     err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -220,6 +222,31 @@ static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
     return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
 }

+static int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
+{
+    uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+    struct kvm_xen_vcpu_attr xva;
+
+    xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
+    xva.u.vector = vector;
+
+    trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva);
+}
+
+static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_callback_vector = data.host_int;
+
+    if (kvm_xen_has_cap(EVTCHN_SEND)) {
+        kvm_xen_set_vcpu_callback_vector(cs);
+    }
+}
+
 static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -276,12 +303,16 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
     env->xen_vcpu_info_default_gpa = INVALID_GPA;
     env->xen_vcpu_time_info_gpa = INVALID_GPA;
     env->xen_vcpu_runstate_gpa = INVALID_GPA;
+    env->xen_vcpu_callback_vector = 0;

     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
                           INVALID_GPA);
     kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                           INVALID_GPA);
+
+    if (kvm_xen_has_cap(EVTCHN_SEND)) {
+        kvm_xen_set_vcpu_callback_vector(cs);
+    }
 }
@@ -458,17 +489,53 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     return true;
 }

+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+                                              X86CPU *cpu, uint64_t arg)
+{
+    struct xen_hvm_evtchn_upcall_vector up;
+    CPUState *target_cs;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(up) == 8);
+
+    if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) {
+        return -EFAULT;
+    }
+
+    if (up.vector < 0x10) {
+        return -EINVAL;
+    }
+
+    target_cs = qemu_get_cpu(up.vcpu);
+    if (!target_cs) {
+        return -EINVAL;
+    }
+
+    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+                     RUN_ON_CPU_HOST_INT(up.vector));
+    return 0;
+}
+
 static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, uint64_t arg)
 {
+    int ret = -ENOSYS;
     switch (cmd) {
+    case HVMOP_set_evtchn_upcall_vector:
+        ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+                                                 exit->u.hcall.params[0]);
+        break;
+
     case HVMOP_pagetable_dying:
-        exit->u.hcall.result = -ENOSYS;
-        return true;
+        ret = -ENOSYS;
+        break;

     default:
         return false;
     }
+
+    exit->u.hcall.result = ret;
     return true;
 }

 static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
@@ -812,6 +879,17 @@ int kvm_put_xen_state(CPUState *cs)
         }
     }

+    if (!kvm_xen_has_cap(EVTCHN_SEND)) {
+        return 0;
+    }
+
+    if (env->xen_vcpu_callback_vector) {
+        ret = kvm_xen_set_vcpu_callback_vector(cs);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
     return 0;
 }

diff --git a/target/i386/machine.c b/target/i386/machine.c

@@ -1274,6 +1274,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
         VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
+        VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
         VMSTATE_END_OF_LIST()
     }
 };