diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 23a003aaa7..ede3ef1225 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -21,6 +21,7 @@
 #include <sys/ioctl.h>
 #include <sys/utsname.h>
 
+#include <linux/kvm_para.h>
 #include "standard-headers/asm-x86/kvm_para.h"
 #include "hw/xen/interface/arch-x86/cpuid.h"
 
@@ -209,6 +210,13 @@ int kvm_get_vm_type(MachineState *ms)
     return kvm_type;
 }
 
+bool kvm_enable_hypercall(uint64_t enable_mask)
+{
+    KVMState *s = KVM_STATE(current_accel());
+
+    return !kvm_vm_enable_cap(s, KVM_CAP_EXIT_HYPERCALL, 0, enable_mask);
+}
+
 bool kvm_has_smm(void)
 {
     return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
@@ -5322,6 +5330,50 @@ static bool host_supports_vmx(void)
     return ecx & CPUID_EXT_VMX;
 }
 
+/*
+ * Currently the handling here only supports use of KVM_HC_MAP_GPA_RANGE
+ * to service guest-initiated memory attribute update requests, so that
+ * KVM_SET_MEMORY_ATTRIBUTES can update whether or not a page should be
+ * backed by the private memory pool provided by guest_memfd, and as such
+ * it is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
+ *
+ * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as SEV live migration,
+ * are not currently implemented here.
+ *
+ * For the guest_memfd use-case, these exits will generally be synthesized
+ * by KVM based on platform-specific hypercalls, like GHCB requests in the
+ * case of SEV-SNP, and not issued directly within the guest through the
+ * KVM_HC_MAP_GPA_RANGE hypercall. So in this case, KVM_HC_MAP_GPA_RANGE is
+ * not actually advertised to guests via the KVM CPUID feature bit, as
+ * opposed to SEV live migration where it would be. Since it is unlikely the
+ * SEV live migration use-case would be useful for guest_memfd-backed guests,
+ * because private/shared page tracking is already provided through other
+ * means, these two use-cases should be treated as mutually exclusive.
+ */
+static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
+{
+    uint64_t gpa, size, attributes;
+
+    if (!machine_require_guest_memfd(current_machine))
+        return -EINVAL;
+
+    gpa = run->hypercall.args[0];
+    size = run->hypercall.args[1] * TARGET_PAGE_SIZE;
+    attributes = run->hypercall.args[2];
+
+    trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);
+
+    return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+}
+
+static int kvm_handle_hypercall(struct kvm_run *run)
+{
+    if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
+        return kvm_handle_hc_map_gpa_range(run);
+
+    return -EINVAL;
+}
+
 #define VMX_INVALID_GUEST_STATE 0x80000021
 
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
@@ -5417,6 +5469,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         ret = kvm_xen_handle_exit(cpu, &run->xen);
         break;
 #endif
+    case KVM_EXIT_HYPERCALL:
+        ret = kvm_handle_hypercall(run);
+        break;
     default:
         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
         ret = -1;
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 6b44844d95..34fc60774b 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -33,6 +33,7 @@
 bool kvm_has_smm(void);
 bool kvm_enable_x2apic(void);
 bool kvm_hv_vpindex_settable(void);
+bool kvm_enable_hypercall(uint64_t enable_mask);
 
 bool kvm_enable_sgx_provisioning(KVMState *s);
 bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
index b365a8e8e2..74a6234ff7 100644
--- a/target/i386/kvm/trace-events
+++ b/target/i386/kvm/trace-events
@@ -5,6 +5,7 @@ kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %"
 kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d"
 kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d"
 kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
+kvm_hc_map_gpa_range(uint64_t gpa, uint64_t size, uint64_t attributes, uint64_t flags) "gpa 0x%" PRIx64 " size 0x%" PRIx64 " attributes 0x%" PRIx64 " flags 0x%" PRIx64
 
 # xen-emu.c
 kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
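
KVM only delivers KVM_EXIT_HYPERCALL for hypercall numbers that userspace
has opted in to via KVM_CAP_EXIT_HYPERCALL, which is what the new
kvm_enable_hypercall() wrapper does. Below is a minimal sketch of a call
site; the function name is hypothetical and not part of this patch (the
expected users are the SEV-SNP/TDX platform setup paths). BIT_ULL() is
QEMU's 1ULL << n helper from "qemu/bitops.h", and KVM_HC_MAP_GPA_RANGE
comes from <linux/kvm_para.h>:

    /* Hypothetical call site: opt in to KVM_HC_MAP_GPA_RANGE exits
     * during confidential-guest setup, before vCPUs run. */
    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "qapi/error.h"
    #include <linux/kvm_para.h>
    #include "kvm_i386.h"

    static int enable_map_gpa_range_exits(Error **errp)
    {
        /* kvm_enable_hypercall() returns true on success; false means
         * the host kernel's KVM_CAP_EXIT_HYPERCALL mask lacks this bit. */
        if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
            error_setg(errp,
                       "KVM_HC_MAP_GPA_RANGE exits not supported by KVM");
            return -1;
        }
        return 0;
    }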
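
For context on where the exit handler's result goes: kvm_convert_memory()
(implemented in QEMU's generic KVM code, not in this patch) ultimately
drives the KVM_SET_MEMORY_ATTRIBUTES vm ioctl mentioned in the comment
above. A standalone sketch of that raw kernel interface follows, assuming
a host kernel with guest_memfd support (KVM_CAP_MEMORY_ATTRIBUTES); the
names vm_fd and set_range_private are illustrative only:

    /* Sketch of the raw KVM interface underneath kvm_convert_memory():
     * flip a GPA range between private (guest_memfd-backed) and shared. */
    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_range_private(int vm_fd, __u64 gpa, __u64 size,
                                 bool to_private)
    {
        struct kvm_memory_attributes attrs = {
            .address    = gpa,
            .size       = size,  /* both must be page-aligned */
            .attributes = to_private ? KVM_MEMORY_ATTRIBUTE_PRIVATE : 0,
        };

        return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    }

Clearing KVM_MEMORY_ATTRIBUTE_PRIVATE converts the range back to shared.
The conversions handled by this patch can be observed at runtime via the
new trace event, e.g. with -trace kvm_hc_map_gpa_range on the QEMU
command line.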