arm/kvm: add support for MTE

Extend the 'mte' property for the virt machine to cover KVM as
well. For KVM, we don't allocate tag memory, but instead enable the
capability.

If MTE has been enabled, we need to disable migration, as we do not
yet have a way to migrate the tags as well. Therefore, MTE will stay
off with KVM unless requested explicitly.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230428095533.21747-2-cohuck@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Cornelia Huck 2023-04-28 11:55:33 +02:00 committed by Peter Maydell
parent 96e6d25fdd
commit b320e21c48
6 changed files with 109 additions and 36 deletions

View File

@@ -2146,7 +2146,7 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
if (vms->mte && (kvm_enabled() || hvf_enabled())) {
if (vms->mte && hvf_enabled()) {
error_report("mach-virt: %s does not support providing "
"MTE to the guest CPU",
current_accel_name());
@@ -2216,6 +2216,7 @@ static void machvirt_init(MachineState *machine)
}
if (vms->mte) {
if (tcg_enabled()) {
/* Create the memory region only once, but link to all cpus. */
if (!tag_sysmem) {
/*
@@ -2235,21 +2236,29 @@ static void machvirt_init(MachineState *machine)
if (vms->secure) {
secure_tag_sysmem = g_new(MemoryRegion, 1);
memory_region_init(secure_tag_sysmem, OBJECT(machine),
"secure-tag-memory", UINT64_MAX / 32);
"secure-tag-memory",
UINT64_MAX / 32);
/* As with ram, secure-tag takes precedence over tag. */
memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
tag_sysmem, -1);
memory_region_add_subregion_overlap(secure_tag_sysmem,
0, tag_sysmem, -1);
}
}
object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
&error_abort);
object_property_set_link(cpuobj, "tag-memory",
OBJECT(tag_sysmem), &error_abort);
if (vms->secure) {
object_property_set_link(cpuobj, "secure-tag-memory",
OBJECT(secure_tag_sysmem),
&error_abort);
}
} else if (kvm_enabled()) {
if (!kvm_arm_mte_supported()) {
error_report("MTE requested, but not supported by KVM");
exit(1);
}
kvm_arm_enable_mte(cpuobj, &error_abort);
}
}
qdev_realize(DEVICE(cpuobj), NULL, &error_fatal);

View File

@@ -1480,6 +1480,7 @@ void arm_cpu_post_init(Object *obj)
qdev_prop_allow_set_link_before_realize,
OBJ_PROP_LINK_STRONG);
}
cpu->has_mte = true;
}
#endif
}
@@ -1616,7 +1617,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (cpu->tag_memory) {
error_setg(errp,
"Cannot enable %s when guest CPUs has MTE enabled",
"Cannot enable %s when guest CPUs has tag memory enabled",
current_accel_name());
return;
}
@@ -1996,10 +1997,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
#ifndef CONFIG_USER_ONLY
if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
if (!cpu->has_mte && cpu_isar_feature(aa64_mte, cpu)) {
/*
* Disable the MTE feature bits if we do not have tag-memory
* provided by the machine.
* Disable the MTE feature bits if we do not have the feature
* setup by the machine.
*/
cpu->isar.id_aa64pfr1 =
FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);

View File

@@ -935,6 +935,9 @@ struct ArchCPU {
*/
uint32_t psci_conduit;
/* CPU has Memory Tag Extension */
bool has_mte;
/* For v8M, initial value of the Secure VTOR */
uint32_t init_svtor;
/* For v8M, initial value of the Non-secure VTOR */
@@ -1053,6 +1056,7 @@ struct ArchCPU {
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_lpa2;
OnOffAuto prop_mte;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
uint32_t dcz_blocksize;

View File

@@ -31,6 +31,7 @@
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -1064,3 +1065,37 @@ bool kvm_arch_cpu_check_are_resettable(void)
/* Intentionally empty: nothing arch-specific to add to the accel class. */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
/*
 * kvm_arm_enable_mte: enable KVM_CAP_ARM_MTE for the VM and flag @cpuobj.
 *
 * @cpuobj: CPU object whose "has_mte" property is set on success
 * @errp: filled in if enabling the capability or installing the
 *        migration blocker fails
 *
 * The capability is per-VM, so the enable attempt (and the single
 * migration blocker) is made only once; calls for subsequent vCPUs
 * just replay the recorded outcome via the static flags below.
 */
void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
{
    /* Per-VM outcome, shared across all vCPU invocations. */
    static bool tried_to_enable;
    static bool succeeded_to_enable;
    Error *mte_migration_blocker = NULL;
    int ret;

    if (!tried_to_enable) {
        /*
         * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
         * sense), and we only want a single migration blocker as well.
         */
        tried_to_enable = true;

        ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
            return;
        }

        /* TODO: add proper migration support with MTE enabled */
        error_setg(&mte_migration_blocker,
                   "Live migration disabled due to MTE enabled");
        if (migrate_add_blocker(mte_migration_blocker, errp)) {
            /* Blocker was not registered; drop our reference to it. */
            error_free(mte_migration_blocker);
            return;
        }
        succeeded_to_enable = true;
    }
    if (succeeded_to_enable) {
        /* Capability is live: mark this CPU so realize keeps the MTE bits. */
        object_property_set_bool(cpuobj, "has_mte", true, NULL);
    }
}

View File

@@ -756,6 +756,11 @@ bool kvm_arm_steal_time_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}
/* Report whether the host kernel's KVM exposes KVM_CAP_ARM_MTE. */
bool kvm_arm_mte_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
}
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(CPUState *cs)

View File

@@ -313,6 +313,13 @@ bool kvm_arm_pmu_supported(void);
*/
bool kvm_arm_sve_supported(void);
/**
* kvm_arm_mte_supported:
*
* Returns: true if KVM can enable MTE, and false otherwise.
*/
bool kvm_arm_mte_supported(void);
/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
@@ -377,6 +384,8 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
#else
/*
@@ -403,6 +412,11 @@ static inline bool kvm_arm_steal_time_supported(void)
return false;
}
/* Stub for builds without KVM: MTE via KVM is never available. */
static inline bool kvm_arm_mte_supported(void)
{
    return false;
}
/*
* These functions should never actually be called without KVM support.
*/
@@ -451,6 +465,11 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs)
g_assert_not_reached();
}
/* Stub for builds without KVM; callers gate on kvm_enabled() first. */
static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
{
    g_assert_not_reached();
}
#endif
static inline const char *gic_class_name(void)