Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210603' into staging

target-arm queue:
 * Some not-yet-enabled preliminaries for M-profile MVE support
 * Consistently use "Cortex-Axx", not "Cortex Axx" in docs, comments
 * docs: Fix installation of man pages with Sphinx 4.x
 * Mark LDS{MIN,MAX} as signed operations
 * Fix missing syndrome value for DAIF and PAC check exceptions
 * Implement BFloat16 extensions
 * Refactoring of hvf accelerator code in preparation for aarch64 support
 * Fix some coverity nits in test code

# gpg: Signature made Thu 03 Jun 2021 16:58:02 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20210603: (45 commits)
  tests/unit/test-vmstate: Assert that dup() and mkstemp() succeed
  tests/qtest/tpm-tests: Remove unnecessary NULL checks
  tests/qtest/pflash-cfi02-test: Avoid potential integer overflow
  tests/qtest/hd-geo-test: Fix checks on mkstemp() return value
  tests/qtest/e1000e-test: Check qemu_recv() succeeded
  tests/qtest/bios-tables-test: Check for dup2() failure
  hvf: Simplify post reset/init/loadvm hooks
  hvf: Introduce hvf vcpu struct
  hvf: Remove hvf-accel-ops.h
  hvf: Make synchronize functions static
  hvf: Use cpu_synchronize_state()
  hvf: Split out common code on vcpu init and destroy
  hvf: Remove use of hv_uvaddr_t and hv_gpaddr_t
  hvf: Make hvf_set_phys_mem() static
  hvf: Move hvf internal definitions into common header
  hvf: Move cpu functions into common directory
  hvf: Move vcpu thread functions into common directory
  hvf: Move assert_hvf_ok() into common directory
  target/arm: Enable BFloat16 extensions
  linux-user/aarch64: Enable hwcap bits for bfloat16
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit 453d9c61dd
MAINTAINERS
@@ -436,7 +436,15 @@ M: Roman Bolshakov <r.bolshakov@yadro.com>
 W: https://wiki.qemu.org/Features/HVF
 S: Maintained
 F: target/i386/hvf/
 
+HVF
+M: Cameron Esfahani <dirty@apple.com>
+M: Roman Bolshakov <r.bolshakov@yadro.com>
+W: https://wiki.qemu.org/Features/HVF
+S: Maintained
+F: accel/hvf/
+F: include/sysemu/hvf.h
+F: include/sysemu/hvf_int.h
 
 WHPX CPUs
 M: Sunil Muthuswamy <sunilmut@microsoft.com>
accel/hvf/hvf-accel-ops.c (new file, 471 lines)
@@ -0,0 +1,471 @@
/*
 * Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contain code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "qemu/guest-random.h"

HVFState *hvf_state;

/* Memory slots */

hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}

struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
    uint64_t gva;
};

struct mac_slot mac_slots[32];

static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}

static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writeable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writeable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}

static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

static void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    /* QEMU state is the reference, push it to HVF now and on next entry */
    cpu->vcpu_dirty = true;
}

static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    /* stop tracking region*/
    } else {
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}

static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}

static MemoryListener hvf_memory_listener = {
    .priority = 10,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};

static void dummy_signal(int sig)
{
}

bool hvf_allowed;

static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}

static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}

static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};

static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);

static void hvf_vcpu_destroy(CPUState *cpu)
{
    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
    assert_hvf_ok(ret);

    hvf_arch_vcpu_destroy(cpu);
    g_free(cpu->hvf);
    cpu->hvf = NULL;
}

static int hvf_init_vcpu(CPUState *cpu)
{
    int r;

    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));

    /* init cpu signals */
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    return hvf_arch_init_vcpu(cpu);
}

/*
 * The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature.
 */
static void *hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void hvf_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /*
     * HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode.
     */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = hvf_start_vcpu_thread;

    ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
    ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
    ops->synchronize_state = hvf_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
};
static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}
type_init(hvf_accel_ops_register_types);
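The slot lookup in hvf_find_overlap_slot() above is the standard half-open
interval intersection test: [a, a+la) and [b, b+lb) overlap iff a < b+lb and
b < a+la, with an extra slot->size guard to skip empty slots. A minimal
standalone sketch of just the predicate (hypothetical helper name, not part of
the patch):

```c
#include <stdbool.h>
#include <stdint.h>

/* Half-open interval overlap: [s1, s1 + l1) vs [s2, s2 + l2). */
static bool ranges_overlap(uint64_t s1, uint64_t l1, uint64_t s2, uint64_t l2)
{
    return s1 < s2 + l2 && s2 < s1 + l1;
}
```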
accel/hvf/hvf-all.c (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * QEMU Hypervisor.framework support
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"

void assert_hvf_ok(hv_return_t ret)
{
    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        error_report("Error: HV_ERROR");
        break;
    case HV_BUSY:
        error_report("Error: HV_BUSY");
        break;
    case HV_BAD_ARGUMENT:
        error_report("Error: HV_BAD_ARGUMENT");
        break;
    case HV_NO_RESOURCES:
        error_report("Error: HV_NO_RESOURCES");
        break;
    case HV_NO_DEVICE:
        error_report("Error: HV_NO_DEVICE");
        break;
    case HV_UNSUPPORTED:
        error_report("Error: HV_UNSUPPORTED");
        break;
    default:
        error_report("Unknown Error");
    }

    abort();
}
accel/hvf/meson.build (new file, 7 lines)
@@ -0,0 +1,7 @@
hvf_ss = ss.source_set()
hvf_ss.add(files(
  'hvf-all.c',
  'hvf-accel-ops.c',
))

specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss)
accel/meson.build
@@ -2,6 +2,7 @@ specific_ss.add(files('accel-common.c'))
 softmmu_ss.add(files('accel-softmmu.c'))
 user_ss.add(files('accel-user.c'))
 
+subdir('hvf')
 subdir('qtest')
 subdir('kvm')
 subdir('tcg')
 
docs/conf.py
@@ -279,6 +279,7 @@ man_pages = [
     ['Stefan Hajnoczi <stefanha@redhat.com>',
      'Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>'], 1),
 ]
+man_make_section_directory = False
 
 # -- Options for Texinfo output -------------------------------------------
 
docs/system/arm/aspeed.rst
@@ -5,7 +5,7 @@ The QEMU Aspeed machines model BMCs of various OpenPOWER systems and
 Aspeed evaluation boards. They are based on different releases of the
 Aspeed SoC : the AST2400 integrating an ARM926EJ-S CPU (400MHz), the
 AST2500 with an ARM1176JZS CPU (800MHz) and more recently the AST2600
-with dual cores ARM Cortex A7 CPUs (1.2GHz).
+with dual cores ARM Cortex-A7 CPUs (1.2GHz).
 
 The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, USB, SPI, I2C,
 etc.
@@ -24,7 +24,7 @@ AST2500 SoC based machines :
 
 AST2600 SoC based machines :
 
-- ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex A7)
+- ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex-A7)
 - ``tacoma-bmc`` OpenPOWER Witherspoon POWER9 AST2600 BMC
 
 Supported devices
docs/system/arm/nuvoton.rst
@@ -3,19 +3,19 @@ Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
 
 The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
 designed to be used as Baseboard Management Controllers (BMCs) in various
-servers. They all feature one or two ARM Cortex A9 CPU cores, as well as an
+servers. They all feature one or two ARM Cortex-A9 CPU cores, as well as an
 assortment of peripherals targeted for either Enterprise or Data Center /
 Hyperscale applications. The former is a superset of the latter, so NPCM750 has
 all the peripherals of NPCM730 and more.
 
 .. _Nuvoton iBMC: https://www.nuvoton.com/products/cloud-computing/ibmc/
 
-The NPCM750 SoC has two Cortex A9 cores and is targeted for the Enterprise
+The NPCM750 SoC has two Cortex-A9 cores and is targeted for the Enterprise
 segment. The following machines are based on this chip :
 
 - ``npcm750-evb`` Nuvoton NPCM750 Evaluation board
 
-The NPCM730 SoC has two Cortex A9 cores and is targeted for Data Center and
+The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
 Hyperscale applications. The following machines are based on this chip :
 
 - ``quanta-gsj`` Quanta GSJ server BMC
 
docs/system/arm/sabrelite.rst
@@ -10,7 +10,7 @@ Supported devices
 
 The SABRE Lite machine supports the following devices:
 
-* Up to 4 Cortex A9 cores
+* Up to 4 Cortex-A9 cores
 * Generic Interrupt Controller
 * 1 Clock Controller Module
 * 1 System Reset Controller
 
fpu/softfloat-parts.c.inc
@@ -176,13 +176,12 @@ static void partsN(uncanon)(FloatPartsN *p, float_status *s,
         g_assert_not_reached();
     }
 
+    overflow_norm = false;
     switch (s->float_rounding_mode) {
     case float_round_nearest_even:
-        overflow_norm = false;
         inc = ((p->frac_lo & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0);
         break;
     case float_round_ties_away:
-        overflow_norm = false;
        inc = frac_lsbm1;
        break;
    case float_round_to_zero:
@@ -199,6 +198,8 @@ static void partsN(uncanon)(FloatPartsN *p, float_status *s,
         break;
     case float_round_to_odd:
         overflow_norm = true;
+        /* fall through */
+    case float_round_to_odd_inf:
         inc = p->frac_lo & frac_lsb ? 0 : round_mask;
         break;
     default:
@@ -259,6 +260,7 @@ static void partsN(uncanon)(FloatPartsN *p, float_status *s,
                    ? frac_lsbm1 : 0);
             break;
         case float_round_to_odd:
+        case float_round_to_odd_inf:
             inc = p->frac_lo & frac_lsb ? 0 : round_mask;
             break;
         default:
hw/arm/armv7m.c
@@ -176,6 +176,12 @@ static void armv7m_realize(DeviceState *dev, Error **errp)
             return;
         }
     }
+    if (object_property_find(OBJECT(s->cpu), "init-nsvtor")) {
+        if (!object_property_set_uint(OBJECT(s->cpu), "init-nsvtor",
+                                      s->init_nsvtor, errp)) {
+            return;
+        }
+    }
     if (object_property_find(OBJECT(s->cpu), "start-powered-off")) {
         if (!object_property_set_bool(OBJECT(s->cpu), "start-powered-off",
                                       s->start_powered_off, errp)) {
@@ -254,6 +260,7 @@ static Property armv7m_properties[] = {
                      MemoryRegion *),
     DEFINE_PROP_LINK("idau", ARMv7MState, idau, TYPE_IDAU_INTERFACE, Object *),
     DEFINE_PROP_UINT32("init-svtor", ARMv7MState, init_svtor, 0),
+    DEFINE_PROP_UINT32("init-nsvtor", ARMv7MState, init_nsvtor, 0),
     DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false),
     DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off,
                      false),
 
hw/arm/aspeed.c
@@ -947,7 +947,7 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data)
     MachineClass *mc = MACHINE_CLASS(oc);
     AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
 
-    mc->desc = "Aspeed AST2600 EVB (Cortex A7)";
+    mc->desc = "Aspeed AST2600 EVB (Cortex-A7)";
     amc->soc_name = "ast2600-a1";
     amc->hw_strap1 = AST2600_EVB_HW_STRAP1;
     amc->hw_strap2 = AST2600_EVB_HW_STRAP2;
@@ -966,7 +966,7 @@ static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data)
     MachineClass *mc = MACHINE_CLASS(oc);
     AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
 
-    mc->desc = "OpenPOWER Tacoma BMC (Cortex A7)";
+    mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)";
     amc->soc_name = "ast2600-a1";
     amc->hw_strap1 = TACOMA_BMC_HW_STRAP1;
     amc->hw_strap2 = TACOMA_BMC_HW_STRAP2;
@@ -1003,7 +1003,7 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
     MachineClass *mc = MACHINE_CLASS(oc);
     AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
 
-    mc->desc = "IBM Rainier BMC (Cortex A7)";
+    mc->desc = "IBM Rainier BMC (Cortex-A7)";
     amc->soc_name = "ast2600-a1";
     amc->hw_strap1 = RAINIER_BMC_HW_STRAP1;
     amc->hw_strap2 = RAINIER_BMC_HW_STRAP2;
 
hw/arm/mcimx6ul-evk.c
@@ -67,7 +67,7 @@ static void mcimx6ul_evk_init(MachineState *machine)
 
 static void mcimx6ul_evk_machine_init(MachineClass *mc)
 {
-    mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex A7)";
+    mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex-A7)";
     mc->init = mcimx6ul_evk_init;
     mc->max_cpus = FSL_IMX6UL_NUM_CPUS;
     mc->default_ram_id = "mcimx6ul-evk.ram";
 
hw/arm/mcimx7d-sabre.c
@@ -67,7 +67,7 @@ static void mcimx7d_sabre_init(MachineState *machine)
 
 static void mcimx7d_sabre_machine_init(MachineClass *mc)
 {
-    mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex A7)";
+    mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex-A7)";
     mc->init = mcimx7d_sabre_init;
     mc->max_cpus = FSL_IMX7_NUM_CPUS;
     mc->default_ram_id = "mcimx7d-sabre.ram";
 
hw/arm/npcm7xx_boards.c
@@ -299,7 +299,7 @@ static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data)
 
     npcm7xx_set_soc_type(nmc, TYPE_NPCM750);
 
-    mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex A9)";
+    mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex-A9)";
     mc->init = npcm750_evb_init;
     mc->default_ram_size = 512 * MiB;
 };
@@ -311,7 +311,7 @@ static void gsj_machine_class_init(ObjectClass *oc, void *data)
 
     npcm7xx_set_soc_type(nmc, TYPE_NPCM730);
 
-    mc->desc = "Quanta GSJ (Cortex A9)";
+    mc->desc = "Quanta GSJ (Cortex-A9)";
     mc->init = quanta_gsj_init;
     mc->default_ram_size = 512 * MiB;
 };
 
hw/arm/sabrelite.c
@@ -105,7 +105,7 @@ static void sabrelite_init(MachineState *machine)
 
 static void sabrelite_machine_init(MachineClass *mc)
 {
-    mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex A9)";
+    mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex-A9)";
     mc->init = sabrelite_init;
     mc->max_cpus = FSL_IMX6_NUM_CPUS;
     mc->ignore_memory_transaction_failures = true;
 
hw/misc/npcm7xx_clk.c
@@ -35,7 +35,7 @@
 #define NPCM7XX_CLOCK_REF_HZ (25000000)
 
 /* Register Field Definitions */
-#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex A9 Cores */
+#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */
 
 #define PLLCON_LOKI BIT(31)
 #define PLLCON_LOKS BIT(30)
 
include/fpu/softfloat-types.h
@@ -134,8 +134,10 @@ typedef enum __attribute__((__packed__)) {
     float_round_up = 2,
     float_round_to_zero = 3,
     float_round_ties_away = 4,
-    /* Not an IEEE rounding mode: round to the closest odd mantissa value */
+    /* Not an IEEE rounding mode: round to closest odd, overflow to max */
     float_round_to_odd = 5,
+    /* Not an IEEE rounding mode: round to closest odd, overflow to inf */
+    float_round_to_odd_inf = 6,
 } FloatRoundMode;
 
 /*
 
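Round-to-odd ("von Neumann rounding") forces the result's low bit to 1
whenever any discarded bits were nonzero; because the sticky information
survives in that bit, a later rounding to a narrower format cannot
double-round incorrectly. A self-contained integer illustration of the idea
(plain C, not QEMU's implementation):

```c
#include <stdint.h>

/* Drop 'shift' low bits of x, forcing the result odd if any bits were
 * lost.  Assumes 0 < shift < 64. */
static uint64_t round_to_odd(uint64_t x, unsigned shift)
{
    uint64_t lost = x & ((UINT64_C(1) << shift) - 1);
    return (x >> shift) | (lost != 0);
}
```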
include/hw/arm/allwinner-h3.h
@@ -18,7 +18,7 @@
  */
 
 /*
- * The Allwinner H3 is a System on Chip containing four ARM Cortex A7
+ * The Allwinner H3 is a System on Chip containing four ARM Cortex-A7
  * processor cores. Features and specifications include DDR2/DDR3 memory,
  * SD/MMC storage cards, 10/100/1000Mbit Ethernet, USB 2.0, HDMI and
  * various I/O modules.
 
include/hw/arm/armv7m.h
@@ -46,6 +46,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M)
  * devices will be automatically layered on top of this view.)
  * + Property "idau": IDAU interface (forwarded to CPU object)
  * + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
+ * + Property "init-nsvtor": non-secure VTOR reset value (forwarded to CPU object)
  * + Property "vfp": enable VFP (forwarded to CPU object)
  * + Property "dsp": enable DSP (forwarded to CPU object)
  * + Property "enable-bitband": expose bitbanded IO
@@ -69,6 +70,7 @@ struct ARMv7MState {
     MemoryRegion *board_memory;
     Object *idau;
     uint32_t init_svtor;
+    uint32_t init_nsvtor;
     bool enable_bitband;
     bool start_powered_off;
     bool vfp;
 
include/hw/core/cpu.h
@@ -214,6 +214,7 @@ struct KVMState;
 struct kvm_run;
 
 struct hax_vcpu_state;
+struct hvf_vcpu_state;
 
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
@@ -406,7 +407,7 @@ struct CPUState {
 
     struct hax_vcpu_state *hax_vcpu;
 
-    int hvf_fd;
+    struct hvf_vcpu_state *hvf;
 
     /* track IOMMUs whose translations we've cached in the TCG TLB */
     GArray *iommu_notifiers;
 
include/sysemu/hvf_int.h (new file, 58 lines)
@@ -0,0 +1,58 @@
/*
 * QEMU Hypervisor.framework (HVF) support
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* header to be included in HVF-specific code */

#ifndef HVF_INT_H
#define HVF_INT_H

#include <Hypervisor/hv.h>

/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)

typedef struct hvf_slot {
    uint64_t start;
    uint64_t size;
    uint8_t *mem;
    int slot_id;
    uint32_t flags;
    MemoryRegion *region;
} hvf_slot;

typedef struct hvf_vcpu_caps {
    uint64_t vmx_cap_pinbased;
    uint64_t vmx_cap_procbased;
    uint64_t vmx_cap_procbased2;
    uint64_t vmx_cap_entry;
    uint64_t vmx_cap_exit;
    uint64_t vmx_cap_preemption_timer;
} hvf_vcpu_caps;

struct HVFState {
    AccelState parent;
    hvf_slot slots[32];
    int num_slots;

    hvf_vcpu_caps *hvf_caps;
};
extern HVFState *hvf_state;

struct hvf_vcpu_state {
    int fd;
};

void assert_hvf_ok(hv_return_t ret);
int hvf_arch_init_vcpu(CPUState *cpu);
void hvf_arch_vcpu_destroy(CPUState *cpu);
int hvf_vcpu_exec(CPUState *);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
int hvf_put_registers(CPUState *);
int hvf_get_registers(CPUState *);

#endif
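Wrapping the raw vcpu fd in struct hvf_vcpu_state gives each vCPU a place for
accelerator-private data, so the upcoming aarch64 port can widen the struct
without touching CPUState again. A sketch of the access pattern this enables
(hypothetical accessor, not part of the patch):

```c
/* Hypothetical convenience accessor; assumes cpu->hvf was allocated in
 * hvf_init_vcpu() before any use. */
static inline int hvf_vcpu_fd(CPUState *cpu)
{
    return cpu->hvf->fd;
}
```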
linux-user/elfload.c
@@ -659,7 +659,9 @@ static uint32_t get_elf_hwcap2(void)
     GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
     GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
     GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
+    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
     GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
+    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
     GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
     GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
     GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
 
target/arm/cpu.c
@@ -327,6 +327,7 @@ static void arm_cpu_reset(DeviceState *dev)
         env->regs[14] = 0xffffffff;
 
         env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
+        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;
 
         /* Load the initial SP and PC from offset 0 and 4 in the vector table */
         vecbase = env->v7m.vecbase[env->v7m.secure];
@@ -1272,6 +1273,15 @@ void arm_cpu_post_init(Object *obj)
                                        &cpu->init_svtor,
                                        OBJ_PROP_FLAG_READWRITE);
     }
+    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+        /*
+         * Initial value of the NS VTOR (for cores without the Security
+         * extension, this is the only VTOR)
+         */
+        object_property_add_uint32_ptr(obj, "init-nsvtor",
+                                       &cpu->init_nsvtor,
+                                       OBJ_PROP_FLAG_READWRITE);
+    }
 
     qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
 
@@ -1463,6 +1473,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
 
         u = cpu->isar.id_isar6;
         u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
+        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
         cpu->isar.id_isar6 = u;
 
         u = cpu->isar.mvfr0;
@@ -1503,6 +1514,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
 
         t = cpu->isar.id_aa64isar1;
         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
+        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
         cpu->isar.id_aa64isar1 = t;
 
@@ -1518,6 +1530,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
 
         u = cpu->isar.id_isar6;
         u = FIELD_DP32(u, ID_ISAR6, DP, 0);
         u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
+        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
         u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
         cpu->isar.id_isar6 = u;
 
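Boards (or container devices such as armv7m) set the new property before
realizing the CPU, exactly as with "init-svtor"; the armv7m_realize() hunk
above shows the forwarding side. A usage sketch with a hypothetical vector
table address:

```c
/* Place the non-secure vector table at a hypothetical 0x20000000. */
object_property_set_uint(OBJECT(cpu), "init-nsvtor", 0x20000000, &error_fatal);
```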
target/arm/cpu.h
@@ -563,7 +563,8 @@ typedef struct CPUARMState {
         uint32_t fpdscr[M_REG_NUM_BANKS];
         uint32_t cpacr[M_REG_NUM_BANKS];
         uint32_t nsacr;
-        int ltpsize;
+        uint32_t ltpsize;
+        uint32_t vpr;
     } v7m;
 
     /* Information associated with an exception about to be taken:
@@ -868,6 +869,8 @@ struct ARMCPU {
 
     /* For v8M, initial value of the Secure VTOR */
     uint32_t init_svtor;
+    /* For v8M, initial value of the Non-secure VTOR */
+    uint32_t init_nsvtor;
 
     /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
      * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
@@ -1561,6 +1564,7 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
 
 #define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
 #define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
+#define FPCR_LTPSIZE_LENGTH 3
 
 #define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
 #define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
@@ -1761,6 +1765,11 @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
      R_V7M_FPCCR_UFRDY_MASK | \
      R_V7M_FPCCR_ASPEN_MASK)
 
+/* v7M VPR bits */
+FIELD(V7M_VPR, P0, 0, 16)
+FIELD(V7M_VPR, MASK01, 16, 4)
+FIELD(V7M_VPR, MASK23, 20, 4)
+
 /*
  * System register ID fields.
  */
@@ -3783,6 +3792,11 @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
     return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
 }
 
+static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
+}
+
 static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
 {
     return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
@@ -3817,6 +3831,28 @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
     }
 }
 
+static inline bool isar_feature_aa32_mve(const ARMISARegisters *id)
+{
+    /*
+     * Return true if MVE is supported (either integer or floating point).
+     * We must check for M-profile as the MVFR1 field means something
+     * else for A-profile.
+     */
+    return isar_feature_aa32_mprofile(id) &&
+        FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0;
+}
+
+static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id)
+{
+    /*
+     * Return true if MVE is supported (either integer or floating point).
+     * We must check for M-profile as the MVFR1 field means something
+     * else for A-profile.
+     */
+    return isar_feature_aa32_mprofile(id) &&
+        FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2;
+}
+
 static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id)
 {
     /*
@@ -4122,6 +4158,11 @@ static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
 }
 
+static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
+}
+
 static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
 {
     /* We always set the AdvSIMD and FP fields identically. */
@@ -4266,6 +4307,11 @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
 }
 
+static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
+}
+
 static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
 
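Callers never read the ID registers directly; they go through the
cpu_isar_feature() macro, which expands to the matching isar_feature_*
predicate above (the vmstate_m_mve hunk later in this series uses the same
pattern with aa32_mve). A sketch of the calling convention, assuming an
ARMCPU pointer named cpu:

```c
if (cpu_isar_feature(aa64_bf16, cpu)) {
    /* FEAT_BF16 present: BFCVT, BFDOT, BFMMLA etc. may be exposed */
}
```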
target/arm/cpu64.c
@@ -661,6 +661,7 @@ static void aarch64_max_initfn(Object *obj)
         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
@@ -708,6 +709,7 @@ static void aarch64_max_initfn(Object *obj)
         t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
         t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
         t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
+        t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
         t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
         t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
         t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
@@ -731,6 +733,7 @@ static void aarch64_max_initfn(Object *obj)
         u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
         u = FIELD_DP32(u, ID_ISAR6, SB, 1);
         u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
+        u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
         u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
         cpu->isar.id_isar6 = u;
 
target/arm/cpu_tcg.c
@@ -968,6 +968,7 @@ static void arm_max_initfn(Object *obj)
         t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
         t = FIELD_DP32(t, ID_ISAR6, SB, 1);
         t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
+        t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
         t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
         cpu->isar.id_isar6 = t;
 
target/arm/helper-sve.h
@@ -1197,6 +1197,8 @@ DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bfcvt, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
@@ -2752,6 +2754,8 @@ DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bfcvtnt, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
target/arm/helper.h
@@ -143,6 +143,8 @@ DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
 
 DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
 DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr)
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, ptr)
 
 DEF_HELPER_2(vfp_uitoh, f16, i32, ptr)
 DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
@@ -1000,6 +1002,19 @@ DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
 
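The gvec_bfdot helpers implement BFDOT: pairs of bfloat16 elements are
multiplied and accumulated into float32 lanes. A simplified scalar model of
one lane (plain C; the architectural operation has its own rounding and
flush-to-zero rules, so this is illustrative only):

```c
#include <stdint.h>

/* bfloat16 is the top 16 bits of a float32 bit pattern. */
static float bf16_to_f32(uint16_t h)
{
    union { uint32_t u; float f; } v = { .u = (uint32_t)h << 16 };
    return v.f;
}

/* One BFDOT lane: acc + a0*b0 + a1*b1, with products widened to float32. */
static float bfdot_lane(float acc, const uint16_t a[2], const uint16_t b[2])
{
    return acc + bf16_to_f32(a[0]) * bf16_to_f32(b[0])
               + bf16_to_f32(a[1]) * bf16_to_f32(b[1]);
}
```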
target/arm/m_helper.c
@@ -2601,10 +2601,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
             limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
 
             if (val < limit) {
-                CPUState *cs = env_cpu(env);
-
-                cpu_restore_state(cs, GETPC(), true);
-                raise_exception(env, EXCP_STKOF, 0, 1);
+                raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
             }
 
             if (is_psp) {
target/arm/machine.c
@@ -318,6 +318,25 @@ static const VMStateDescription vmstate_m_fp = {
     }
 };
 
+static bool mve_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+
+    return cpu_isar_feature(aa32_mve, cpu);
+}
+
+static const VMStateDescription vmstate_m_mve = {
+    .name = "cpu/m/mve",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = mve_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
+        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
 static const VMStateDescription vmstate_m = {
     .name = "cpu/m",
     .version_id = 4,
@@ -344,6 +363,7 @@ static const VMStateDescription vmstate_m = {
         &vmstate_m_other_sp,
         &vmstate_m_v8m,
         &vmstate_m_fp,
+        &vmstate_m_mve,
         NULL
     }
 };
 
target/arm/mte_helper.c
@@ -563,20 +563,14 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
 
     switch (tcf) {
     case 1:
-        /*
-         * Tag check fail causes a synchronous exception.
-         *
-         * In restore_state_to_opc, we set the exception syndrome
-         * for the load or store operation.  Unwind first so we
-         * may overwrite that with the syndrome for the tag check.
-         */
-        cpu_restore_state(env_cpu(env), ra, true);
+        /* Tag check fail causes a synchronous exception. */
         env->exception.vaddress = dirty_ptr;
 
         is_write = FIELD_EX32(desc, MTEDESC, WRITE);
         syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
                                     is_write, 0x11);
-        raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
+        raise_exception_ra(env, EXCP_DATA_ABORT, syn,
+                           exception_target_el(env), ra);
         /* noreturn, but fall through to the assert anyway */
 
     case 0:
target/arm/neon-dp.decode
@@ -521,6 +521,7 @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
 VRINTZ 1111 001 11 . 11 .. 10 .... 0 1011 . . 0 .... @2misc
 
 VCVT_F16_F32 1111 001 11 . 11 .. 10 .... 0 1100 0 . 0 .... @2misc_q0
+VCVT_B16_F32 1111 001 11 . 11 .. 10 .... 0 1100 1 . 0 .... @2misc_q0
 
 VRINTM 1111 001 11 . 11 .. 10 .... 0 1101 . . 0 .... @2misc
 
target/arm/neon-shared.decode
@@ -52,6 +52,8 @@ VUDOT 1111 110 00 . 10 .... .... 1101 . q:1 . 1 .... \
    vm=%vm_dp vn=%vn_dp vd=%vd_dp
 VUSDOT 1111 110 01 . 10 .... .... 1101 . q:1 . 0 .... \
    vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VDOT_b16 1111 110 00 . 00 .... .... 1101 . q:1 . 0 .... \
+   vm=%vm_dp vn=%vn_dp vd=%vd_dp
 
 # VFM[AS]L
 VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
@@ -65,6 +67,11 @@ VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \
    vm=%vm_dp vn=%vn_dp vd=%vd_dp
 VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
    vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VMMLA_b16 1111 1100 0.00 .... .... 1100 .1.0 .... \
+   vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VFMA_b16 1111 110 0 0.11 .... .... 1000 . q:1 . 1 .... \
+   vm=%vm_dp vn=%vn_dp vd=%vd_dp
 
 VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
    vn=%vn_dp vd=%vd_dp size=1
@@ -79,6 +86,8 @@ VUSDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
    vn=%vn_dp vd=%vd_dp
 VSUDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 1 vm:4 \
    vn=%vn_dp vd=%vd_dp
+VDOT_b16_scal 1111 1110 0 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
+   vn=%vn_dp vd=%vd_dp
 
 %vfml_scalar_q0_rm 0:3 5:1
 %vfml_scalar_q1_index 5:1 3:1
@@ -86,3 +95,5 @@ VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
    rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
 VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
    index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
+VFMA_b16_scal 1111 1110 0.11 .... .... 1000 . q:1 . 1 . vm:3 \
+   index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp
 
target/arm/op_helper.c
@@ -27,8 +27,8 @@
 #define SIGNBIT (uint32_t)0x80000000
 #define SIGNBIT64 ((uint64_t)1 << 63)
 
-static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
-                                    uint32_t syndrome, uint32_t target_el)
+void raise_exception(CPUARMState *env, uint32_t excp,
+                     uint32_t syndrome, uint32_t target_el)
 {
     CPUState *cs = env_cpu(env);
 
@@ -49,22 +49,21 @@ static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
     cs->exception_index = excp;
     env->exception.syndrome = syndrome;
     env->exception.target_el = target_el;
-
-    return cs;
-}
-
-void raise_exception(CPUARMState *env, uint32_t excp,
-                     uint32_t syndrome, uint32_t target_el)
-{
-    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
     cpu_loop_exit(cs);
 }
 
 void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                         uint32_t target_el, uintptr_t ra)
 {
-    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
-    cpu_loop_exit_restore(cs, ra);
+    CPUState *cs = env_cpu(env);
+
+    /*
+     * restore_state_to_opc() will set env->exception.syndrome, so
+     * we must restore CPU state here before setting the syndrome
+     * the caller passed us, and cannot use cpu_loop_exit_restore().
+     */
+    cpu_restore_state(cs, ra, true);
+    raise_exception(env, excp, syndrome, target_el);
 }
 
 uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
@@ -96,15 +95,12 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
      * raising an exception if the limit is breached.
      */
     if (newvalue < v7m_sp_limit(env)) {
-        CPUState *cs = env_cpu(env);
-
         /*
         * Stack limit exceptions are a rare case, so rather than syncing
-         * PC/condbits before the call, we use cpu_restore_state() to
-         * get them right before raising the exception.
+         * PC/condbits before the call, we use raise_exception_ra() so
+         * that cpu_restore_state() will sort them out.
         */
-        cpu_restore_state(cs, GETPC(), true);
-        raise_exception(env, EXCP_STKOF, 0, 1);
+        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
     }
 }
 
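After this refactoring, helpers that need to attribute an exception to the
calling guest instruction use raise_exception_ra() with GETPC(), instead of
pairing cpu_restore_state() with raise_exception() by hand; the v8m_stackcheck
and mte_check_fail changes in this series follow the same pattern. A sketch of
the calling convention in a hypothetical helper (not part of the patch):

```c
/* Hypothetical helper that faults when its operand is zero. */
void HELPER(check_nonzero)(CPUARMState *env, uint32_t val)
{
    if (val == 0) {
        /* GETPC() lets unwinding find the guest PC of this helper call. */
        raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(), 1, GETPC());
    }
}
```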
target/arm/sve.decode
@@ -1036,6 +1036,7 @@ FNMLS_zpzzz 01100101 .. 1 ..... 111 ... ..... ..... @rdn_pg_rm_ra
 # SVE floating-point convert precision
 FCVT_sh 01100101 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
 FCVT_hs 01100101 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
+BFCVT 01100101 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
 FCVT_dh 01100101 11 0010 00 101 ... ..... ..... @rd_pg_rn_e0
 FCVT_hd 01100101 11 0010 01 101 ... ..... ..... @rd_pg_rn_e0
 FCVT_ds 01100101 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
@@ -1567,8 +1568,10 @@ SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
 USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
 
 ### SVE2 floating point matrix multiply accumulate
 
-FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm
+{
+  BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
+  FMMLA 01100100 .. 1 ..... 111 001 ..... ..... @rda_rn_rm
+}
 
 ### SVE2 Memory Gather Load Group
 
@@ -1610,6 +1613,7 @@ RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
 FCVTXNT_ds 01100100 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
 FCVTX_ds 01100101 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
 FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
+BFCVTNT 01100100 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
 FCVTLT_hs 01100100 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
 FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
 FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
@@ -1623,8 +1627,19 @@ FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
 FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0
 FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0
 
+BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
+
+### SVE2 floating-point bfloat16 dot-product
+BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+
 ### SVE2 floating-point multiply-add long (indexed)
 FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
 FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
 FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
 FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
+BFMLALB_zzxw 01100100 11 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
+BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
+
+### SVE2 floating-point bfloat16 dot-product (indexed)
+BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
 
target/arm/sve_helper.c
@@ -4708,6 +4708,7 @@ static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s)
 
 DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16)
 DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32)
+DO_ZPZ_FP(sve_bfcvt, uint32_t, H1_4, float32_to_bfloat16)
 DO_ZPZ_FP(sve_fcvt_dh, uint64_t, , sve_f64_to_f16)
 DO_ZPZ_FP(sve_fcvt_hd, uint64_t, , sve_f16_to_f64)
 DO_ZPZ_FP(sve_fcvt_ds, uint64_t, , float64_to_float32)
@@ -7740,6 +7741,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
     } while (i != 0); \
 }
 
+DO_FCVTNT(sve_bfcvtnt, uint32_t, uint16_t, H1_4, H1_2, float32_to_bfloat16)
 DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
 DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32)
 
@ -3355,8 +3355,9 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
|
||||
int o3_opc = extract32(insn, 12, 4);
|
||||
bool r = extract32(insn, 22, 1);
|
||||
bool a = extract32(insn, 23, 1);
|
||||
TCGv_i64 tcg_rs, clean_addr;
|
||||
TCGv_i64 tcg_rs, tcg_rt, clean_addr;
|
||||
AtomicThreeOpFn *fn = NULL;
|
||||
MemOp mop = s->be_data | size | MO_ALIGN;
|
||||
|
||||
if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
|
||||
unallocated_encoding(s);
|
||||
@ -3377,9 +3378,11 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
|
||||
break;
|
||||
case 004: /* LDSMAX */
|
||||
fn = tcg_gen_atomic_fetch_smax_i64;
|
||||
mop |= MO_SIGN;
|
||||
break;
|
||||
case 005: /* LDSMIN */
|
||||
fn = tcg_gen_atomic_fetch_smin_i64;
|
||||
mop |= MO_SIGN;
|
||||
break;
|
||||
case 006: /* LDUMAX */
|
||||
fn = tcg_gen_atomic_fetch_umax_i64;
|
||||
@ -3422,6 +3425,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
|
||||
}
|
||||
|
||||
tcg_rs = read_cpu_reg(s, rs, true);
|
||||
tcg_rt = cpu_reg(s, rt);
|
||||
|
||||
if (o3_opc == 1) { /* LDCLR */
|
||||
tcg_gen_not_i64(tcg_rs, tcg_rs);
|
||||
@ -3430,8 +3434,11 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
|
||||
/* The tcg atomic primitives are all full barriers. Therefore we
|
||||
* can ignore the Acquire and Release bits of this instruction.
|
||||
*/
|
||||
fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
|
||||
s->be_data | size | MO_ALIGN);
|
||||
fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
|
||||
|
||||
if ((mop & MO_SIGN) && size != MO_64) {
|
||||
tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
|
||||
}
|
||||
}
|
||||
|
||||
/*
@@ -6273,6 +6280,9 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
@@ -6494,8 +6504,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
    int rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
        goto do_unallocated;
    }

    switch (opcode) {
@@ -6504,8 +6513,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
@@ -6517,8 +6525,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            unallocated_encoding(s);
            return;
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
@@ -6540,8 +6547,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
@@ -6550,11 +6556,28 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
@@ -10330,6 +10353,13 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
            tcg_temp_free_i32(ahp);
        }
        break;
    case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
            tcg_temp_free_ptr(fpst);
        }
        break;
    case 0x56: /* FCVTXN, FCVTXN2 */
        /* 64 bit to 32 bit float conversion
         * with von Neumann rounding (round to odd)
@@ -12205,6 +12235,24 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
@@ -12288,6 +12336,23 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
@@ -12730,6 +12795,16 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
        }
        handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
        return;
    case 0x36: /* BFCVTN, BFCVTN2 */
        if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
        return;
    case 0x17: /* FCVTL, FCVTL2 */
        if (!fp_access_check(s)) {
            return;
@@ -13389,12 +13464,35 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
            return;
        }
        break;
    case 0x0f: /* SUDOT, USDOT */
        if (is_scalar || (size & 1) || !dc_isar_feature(aa64_i8mm, s)) {
    case 0x0f:
        switch (size) {
        case 0: /* SUDOT */
        case 2: /* USDOT */
            if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 1: /* BFDOT */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 3: /* BFMLAL{B,T} */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            /* can't set is_fp without other incorrect size checks */
            size = MO_16;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        size = MO_32;
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
@@ -13510,13 +13608,26 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
                         u ? gen_helper_gvec_udot_idx_b
                           : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x0f: /* SUDOT, USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                         extract32(insn, 23, 1)
                           ? gen_helper_gvec_usdot_idx_b
                           : gen_helper_gvec_sudot_idx_b);
        return;

    case 0x0f:
        switch (extract32(insn, 22, 2)) {
        case 0: /* SUDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_sudot_idx_b);
            return;
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_bfdot_idx);
            return;
        case 2: /* USDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_usdot_idx_b);
            return;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
                              gen_helper_gvec_bfmlal_idx);
            return;
        }
        g_assert_not_reached();
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
@@ -296,6 +296,15 @@ static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
                         gen_helper_gvec_usdot_b);
}

static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_bfdot);
}

static bool trans_VFML(DisasContext *s, arg_VFML *a)
{
    int opr_sz;
@@ -381,6 +390,15 @@ static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
                        gen_helper_gvec_sudot_idx_b);
}

static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_bfdot_idx);
}

static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
{
    int opr_sz;
@@ -3422,6 +3440,51 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
    return true;
}

static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 dst0, dst1;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    tmp = tcg_temp_new_i64();
    dst0 = tcg_temp_new_i32();
    dst1 = tcg_temp_new_i32();

    read_neon_element64(tmp, a->vm, 0, MO_64);
    gen_helper_bfcvt_pair(dst0, tmp, fpst);

    read_neon_element64(tmp, a->vm, 1, MO_64);
    gen_helper_bfcvt_pair(dst1, tmp, fpst);

    write_neon_element32(dst0, a->vd, 0, MO_32);
    write_neon_element32(dst1, a->vd, 1, MO_32);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(dst0);
    tcg_temp_free_i32(dst1);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
@@ -4063,3 +4126,31 @@ static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_usmmla_b);
}

static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_bfmmla);
}

static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
                             gen_helper_gvec_bfmlal);
}

static bool trans_VFMA_b16_scal(DisasContext *s, arg_VFMA_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm,
                             (a->index << 1) | a->q, FPST_STD,
                             gen_helper_gvec_bfmlal_idx);
}
@@ -4777,6 +4777,14 @@ static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
}

static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
}

static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
@@ -8472,6 +8480,14 @@ static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
}

static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt);
}

static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
@@ -8637,3 +8653,99 @@ static bool trans_UMMLA(DisasContext *s, arg_rrrr_esz *a)
{
    return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_ummla_b, 0);
}

static bool trans_BFDOT_zzzz(DisasContext *s, arg_rrrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot,
                          a->rd, a->rn, a->rm, a->ra, 0);
    }
    return true;
}

static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot_idx,
                          a->rd, a->rn, a->rm, a->ra, a->index);
    }
    return true;
}

static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, gen_helper_gvec_bfmmla,
                          a->rd, a->rn, a->rm, a->ra, 0);
    }
    return true;
}

static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
        unsigned vsz = vec_full_reg_size(s);

        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           status, vsz, vsz, sel,
                           gen_helper_gvec_bfmlal);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_BFMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_BFMLAL_zzzw(s, a, false);
}

static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_BFMLAL_zzzw(s, a, true);
}

static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
        unsigned vsz = vec_full_reg_size(s);

        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           status, vsz, vsz, (a->index << 1) | sel,
                           gen_helper_gvec_bfmlal_idx);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_BFMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_BFMLAL_zzxw(s, a, false);
}

static bool trans_BFMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_BFMLAL_zzxw(s, a, true);
}
@@ -543,11 +543,16 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
    if (a->size == MO_32
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON.  MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
@@ -571,11 +576,16 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
    if (a->size == MO_32
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON.  MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
@@ -671,7 +681,7 @@ typedef enum FPSysRegCheckResult {

static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

@@ -681,16 +691,22 @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return false;
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
@@ -807,6 +823,25 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = loadfn(s, opaque);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
@@ -925,6 +960,19 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp);
        break;
    default:
        g_assert_not_reached();
    }
@@ -1254,7 +1302,7 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1287,7 +1335,7 @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1329,7 +1377,7 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
     * floating point register.  Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1368,7 +1416,7 @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1403,7 +1451,7 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1439,7 +1487,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1479,7 +1527,7 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1557,7 +1605,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
    int i, n;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

@@ -1915,9 +1963,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /* Note that the caller must check the aa32_fpsp_v2 feature. */

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
@@ -1992,6 +2038,8 @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     */
    TCGv_i32 f0;

    /* Note that the caller must check the aa32_fp16_arith feature */

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }
@@ -2020,9 +2068,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }
    /* Note that the caller must check the aa32_fpdp_v2 feature. */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
@@ -2800,23 +2846,37 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
    return true;
}

#define DO_VFP_2OP(INSN, PREC, FN) \
#define DO_VFP_2OP(INSN, PREC, FN, CHECK) \
    static bool trans_##INSN##_##PREC(DisasContext *s, \
                                      arg_##INSN##_##PREC *a) \
    { \
        if (!dc_isar_feature(CHECK, s)) { \
            return false; \
        } \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
    }

DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)
#define DO_VFP_VMOV(INSN, PREC, FN) \
    static bool trans_##INSN##_##PREC(DisasContext *s, \
                                      arg_##INSN##_##PREC *a) \
    { \
        if (!dc_isar_feature(aa32_fp##PREC##_v2, s) && \
            !dc_isar_feature(aa32_mve, s)) { \
            return false; \
        } \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
    }

DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)
DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)
DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)

static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
@@ -2833,9 +2893,9 @@ static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)
DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)

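/*
 * [Editor's note] With the new CHECK argument, a use such as
 * DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2) expands to
 * roughly the following, moving the ISA feature test out of
 * do_vfp_2op_sp() and into each generated trans function:
 */
static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}
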
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
{
@@ -3025,6 +3085,30 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
    return true;
}

static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_bfcvt(tmp, tmp, fpst);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
@@ -2385,7 +2385,7 @@ static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
     * Process the entire segment at once, writing back the
     * results only after we've consumed all of the inputs.
     *
     * Key to indicies by column:
     * Key to indices by column:
     *          i   j                  i   j
     */
    sum0 = a[H4(0 + 0)];
@@ -2412,3 +2412,141 @@ static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
DO_MMLA_B(gvec_smmla_b, do_smmla_b)
DO_MMLA_B(gvec_ummla_b, do_ummla_b)
DO_MMLA_B(gvec_usmmla_b, do_usmmla_b)

/*
 * BFloat16 Dot Product
 */

static float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2)
{
    /* FPCR is ignored for BFDOT and BFMMLA. */
    float_status bf_status = {
        .tininess_before_rounding = float_tininess_before_rounding,
        .float_rounding_mode = float_round_to_odd_inf,
        .flush_to_zero = true,
        .flush_inputs_to_zero = true,
        .default_nan_mode = true,
    };
    float32 t1, t2;

    /*
     * Extract each BFloat16 from the element pair, and shift
     * them such that they become float32.
     */
    t1 = float32_mul(e1 << 16, e2 << 16, &bf_status);
    t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, &bf_status);
    t1 = float32_add(t1, t2, &bf_status);
    t1 = float32_add(sum, t1, &bf_status);

    return t1;
}

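/*
 * [Editor's sketch, not part of the commit] A bfloat16 is exactly the
 * top 16 bits of an IEEE float32, so the "e1 << 16" above is a free,
 * exact widening. For example 0x3f80 (bfloat16 1.0) widens to
 * 0x3f800000, the bit pattern of 1.0f:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint16_t bf16_one = 0x3f80;
    uint32_t bits = (uint32_t)bf16_one << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));   /* reinterpret the bits, no conversion */
    printf("%f\n", f);              /* prints 1.000000 */
    return 0;
}
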
void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    float32 *d = vd, *a = va;
    uint32_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = bfdotadd(a[i], n[i], m[i]);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
                            void *va, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t index = simd_data(desc);
    intptr_t elements = opr_sz / 4;
    intptr_t eltspersegment = MIN(16 / 4, elements);
    float32 *d = vd, *a = va;
    uint32_t *n = vn, *m = vm;

    for (i = 0; i < elements; i += eltspersegment) {
        uint32_t m_idx = m[i + H4(index)];

        for (j = i; j < i + eltspersegment; j++) {
            d[j] = bfdotadd(a[j], n[j], m_idx);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

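/*
 * [Editor's sketch, not part of the commit] The indexed form splits the
 * vector into 128-bit segments (four 32-bit element pairs each) and
 * broadcasts one selected element of m within each segment. Stripped of
 * the softfloat and host-endian (H4) details, the loop structure is:
 */
void bfdot_idx_sketch(float *d, const float *a,
                      const unsigned *n, const unsigned *m,
                      int elements, int index,
                      float (*dot2)(float, unsigned, unsigned))
{
    int per_seg = elements < 4 ? elements : 4;   /* 16 bytes / 4 */

    for (int i = 0; i < elements; i += per_seg) {
        unsigned m_idx = m[i + index];           /* one pick per segment */
        for (int j = i; j < i + per_seg; j++) {
            d[j] = dot2(a[j], n[j], m_idx);      /* reuse within segment */
        }
    }
}
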
void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
{
    intptr_t s, opr_sz = simd_oprsz(desc);
    float32 *d = vd, *a = va;
    uint32_t *n = vn, *m = vm;

    for (s = 0; s < opr_sz / 4; s += 4) {
        float32 sum00, sum01, sum10, sum11;

        /*
         * Process the entire segment at once, writing back the
         * results only after we've consumed all of the inputs.
         *
         * Key to indices by column:
         *               i   j           i   k             j   k
         */
        sum00 = a[s + H4(0 + 0)];
        sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]);
        sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]);

        sum01 = a[s + H4(0 + 1)];
        sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]);
        sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]);

        sum10 = a[s + H4(2 + 0)];
        sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]);
        sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]);

        sum11 = a[s + H4(2 + 1)];
        sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]);
        sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]);

        d[s + H4(0 + 0)] = sum00;
        d[s + H4(0 + 1)] = sum01;
        d[s + H4(2 + 0)] = sum10;
        d[s + H4(2 + 1)] = sum11;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

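/*
 * [Editor's sketch, not part of the commit] Per 128-bit segment, BFMMLA
 * is a 2x2 += 2x4 * 4x2 matrix multiply-accumulate, with the 4-deep dot
 * products consumed two bfloat16 at a time (each uint32_t above holds
 * one pair, and m's columns are stored as rows). With dot2() standing
 * in for bfdotadd(), the four sums above compute:
 */
void bfmmla_segment_sketch(float d[2][2], const float a[2][2],
                           const unsigned n[2][2], const unsigned m[2][2],
                           float (*dot2)(float, unsigned, unsigned))
{
    for (int r = 0; r < 2; r++) {
        for (int c = 0; c < 2; c++) {
            float sum = a[r][c];
            for (int k = 0; k < 2; k++) {   /* 2 pairs = 4 bf16 elements */
                sum = dot2(sum, n[r][k], m[c][k]);
            }
            d[r][c] = sum;
        }
    }
}
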
void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
                         void *stat, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    intptr_t sel = simd_data(desc);
    float32 *d = vd, *a = va;
    bfloat16 *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        float32 nn = n[H2(i * 2 + sel)] << 16;
        float32 mm = m[H2(i * 2 + sel)] << 16;
        d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], 0, stat);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
                             void *va, void *stat, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 1, 3);
    intptr_t elements = opr_sz / 4;
    intptr_t eltspersegment = MIN(16 / 4, elements);
    float32 *d = vd, *a = va;
    bfloat16 *n = vn, *m = vm;

    for (i = 0; i < elements; i += eltspersegment) {
        float32 m_idx = m[H2(2 * i + index)] << 16;

        for (j = i; j < i + eltspersegment; j++) {
            float32 n_j = n[H2(2 * j + sel)] << 16;
            d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], 0, stat);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}
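
/*
 * [Editor's sketch, not part of the commit] The tcg expanders earlier
 * in this series pass "(index << 1) | sel" as the operation's immediate
 * data; the helper above recovers the two fields with extract32(). In
 * plain C, with illustrative values:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t index = 5, sel = 1;           /* illustrative values */
    uint32_t data = (index << 1) | sel;    /* packed by the expander */

    assert((data & 1) == sel);             /* extract32(data, 0, 1) */
    assert(((data >> 1) & 7) == index);    /* extract32(data, 1, 3) */
    return 0;
}
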
@@ -205,6 +205,8 @@ VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 ....  \

# VCVTB and VCVTT to f16: Vd format is always vd_sp;
# Vm format depends on size bit
VCVT_b16_f32 ---- 1110 1.11 0011 .... 1001 t:1 1.0 ....  \
             vd=%vd_sp vm=%vm_sp
VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 ....  \
             vd=%vd_sp vm=%vm_sp
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 ....  \
@@ -195,8 +195,10 @@ uint32_t vfp_get_fpscr(CPUARMState *env)

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    ARMCPU *cpu = env_archcpu(env);

    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
    if (!cpu_isar_feature(any_fp16, env_archcpu(env))) {
    if (!cpu_isar_feature(any_fp16, cpu)) {
        val &= ~FPCR_FZ16;
    }

@@ -210,11 +212,12 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
         * because in v7A no-short-vector-support cores still had to
         * allow Stride/Len to be written with the only effect that
         * some insns are required to UNDEF if the guest sets them.
         *
         * TODO: if M-profile MVE implemented, set LTPSIZE.
         */
        env->vfp.vec_len = extract32(val, 16, 3);
        env->vfp.vec_stride = extract32(val, 20, 2);
    } else if (cpu_isar_feature(aa32_mve, cpu)) {
        env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT,
                                     FPCR_LTPSIZE_LENGTH);
    }

    if (arm_feature(env, ARM_FEATURE_NEON)) {
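
/*
 * [Editor's sketch, not part of the commit] extract32(val, shift, len)
 * is QEMU's bitfield read: (val >> shift) masked to len bits. The real
 * field position comes from FPCR_LTPSIZE_SHIFT/FPCR_LTPSIZE_LENGTH in
 * cpu.h; the shift/length pair below is purely illustrative.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t extract32_sketch(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    /* Hypothetical 3-bit field at bit 16. */
    uint32_t fpscr = 4u << 16;
    assert(extract32_sketch(fpscr, 16, 3) == 4);
    return 0;
}
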
@@ -408,6 +411,18 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
    return float64_to_float32(x, &env->vfp.fp_status);
}

uint32_t HELPER(bfcvt)(float32 x, void *status)
{
    return float32_to_bfloat16(x, status);
}

uint32_t HELPER(bfcvt_pair)(uint64_t pair, void *status)
{
    bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status);
    bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status);
    return deposit32(lo, 16, 16, hi);
}

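/*
 * [Editor's sketch, not part of the commit] bfcvt_pair() narrows the two
 * float32 halves of a 64-bit input and returns the two bfloat16 results
 * packed low/high in 32 bits. deposit32(lo, 16, 16, hi) is equivalent to
 * the plain-C packing below:
 */
#include <stdint.h>

static uint32_t pack_bf16_pair(uint16_t lo, uint16_t hi)
{
    /* Keep lo in bits [15:0], write hi into bits [31:16]. */
    return (uint32_t)lo | ((uint32_t)hi << 16);
}
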
/*
 * VFP3 fixed point conversion. The AArch32 versions of fix-to-float
 * must always round-to-nearest; the AArch64 ones honour the FPSCR
@@ -1,146 +0,0 @@
/*
 * Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contain code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/hvf.h"
#include "sysemu/runstate.h"
#include "target/i386/cpu.h"
#include "qemu/guest-random.h"

#include "hvf-accel-ops.h"

/*
 * The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature.
 */
static void *hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void hvf_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /*
     * HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode.
     */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = hvf_start_vcpu_thread;

    ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
    ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
    ops->synchronize_state = hvf_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
};
static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}
type_init(hvf_accel_ops_register_types);
@@ -1,23 +0,0 @@
/*
 * Accelerator CPUS Interface
 *
 * Copyright 2020 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HVF_CPUS_H
#define HVF_CPUS_H

#include "sysemu/cpus.h"

int hvf_init_vcpu(CPUState *);
int hvf_vcpu_exec(CPUState *);
void hvf_cpu_synchronize_state(CPUState *);
void hvf_cpu_synchronize_post_reset(CPUState *);
void hvf_cpu_synchronize_post_init(CPUState *);
void hvf_cpu_synchronize_pre_loadvm(CPUState *);
void hvf_vcpu_destroy(CPUState *);

#endif /* HVF_CPUS_H */
@@ -18,42 +18,11 @@

#include "qemu/accel.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "cpu.h"
#include "x86.h"

/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)

typedef struct hvf_slot {
    uint64_t start;
    uint64_t size;
    uint8_t *mem;
    int slot_id;
    uint32_t flags;
    MemoryRegion *region;
} hvf_slot;

typedef struct hvf_vcpu_caps {
    uint64_t vmx_cap_pinbased;
    uint64_t vmx_cap_procbased;
    uint64_t vmx_cap_procbased2;
    uint64_t vmx_cap_entry;
    uint64_t vmx_cap_exit;
    uint64_t vmx_cap_preemption_timer;
} hvf_vcpu_caps;

struct HVFState {
    AccelState parent;
    hvf_slot slots[32];
    int num_slots;

    hvf_vcpu_caps *hvf_caps;
};
extern HVFState *hvf_state;

void hvf_set_phys_mem(MemoryRegionSection *, bool);
void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);

#ifdef NEED_CPU_H
/* Functions exported to host specific mode */
@@ -51,6 +51,7 @@
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "hvf-i386.h"
#include "vmcs.h"
@@ -72,171 +73,6 @@
#include "qemu/accel.h"
#include "target/i386/cpu.h"

#include "hvf-accel-ops.h"

HVFState *hvf_state;

static void assert_hvf_ok(hv_return_t ret)
{
    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        error_report("Error: HV_ERROR");
        break;
    case HV_BUSY:
        error_report("Error: HV_BUSY");
        break;
    case HV_BAD_ARGUMENT:
        error_report("Error: HV_BAD_ARGUMENT");
        break;
    case HV_NO_RESOURCES:
        error_report("Error: HV_NO_RESOURCES");
        break;
    case HV_NO_DEVICE:
        error_report("Error: HV_NO_DEVICE");
        break;
    case HV_UNSUPPORTED:
        error_report("Error: HV_UNSUPPORTED");
        break;
    default:
        error_report("Unknown Error");
    }

    abort();
}

/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}

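/*
 * [Editor's sketch, not part of the commit] The overlap test above is
 * the standard half-open interval check: [a, a+alen) and [b, b+blen)
 * intersect iff each range starts before the other one ends.
 */
#include <stdint.h>
#include <stdbool.h>

static bool ranges_overlap_sketch(uint64_t a, uint64_t alen,
                                  uint64_t b, uint64_t blen)
{
    return a < b + blen && b < a + alen;
}
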
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
    uint64_t gva;
};

struct mac_slot mac_slots[32];

static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}

void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writeable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writeable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}

void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need integrate APIC handling */
@@ -244,11 +80,11 @@ void vmx_update_tpr(CPUState *cpu)
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
    wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}
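
/*
 * [Editor's note] The cpu->hvf_fd -> cpu->hvf->fd changes throughout
 * this file rely on the "hvf: Introduce hvf vcpu struct" patch in this
 * series, which wraps the raw Hypervisor.framework vcpu handle in a
 * small per-vCPU accelerator struct hung off CPUState. Conceptually
 * (the field layout here is an illustrative assumption, not a
 * quotation of the patch):
 */
struct hvf_vcpu_state_sketch {
    uint64_t fd;   /* the hv_vcpuid_t handle previously kept in hvf_fd */
};
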
@@ -256,7 +92,7 @@ void vmx_update_tpr(CPUState *cpu)
static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
    int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

@@ -276,56 +112,6 @@ void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
    }
}

static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;
@@ -370,90 +156,12 @@ static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
    return false;
}

static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    /* stop tracking region*/
    } else {
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

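/*
 * [Editor's note] Dirty-page tracking here works by write protection:
 * when logging starts, the whole slot is remapped read-only, so the
 * next guest write takes an EPT fault; the fault path in
 * hvf_vcpu_exec() can then mark the page dirty and restore write
 * access. Stopping tracking simply remaps the slot read-write.
 */
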
static void hvf_log_start(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_log_stop(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (new != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 0);
|
||||
}
|
||||
|
||||
static void hvf_log_sync(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
/*
|
||||
* sync of dirty pages is handled elsewhere; just make sure we keep
|
||||
* tracking the region.
|
||||
*/
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, true);
|
||||
}
|
||||
|
||||
static void hvf_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, false);
|
||||
}
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.priority = 10,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
.log_stop = hvf_log_stop,
|
||||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
void hvf_vcpu_destroy(CPUState *cpu)
|
||||
void hvf_arch_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
CPUX86State *env = &x86_cpu->env;
|
||||
|
||||
hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
|
||||
g_free(env->hvf_mmio_buf);
|
||||
assert_hvf_ok(ret);
|
||||
}
|
||||
|
||||
static void dummy_signal(int sig)
|
||||
{
|
||||
}
|
||||
|
||||
static void init_tsc_freq(CPUX86State *env)
|
||||
@ -498,23 +206,10 @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
|
||||
return env->apic_bus_freq != 0;
|
||||
}
|
||||
|
||||
int hvf_init_vcpu(CPUState *cpu)
|
||||
int hvf_arch_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
|
||||
X86CPU *x86cpu = X86_CPU(cpu);
|
||||
CPUX86State *env = &x86cpu->env;
|
||||
int r;
|
||||
|
||||
/* init cpu signals */
|
||||
sigset_t set;
|
||||
struct sigaction sigact;
|
||||
|
||||
memset(&sigact, 0, sizeof(sigact));
|
||||
sigact.sa_handler = dummy_signal;
|
||||
sigaction(SIG_IPI, &sigact, NULL);
|
||||
|
||||
pthread_sigmask(SIG_BLOCK, NULL, &set);
|
||||
sigdelset(&set, SIG_IPI);
|
||||
|
||||
init_emu();
|
||||
init_decoder();
|
||||
@ -531,10 +226,6 @@ int hvf_init_vcpu(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
|
||||
cpu->vcpu_dirty = 1;
|
||||
assert_hvf_ok(r);
|
||||
|
||||
if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
|
||||
&hvf_state->hvf_caps->vmx_cap_pinbased)) {
|
||||
abort();
|
||||
@ -553,43 +244,43 @@ int hvf_init_vcpu(CPUState *cpu)
|
||||
}
|
||||
|
||||
/* set VMCS control fields */
|
||||
wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
|
||||
wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
|
||||
cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
|
||||
VMCS_PIN_BASED_CTLS_EXTINT |
|
||||
VMCS_PIN_BASED_CTLS_NMI |
|
||||
VMCS_PIN_BASED_CTLS_VNMI));
|
||||
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
|
||||
wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
|
||||
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
|
||||
VMCS_PRI_PROC_BASED_CTLS_HLT |
|
||||
VMCS_PRI_PROC_BASED_CTLS_MWAIT |
|
||||
VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
|
||||
VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
|
||||
VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
|
||||
wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
|
||||
wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
|
||||
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
|
||||
VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
|
||||
wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
|
||||
0));
|
||||
wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
|
||||
wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
|
||||
wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
|
||||
|
||||
x86cpu = X86_CPU(cpu);
|
||||
x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);
|
||||
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
|
||||
hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -630,16 +321,16 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in
|
||||
}
|
||||
if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
|
||||
env->has_error_code = true;
|
||||
env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
|
||||
env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
|
||||
}
|
||||
}
|
||||
if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
|
||||
if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
|
||||
VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
|
||||
env->hflags2 |= HF2_NMI_MASK;
|
||||
} else {
|
||||
env->hflags2 &= ~HF2_NMI_MASK;
|
||||
}
|
||||
if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
|
||||
if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
|
||||
(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
|
||||
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
|
||||
env->hflags |= HF_INHIBIT_IRQ_MASK;
|
target/i386/hvf/hvf.c:

@@ -718,20 +409,20 @@ int hvf_vcpu_exec(CPUState *cpu)
             return EXCP_HLT;
         }
 
-        hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
+        hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
         assert_hvf_ok(r);
 
         /* handle VMEXIT */
-        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
-        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
-        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
+        uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
+        uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
+        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
                                            VMCS_EXIT_INSTRUCTION_LENGTH);
 
-        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
+        uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
 
         hvf_store_events(cpu, ins_len, idtvec_info);
-        rip = rreg(cpu->hvf_fd, HV_X86_RIP);
-        env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
+        rip = rreg(cpu->hvf->fd, HV_X86_RIP);
+        env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
 
         qemu_mutex_lock_iothread();
 
@@ -761,7 +452,7 @@ int hvf_vcpu_exec(CPUState *cpu)
         case EXIT_REASON_EPT_FAULT:
         {
             hvf_slot *slot;
-            uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);
+            uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);
 
             if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                 ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
@@ -806,7 +497,7 @@ int hvf_vcpu_exec(CPUState *cpu)
                 store_regs(cpu);
                 break;
             } else if (!string && !in) {
-                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
+                RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
                 hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                 macvm_set_rip(cpu, rip + ins_len);
                 break;
@@ -822,21 +513,21 @@ int hvf_vcpu_exec(CPUState *cpu)
            break;
        }
        case EXIT_REASON_CPUID: {
-            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
-            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
-            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
-            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
+            uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
+            uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
+            uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
+            uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
 
             if (rax == 1) {
                 /* CPUID1.ecx.OSXSAVE needs to know CR4 */
-                env->cr[4] = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
+                env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
             }
             hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
 
-            wreg(cpu->hvf_fd, HV_X86_RAX, rax);
-            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
-            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
-            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);
+            wreg(cpu->hvf->fd, HV_X86_RAX, rax);
+            wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
+            wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
+            wreg(cpu->hvf->fd, HV_X86_RDX, rdx);
 
             macvm_set_rip(cpu, rip + ins_len);
             break;
@@ -844,16 +535,16 @@ int hvf_vcpu_exec(CPUState *cpu)
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
-            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
-            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
-            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
+            uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
+            uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
+            uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
 
             if (ecx) {
                 macvm_set_rip(cpu, rip + ins_len);
                 break;
             }
             env->xcr0 = ((uint64_t)edx << 32) | eax;
-            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
+            wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
             macvm_set_rip(cpu, rip + ins_len);
             break;
         }
@@ -892,11 +583,11 @@ int hvf_vcpu_exec(CPUState *cpu)
 
             switch (cr) {
             case 0x0: {
-                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
+                macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
                 break;
             }
             case 4: {
-                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
+                macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
                 break;
             }
             case 8: {
@@ -932,7 +623,7 @@ int hvf_vcpu_exec(CPUState *cpu)
             break;
         }
         case EXIT_REASON_TASK_SWITCH: {
-            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
+            uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
             x68_segment_selector sel = {.sel = exit_qual & 0xffff};
             vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
              vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
@@ -945,8 +636,8 @@ int hvf_vcpu_exec(CPUState *cpu)
             break;
         }
         case EXIT_REASON_RDPMC:
-            wreg(cpu->hvf_fd, HV_X86_RAX, 0);
-            wreg(cpu->hvf_fd, HV_X86_RDX, 0);
+            wreg(cpu->hvf->fd, HV_X86_RAX, 0);
+            wreg(cpu->hvf->fd, HV_X86_RDX, 0);
             macvm_set_rip(cpu, rip + ins_len);
             break;
         case VMX_REASON_VMCALL:
@@ -962,48 +653,3 @@ int hvf_vcpu_exec(CPUState *cpu)
 
     return ret;
 }
-
-bool hvf_allowed;
-
-static int hvf_accel_init(MachineState *ms)
-{
-    int x;
-    hv_return_t ret;
-    HVFState *s;
-
-    ret = hv_vm_create(HV_VM_DEFAULT);
-    assert_hvf_ok(ret);
-
-    s = g_new0(HVFState, 1);
-
-    s->num_slots = 32;
-    for (x = 0; x < s->num_slots; ++x) {
-        s->slots[x].size = 0;
-        s->slots[x].slot_id = x;
-    }
-
-    hvf_state = s;
-    memory_listener_register(&hvf_memory_listener, &address_space_memory);
-    return 0;
-}
-
-static void hvf_accel_class_init(ObjectClass *oc, void *data)
-{
-    AccelClass *ac = ACCEL_CLASS(oc);
-    ac->name = "HVF";
-    ac->init_machine = hvf_accel_init;
-    ac->allowed = &hvf_allowed;
-}
-
-static const TypeInfo hvf_accel_type = {
-    .name = TYPE_HVF_ACCEL,
-    .parent = TYPE_ACCEL,
-    .class_init = hvf_accel_class_init,
-};
-
-static void hvf_type_init(void)
-{
-    type_register_static(&hvf_accel_type);
-}
-
-type_init(hvf_type_init);
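The hvf.c hunks above are the heart of the refactoring: every direct cpu->hvf_fd access becomes cpu->hvf->fd, so the raw Hypervisor.framework vCPU handle now lives behind a per-vCPU state object, while the accelerator init and QOM registration boilerplate leaves this x86-specific file for the common hvf code. A minimal sketch of the indirection this implies; the layout is illustrative, since only the hvf pointer and its fd member are visible in this diff:

    #include <stdint.h>

    /* Sketch only: stand-ins for the real QEMU definitions. */
    struct hvf_vcpu_state {
        uint64_t fd;                 /* vCPU handle from hv_vcpu_create() */
        /* room for more per-vCPU accelerator state later */
    };

    struct cpu_state_sketch {        /* stand-in for the real CPUState */
        struct hvf_vcpu_state *hvf;  /* allocated when the vCPU is created */
    };

The extra pointer hop costs little and gives later, non-x86 ports a natural place to keep their own per-vCPU accelerator data.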
target/i386/hvf/meson.build:

@@ -1,6 +1,5 @@
 i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
   'hvf.c',
-  'hvf-accel-ops.c',
   'x86.c',
   'x86_cpuid.c',
   'x86_decode.c',
target/i386/hvf/vmx.h:

@@ -30,6 +30,8 @@
 #include "vmcs.h"
 #include "cpu.h"
 #include "x86.h"
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
 
 #include "exec/address-spaces.h"
 
@@ -179,15 +181,15 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
     uint64_t val;
 
     /* BUG, should take considering overlap.. */
-    wreg(cpu->hvf_fd, HV_X86_RIP, rip);
+    wreg(cpu->hvf->fd, HV_X86_RIP, rip);
     env->eip = rip;
 
     /* after moving forward in rip, we need to clean INTERRUPTABILITY */
-    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+    val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
     if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
         env->hflags &= ~HF_INHIBIT_IRQ_MASK;
-        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
+        wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
               val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                       VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
     }
@@ -199,9 +201,9 @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
 
     env->hflags2 &= ~HF2_NMI_MASK;
-    uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+    uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
     gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
-    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
 }
 
 static inline void vmx_set_nmi_blocking(CPUState *cpu)
@@ -210,16 +212,16 @@ static inline void vmx_set_nmi_blocking(CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
 
     env->hflags2 |= HF2_NMI_MASK;
-    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
     gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
-    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
 }
 
 static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
 {
     uint64_t val;
-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
           VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
 
 }
@@ -228,8 +230,8 @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
 {
 
     uint64_t val;
-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
           ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
 }
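All of the vmx.h helpers above share one read-modify-write shape over a VMCS field; this diff only changes how the vCPU handle is spelled. As a sketch, assuming rvmcs()/wvmcs() keep their (handle, field) and (handle, field, value) signatures, the recurring pattern could be factored like this:

    /* Hypothetical helper, not part of this diff: set or clear bits in a
     * VMCS field through the existing rvmcs()/wvmcs() wrappers. */
    static inline void vmcs_update_bits(uint64_t fd, uint32_t field,
                                        uint64_t bits, bool set)
    {
        uint64_t val = rvmcs(fd, field);
        wvmcs(fd, field, set ? (val | bits) : (val & ~bits));
    }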
target/i386/hvf/x86.c:

@@ -62,11 +62,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu,
     }
 
     if (GDT_SEL == sel.ti) {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
     } else {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
     }
 
     if (sel.index * 8 >= limit) {
@@ -85,11 +85,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
     uint32_t limit;
 
     if (GDT_SEL == sel.ti) {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
     } else {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
     }
 
     if (sel.index * 8 >= limit) {
@@ -103,8 +103,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
 bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                         int gate)
 {
-    target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
-    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
+    target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
+    uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
 
     memset(idt_desc, 0, sizeof(*idt_desc));
     if (gate * 8 >= limit) {
@@ -118,7 +118,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
 
 bool x86_is_protected(struct CPUState *cpu)
 {
-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
     return cr0 & CR0_PE;
 }
 
@@ -136,7 +136,7 @@ bool x86_is_v8086(struct CPUState *cpu)
 
 bool x86_is_long_mode(struct CPUState *cpu)
 {
-    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
+    return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
 }
 
 bool x86_is_long64_mode(struct CPUState *cpu)
@@ -149,13 +149,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)
 
 bool x86_is_paging_mode(struct CPUState *cpu)
 {
-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
     return cr0 & CR0_PG;
 }
 
 bool x86_is_pae_enabled(struct CPUState *cpu)
 {
-    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
+    uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
     return cr4 & CR4_PAE;
 }
target/i386/hvf/x86_descr.c:

@@ -48,47 +48,47 @@ static const struct vmx_segment_field {
 
 uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
 {
-    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
+    return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
 }
 
 uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
 {
-    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
+    return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
 }
 
 uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
 {
-    return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
+    return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
 }
 
 x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
 {
     x68_segment_selector sel;
-    sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
+    sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
     return sel;
 }
 
 void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
 {
-    wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
+    wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel);
 }
 
 void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
 {
-    desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
-    desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
-    desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
-    desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
+    desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
+    desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
+    desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
+    desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
 }
 
 void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
 {
     const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
 
-    wvmcs(cpu->hvf_fd, sf->base, desc->base);
-    wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
-    wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
-    wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
+    wvmcs(cpu->hvf->fd, sf->base, desc->base);
+    wvmcs(cpu->hvf->fd, sf->limit, desc->limit);
+    wvmcs(cpu->hvf->fd, sf->selector, desc->sel);
+    wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar);
 }
 
 void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
target/i386/hvf/x86_emu.c:

@@ -674,7 +674,7 @@ void simulate_rdmsr(struct CPUState *cpu)
 
     switch (msr) {
     case MSR_IA32_TSC:
-        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
+        val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET);
         break;
     case MSR_IA32_APICBASE:
         val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
@@ -683,16 +683,16 @@ void simulate_rdmsr(struct CPUState *cpu)
         val = x86_cpu->ucode_rev;
         break;
     case MSR_EFER:
-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER);
         break;
     case MSR_FSBASE:
-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE);
         break;
     case MSR_GSBASE:
-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE);
         break;
     case MSR_KERNELGSBASE:
-        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
+        val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE);
         break;
     case MSR_STAR:
         abort();
@@ -780,13 +780,13 @@ void simulate_wrmsr(struct CPUState *cpu)
         cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
         break;
     case MSR_FSBASE:
-        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
+        wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data);
         break;
     case MSR_GSBASE:
-        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
+        wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data);
         break;
     case MSR_KERNELGSBASE:
-        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
+        wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data);
         break;
     case MSR_STAR:
         abort();
@@ -799,9 +799,9 @@ void simulate_wrmsr(struct CPUState *cpu)
         break;
     case MSR_EFER:
         /*printf("new efer %llx\n", EFER(cpu));*/
-        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
+        wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data);
         if (data & MSR_EFER_NXE) {
-            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
+            hv_vcpu_invalidate_tlb(cpu->hvf->fd);
         }
         break;
     case MSR_MTRRphysBase(0):
@@ -1425,21 +1425,21 @@ void load_regs(struct CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
 
     int i = 0;
-    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
-    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
-    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
-    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
-    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
-    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
-    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
-    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
+    RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX);
+    RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX);
+    RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX);
+    RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX);
+    RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI);
+    RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI);
+    RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP);
+    RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP);
     for (i = 8; i < 16; i++) {
-        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
+        RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i);
     }
 
-    env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
+    env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
     rflags_to_lflags(env);
-    env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);
+    env->eip = rreg(cpu->hvf->fd, HV_X86_RIP);
 }
 
 void store_regs(struct CPUState *cpu)
@@ -1448,20 +1448,20 @@ void store_regs(struct CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
 
     int i = 0;
-    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
-    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
-    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
-    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
-    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
-    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
-    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
-    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
+    wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env));
+    wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env));
+    wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env));
+    wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env));
+    wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env));
+    wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env));
+    wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env));
+    wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env));
     for (i = 8; i < 16; i++) {
-        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
+        wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i));
     }
 
     lflags_to_rflags(env);
-    wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);
+    wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags);
     macvm_set_rip(cpu, env->eip);
 }
target/i386/hvf/x86_mmu.c:

@@ -127,7 +127,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
         pt->err_code |= MMU_PAGE_PT;
     }
 
-    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+    uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
     /* check protection */
     if (cr0 & CR0_WP) {
         if (pt->write_access && !pte_write_access(pte)) {
@@ -172,7 +172,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
 {
     int top_level, level;
     bool is_large = false;
-    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
+    target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);
     uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
 
     memset(pt, 0, sizeof(*pt));
target/i386/hvf/x86_task.c:

@@ -62,7 +62,7 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
 
-    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
+    wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);
 
     env->eip = tss->eip;
     env->eflags = tss->eflags | 2;
@@ -111,11 +111,11 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme
 
 void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
 {
-    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
+    uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);
     if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                         gate_type != VMCS_INTR_T_HWINTR &&
                         gate_type != VMCS_INTR_T_NMI)) {
-        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
+        int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
         macvm_set_rip(cpu, rip + ins_len);
         return;
     }
@@ -174,12 +174,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
         //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
         VM_PANIC("task_switch_16");
 
-    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
+    macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
     x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
     vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
 
     store_regs(cpu);
 
-    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
-    hv_vcpu_flush(cpu->hvf_fd);
+    hv_vcpu_invalidate_tlb(cpu->hvf->fd);
+    hv_vcpu_flush(cpu->hvf->fd);
 }
target/i386/hvf/x86hvf.c:

@@ -26,14 +26,13 @@
 #include "cpu.h"
 #include "x86_descr.h"
 #include "x86_decode.h"
+#include "sysemu/hw_accel.h"
 
 #include "hw/i386/apic_internal.h"
 
 #include <Hypervisor/hv.h>
 #include <Hypervisor/hv_vmx.h>
 
-#include "hvf-accel-ops.h"
-
 void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                      SegmentCache *qseg, bool is_tr)
 {
@@ -81,7 +80,7 @@ void hvf_put_xsave(CPUState *cpu_state)
 
     x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
 
-    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+    if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
         abort();
     }
 }
@@ -91,19 +90,19 @@ void hvf_put_segments(CPUState *cpu_state)
     CPUX86State *env = &X86_CPU(cpu_state)->env;
     struct vmx_segment seg;
 
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
 
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
 
-    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
+    /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
     vmx_update_tpr(cpu_state);
-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);
+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
 
-    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
-    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);
+    macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
+    macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);
 
     hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
     vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
@@ -129,31 +128,31 @@ void hvf_put_segments(CPUState *cpu_state)
     hvf_set_segment(cpu_state, &seg, &env->ldt, false);
     vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
 
-    hv_vcpu_flush(cpu_state->hvf_fd);
+    hv_vcpu_flush(cpu_state->hvf->fd);
 }
 
 void hvf_put_msrs(CPUState *cpu_state)
 {
     CPUX86State *env = &X86_CPU(cpu_state)->env;
 
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,
                       env->sysenter_cs);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,
                       env->sysenter_esp);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,
                       env->sysenter_eip);
 
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);
 
 #ifdef TARGET_X86_64
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);
 #endif
 
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);
+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);
 }
 
 
@@ -163,7 +162,7 @@ void hvf_get_xsave(CPUState *cpu_state)
 
     xsave = X86_CPU(cpu_state)->env.xsave_buf;
 
-    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+    if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
         abort();
     }
 
@@ -202,17 +201,17 @@ void hvf_get_segments(CPUState *cpu_state)
     vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
     hvf_get_segment(&env->ldt, &seg);
 
-    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
-    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
-    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
-    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);
+    env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
+    env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);
+    env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+    env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);
 
-    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
+    env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);
     env->cr[2] = 0;
-    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
-    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);
+    env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);
+    env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);
 
-    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
+    env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);
 }
 
 void hvf_get_msrs(CPUState *cpu_state)
@@ -220,27 +219,27 @@ void hvf_get_msrs(CPUState *cpu_state)
     CPUX86State *env = &X86_CPU(cpu_state)->env;
     uint64_t tmp;
 
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);
     env->sysenter_cs = tmp;
 
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);
     env->sysenter_esp = tmp;
 
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);
     env->sysenter_eip = tmp;
 
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);
 
 #ifdef TARGET_X86_64
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);
 #endif
 
-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);
+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);
 
-    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
+    env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);
 }
 
 int hvf_put_registers(CPUState *cpu_state)
@@ -248,26 +247,26 @@ int hvf_put_registers(CPUState *cpu_state)
     X86CPU *x86cpu = X86_CPU(cpu_state);
     CPUX86State *env = &x86cpu->env;
 
-    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
-    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
-    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
-    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
-    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
-    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
-    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
-    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
-    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
-    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
-    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
-    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
-    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
-    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
-    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
-    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
-    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
-    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);
+    wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);
+    wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);
+    wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);
+    wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);
+    wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);
+    wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);
+    wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);
+    wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);
+    wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);
+    wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);
+    wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);
+    wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);
+    wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);
+    wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);
+    wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);
+    wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);
+    wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);
+    wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);
 
-    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);
+    wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);
 
     hvf_put_xsave(cpu_state);
 
@@ -275,14 +274,14 @@ int hvf_put_registers(CPUState *cpu_state)
 
     hvf_put_msrs(cpu_state);
 
-    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
-    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);
+    wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);
 
     return 0;
 }
@@ -292,40 +291,40 @@ int hvf_get_registers(CPUState *cpu_state)
     X86CPU *x86cpu = X86_CPU(cpu_state);
     CPUX86State *env = &x86cpu->env;
 
-    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
-    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
-    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
-    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
-    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
-    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
-    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
-    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
-    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
-    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
-    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
-    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
-    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
-    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
-    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
-    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);
+    env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);
+    env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);
+    env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);
+    env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);
+    env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);
+    env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);
+    env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);
+    env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);
+    env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);
+    env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);
+    env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);
+    env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);
+    env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);
+    env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);
+    env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);
+    env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);
 
-    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
-    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);
+    env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
+    env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);
 
     hvf_get_xsave(cpu_state);
-    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);
+    env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);
 
     hvf_get_segments(cpu_state);
     hvf_get_msrs(cpu_state);
 
-    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
-    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
-    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
-    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
-    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
-    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
-    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
-    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
+    env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);
+    env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);
+    env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);
+    env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);
+    env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);
+    env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);
+    env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);
+    env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);
 
     x86_update_hflags(env);
     return 0;
@@ -334,16 +333,16 @@ int hvf_get_registers(CPUState *cpu_state)
 static void vmx_set_int_window_exiting(CPUState *cpu)
 {
     uint64_t val;
-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
           VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
 }
 
 void vmx_clear_int_window_exiting(CPUState *cpu)
 {
     uint64_t val;
-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
           ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
 }
 
@@ -379,7 +378,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
     uint64_t info = 0;
     if (have_event) {
         info = vector | intr_type | VMCS_INTR_VALID;
-        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
+        uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);
         if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
             vmx_clear_nmi_blocking(cpu_state);
         }
@@ -388,17 +387,17 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
         info &= ~(1 << 12); /* clear undefined bit */
         if (intr_type == VMCS_INTR_T_SWINTR ||
             intr_type == VMCS_INTR_T_SWEXCEPTION) {
-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
         }
 
         if (env->has_error_code) {
-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,
                   env->error_code);
             /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
             info |= VMCS_INTR_DEL_ERRCODE;
         }
         /*printf("reinject %lx err %d\n", info, err);*/
-        wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
+        wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
     };
 }
 
@@ -406,7 +405,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
     if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
         cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
         info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
-        wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
+        wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
     } else {
         vmx_set_nmi_window_exiting(cpu_state);
     }
@@ -418,7 +417,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
         int line = cpu_get_pic_interrupt(&x86cpu->env);
         cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
         if (line >= 0) {
-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
                   VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
         }
     }
@@ -434,10 +433,13 @@ int hvf_process_events(CPUState *cpu_state)
     X86CPU *cpu = X86_CPU(cpu_state);
     CPUX86State *env = &cpu->env;
 
-    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
+    if (!cpu_state->vcpu_dirty) {
+        /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */
+        env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
+    }
 
     if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
-        hvf_cpu_synchronize_state(cpu_state);
+        cpu_synchronize_state(cpu_state);
         do_cpu_init(cpu);
     }
 
@@ -451,12 +453,12 @@ int hvf_process_events(CPUState *cpu_state)
         cpu_state->halted = 0;
     }
     if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
-        hvf_cpu_synchronize_state(cpu_state);
+        cpu_synchronize_state(cpu_state);
         do_cpu_sipi(cpu);
     }
     if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
         cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
-        hvf_cpu_synchronize_state(cpu_state);
+        cpu_synchronize_state(cpu_state);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
     }
target/i386/hvf/x86hvf.h:

@@ -21,8 +21,6 @@
 #include "x86_descr.h"
 
 int hvf_process_events(CPUState *);
-int hvf_put_registers(CPUState *);
-int hvf_get_registers(CPUState *);
 bool hvf_inject_interrupts(CPUState *);
 void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                      SegmentCache *qseg, bool is_tr);
tests/qtest/bios-tables-test.c:

@@ -489,10 +489,14 @@ static void test_acpi_asl(test_data *data)
                                           exp_sdt->asl_file, sdt->asl_file);
         int out = dup(STDOUT_FILENO);
         int ret G_GNUC_UNUSED;
+        int dupret;
 
-        dup2(STDERR_FILENO, STDOUT_FILENO);
+        g_assert(out >= 0);
+        dupret = dup2(STDERR_FILENO, STDOUT_FILENO);
+        g_assert(dupret >= 0);
         ret = system(diff) ;
-        dup2(out, STDOUT_FILENO);
+        dupret = dup2(out, STDOUT_FILENO);
+        g_assert(dupret >= 0);
         close(out);
         g_free(diff);
     }
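The bios-tables-test hunk hardens a classic save/redirect/restore of stdout around system(): the dup() and both dup2() results are now checked instead of silently ignored. The same pattern in isolation, as a sketch (the function name is illustrative):

    #include <glib.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Run a shell command with its stdout diverted to stderr,
     * then restore the original stdout. */
    static int run_with_stdout_on_stderr(const char *cmd)
    {
        int saved = dup(STDOUT_FILENO);              /* keep original stdout */
        g_assert(saved >= 0);
        g_assert(dup2(STDERR_FILENO, STDOUT_FILENO) >= 0);
        int ret = system(cmd);                       /* child inherits the redirect */
        g_assert(dup2(saved, STDOUT_FILENO) >= 0);   /* put stdout back */
        close(saved);
        return ret;
    }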
tests/qtest/e1000e-test.c:

@@ -93,7 +93,8 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a
     /* Check data sent to the backend */
     ret = qemu_recv(test_sockets[0], &recv_len, sizeof(recv_len), 0);
     g_assert_cmpint(ret, == , sizeof(recv_len));
-    qemu_recv(test_sockets[0], buffer, 64, 0);
+    ret = qemu_recv(test_sockets[0], buffer, 64, 0);
+    g_assert_cmpint(ret, >=, 5);
     g_assert_cmpstr(buffer, == , "TEST");
 
     /* Free test data buffer */
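The e1000e hunk closes a checking gap: the payload read was fire-and-forget, so a short or failed receive would leave the buffer unvalidated before the string compare. Asserting ret >= 5 guarantees at least "TEST" plus its NUL terminator arrived. The same shape with plain recv(), as a sketch:

    #include <assert.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Sketch: validate the received length before trusting the buffer. */
    static void check_test_payload(int sock)
    {
        char buffer[64];
        ssize_t ret = recv(sock, buffer, sizeof(buffer), 0);
        assert(ret >= 5);                    /* "TEST" + NUL */
        assert(strcmp(buffer, "TEST") == 0);
    }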
tests/qtest/hd-geo-test.c:

@@ -464,7 +464,7 @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors)
     }
 
     fd = mkstemp(raw_path);
-    g_assert(fd);
+    g_assert(fd >= 0);
     close(fd);
 
     fd = open(raw_path, O_WRONLY);
@@ -474,7 +474,7 @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors)
     close(fd);
 
     fd = mkstemp(qcow2_path);
-    g_assert(fd);
+    g_assert(fd >= 0);
     close(fd);
 
     qemu_img_path = getenv("QTEST_QEMU_IMG");
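Both hd-geo hunks fix the same latent bug: mkstemp() returns -1 on failure, and g_assert(fd) only trips when fd is exactly 0, so a failing mkstemp() would sail past the old assertion and the error would only surface later. Checking fd >= 0 is the correct form:

    #include <glib.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int make_temp_file(void)
    {
        char path[] = "/tmp/test.XXXXXX";  /* illustrative template */
        int fd = mkstemp(path);            /* an fd on success, -1 on failure */
        g_assert(fd >= 0);                 /* g_assert(fd) would accept -1 */
        return fd;
    }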
tests/qtest/pflash-cfi02-test.c:

@@ -406,7 +406,7 @@ static void test_geometry(const void *opaque)
 
     for (int region = 0; region < nb_erase_regions; ++region) {
         for (uint32_t i = 0; i < c->nb_blocs[region]; ++i) {
-            uint64_t byte_addr = i * c->sector_len[region];
+            uint64_t byte_addr = (uint64_t)i * c->sector_len[region];
             g_assert_cmphex(flash_read(c, byte_addr), ==, bank_mask(c));
         }
     }
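The pflash change is worth spelling out: i and c->sector_len[region] are both 32-bit, so their product is computed in 32 bits and can wrap before it is widened into the 64-bit byte_addr. Casting one operand first forces a 64-bit multiply. A self-contained demonstration with illustrative values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t i = 0x200000;          /* block index (illustrative) */
        uint32_t sector_len = 0x1000;   /* bytes per block (illustrative) */

        uint64_t wrong = i * sector_len;            /* 32-bit multiply wraps to 0 */
        uint64_t right = (uint64_t)i * sector_len;  /* 0x200000000 as intended */

        printf("wrong=%#" PRIx64 " right=%#" PRIx64 "\n", wrong, right);
        return 0;
    }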
tests/qtest/tpm-tests.c:

@@ -123,14 +123,10 @@ void tpm_test_swtpm_migration_test(const char *src_tpm_path,
     qtest_quit(src_qemu);
 
     tpm_util_swtpm_kill(dst_tpm_pid);
-    if (dst_tpm_addr) {
-        g_unlink(dst_tpm_addr->u.q_unix.path);
-        qapi_free_SocketAddress(dst_tpm_addr);
-    }
+    g_unlink(dst_tpm_addr->u.q_unix.path);
+    qapi_free_SocketAddress(dst_tpm_addr);
 
     tpm_util_swtpm_kill(src_tpm_pid);
-    if (src_tpm_addr) {
-        g_unlink(src_tpm_addr->u.q_unix.path);
-        qapi_free_SocketAddress(src_tpm_addr);
-    }
+    g_unlink(src_tpm_addr->u.q_unix.path);
+    qapi_free_SocketAddress(src_tpm_addr);
 }
tests/unit/test-vmstate.c:

@@ -40,10 +40,12 @@ static int temp_fd;
 /* Duplicate temp_fd and seek to the beginning of the file */
 static QEMUFile *open_test_file(bool write)
 {
-    int fd = dup(temp_fd);
+    int fd;
     QIOChannel *ioc;
     QEMUFile *f;
 
+    fd = dup(temp_fd);
+    g_assert(fd >= 0);
     lseek(fd, 0, SEEK_SET);
     if (write) {
         g_assert_cmpint(ftruncate(fd, 0), ==, 0);
@@ -1486,6 +1488,7 @@ int main(int argc, char **argv)
     g_autofree char *temp_file = g_strdup_printf("%s/vmst.test.XXXXXX",
                                                  g_get_tmp_dir());
     temp_fd = mkstemp(temp_file);
+    g_assert(temp_fd >= 0);
 
     module_call_init(MODULE_INIT_QOM);
 