2008-11-05 19:29:27 +03:00
|
|
|
/*
|
|
|
|
* QEMU KVM support
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2008
|
2008-11-24 22:36:26 +03:00
|
|
|
* Red Hat, Inc. 2008
|
2008-11-05 19:29:27 +03:00
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
2008-11-24 22:36:26 +03:00
|
|
|
* Glauber Costa <gcosta@redhat.com>
|
2008-11-05 19:29:27 +03:00
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/mman.h>
|
2008-11-13 22:21:00 +03:00
|
|
|
#include <stdarg.h>
|
2008-11-05 19:29:27 +03:00
|
|
|
|
|
|
|
#include <linux/kvm.h>
|
|
|
|
|
|
|
|
#include "qemu-common.h"
|
2010-02-22 19:57:54 +03:00
|
|
|
#include "qemu-barrier.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
#include "sysemu.h"
|
2009-05-02 02:29:37 +04:00
|
|
|
#include "hw/hw.h"
|
2009-03-12 23:12:48 +03:00
|
|
|
#include "gdbstub.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
#include "kvm.h"
|
2010-04-23 21:04:14 +04:00
|
|
|
#include "bswap.h"
|
2011-12-18 16:06:05 +04:00
|
|
|
#include "memory.h"
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2011-01-10 14:50:05 +03:00
|
|
|
/* This check must be after config-host.h is included */
|
|
|
|
#ifdef CONFIG_EVENTFD
|
|
|
|
#include <sys/eventfd.h>
|
|
|
|
#endif
|
|
|
|
|
2008-12-09 23:09:57 +03:00
|
|
|
/* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
|
|
|
|
#define PAGE_SIZE TARGET_PAGE_SIZE
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
//#define DEBUG_KVM
|
|
|
|
|
|
|
|
#ifdef DEBUG_KVM
|
2010-04-18 18:22:14 +04:00
|
|
|
#define DPRINTF(fmt, ...) \
|
2008-11-05 19:29:27 +03:00
|
|
|
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
|
|
|
|
#else
|
2010-04-18 18:22:14 +04:00
|
|
|
#define DPRINTF(fmt, ...) \
|
2008-11-05 19:29:27 +03:00
|
|
|
do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2008-11-19 20:41:58 +03:00
|
|
|
/* One KVM memory slot: a contiguous guest-physical range backed by host
 * RAM.  A slot with memory_size == 0 is considered free. */
typedef struct KVMSlot
{
    target_phys_addr_t start_addr; /* guest physical base address */
    ram_addr_t memory_size;        /* length in bytes; 0 => slot unused */
    void *ram;                     /* host virtual address backing the slot */
    int slot;                      /* kernel slot index */
    int flags;                     /* KVM_MEM_* flags (e.g. dirty logging) */
} KVMSlot;

/* Shorthand for the kernel's KVM_GET_DIRTY_LOG ioctl argument. */
typedef struct kvm_dirty_log KVMDirtyLog;
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
/* Global per-VM KVM connection state: file descriptors, memory slots,
 * feature-capability flags probed at init time, and irq routing data. */
struct KVMState
{
    KVMSlot slots[32];             /* userspace view of kernel memory slots */
    int fd;                        /* /dev/kvm file descriptor */
    int vmfd;                      /* VM file descriptor (KVM_CREATE_VM) */
    int coalesced_mmio;            /* page offset of MMIO ring in kvm_run, 0 if none */
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;     /* kernel can't join/resize slots; use workaround */
    int migration_log;             /* global dirty logging requested (migration) */
    int vcpu_events;               /* KVM_CAP_VCPU_EVENTS available */
    int robust_singlestep;         /* KVM_CAP_X86_ROBUST_SINGLESTEP available */
    int debugregs;                 /* KVM_CAP_DEBUGREGS available */
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_in_kernel;             /* in-kernel PIT in use */
    int xsave, xcrs;               /* x86 state extensions supported */
    int many_ioeventfds;           /* kernel supports > 6 ioeventfds */
    int irqchip_inject_ioctl;      /* KVM_IRQ_LINE or KVM_IRQ_LINE_STATUS */
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;   /* capacity of irq_routes->entries */
    uint32_t *used_gsi_bitmap;     /* one bit per allocated GSI */
    unsigned int max_gsi;          /* number of bits in used_gsi_bitmap */
#endif
};
|
|
|
|
|
2011-02-07 14:19:25 +03:00
|
|
|
/* Global KVM connection state, set up during machine init. */
KVMState *kvm_state;
/* True when the in-kernel irqchip is in use. */
bool kvm_kernel_irqchip;
|
2008-11-05 19:29:27 +03:00
|
|
|
|
2011-01-21 23:48:17 +03:00
|
|
|
/* Capabilities the kernel must advertise for QEMU's KVM support to work
 * at all; checked once at init, terminated by KVM_CAP_LAST_INFO. */
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
/* Hand out the first unused memory slot (memory_size == 0 marks free).
 * Exhausting the fixed slot array is unrecoverable: report and abort. */
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *slot = &s->slots[i];

        if (slot->memory_size == 0) {
            return slot;
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
|
|
|
|
|
|
|
|
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
|
2009-10-02 01:12:16 +04:00
|
|
|
target_phys_addr_t start_addr,
|
|
|
|
target_phys_addr_t end_addr)
|
2009-04-17 18:26:29 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
|
|
|
|
KVMSlot *mem = &s->slots[i];
|
|
|
|
|
|
|
|
if (start_addr == mem->start_addr &&
|
|
|
|
end_addr == mem->start_addr + mem->memory_size) {
|
|
|
|
return mem;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-04-17 18:26:33 +04:00
|
|
|
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        /* Skip unused slots and candidates that can't improve on the
         * lowest start address found so far. */
        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        /* Half-open interval overlap test: [start_addr, end_addr) vs slot. */
        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
|
|
|
|
|
2011-12-15 21:55:26 +04:00
|
|
|
/* Reverse-map a host virtual address to the guest physical address of the
 * slot containing it.  Returns 1 and stores the address in *phys_addr on
 * success, 0 if no slot covers @ram. */
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
|
|
|
|
|
2008-11-24 22:36:26 +03:00
|
|
|
/* Push one slot's configuration to the kernel via
 * KVM_SET_USER_MEMORY_REGION.  Dirty logging is forced on while
 * migration logging is globally enabled.  Returns the ioctl result. */
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
|
|
|
|
|
2009-06-27 11:24:58 +04:00
|
|
|
/* System-reset callback registered per vcpu: forwards to the
 * architecture-specific vcpu reset.  @opaque is the CPUState. */
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}
|
2008-11-24 22:36:26 +03:00
|
|
|
|
2009-07-21 19:26:58 +04:00
|
|
|
/* Query whether the in-kernel PIT model is in use. */
int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
/* Create and initialize a kernel vcpu for @env: issue KVM_CREATE_VCPU,
 * mmap the shared kvm_run control area, hook up the coalesced-MMIO ring
 * (if the kernel advertised one) and run arch-specific setup.
 * Returns 0 on success, a negative errno-style value on failure. */
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    /* Force a full register upload before the first KVM_RUN. */
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    /* kvm_run is the kernel/userspace shared control block for this vcpu. */
    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    /* s->coalesced_mmio holds the page offset of the MMIO ring within the
     * kvm_run mapping (0 when unsupported); latch it on the first vcpu. */
    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
|
|
|
|
|
2008-11-24 22:36:26 +03:00
|
|
|
/*
|
|
|
|
* dirty pages logging control
|
|
|
|
*/
|
2011-04-06 23:09:54 +04:00
|
|
|
|
|
|
|
/* Translate the per-slot dirty-logging request into KVM memory flags.
 * @s is unused here but kept for interface symmetry with callers. */
static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}
|
|
|
|
|
|
|
|
/* Update one slot's dirty-logging flag and, if the effective flags
 * change, push the slot to the kernel.  Returns 0 when no ioctl was
 * needed, else the kvm_set_user_memory_region() result. */
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    /* Replace only the logging bit; keep any other flags intact. */
    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        /* Migration forces logging on regardless of the per-slot request. */
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
|
|
|
|
|
2011-04-06 23:09:54 +04:00
|
|
|
/* Enable/disable dirty logging for an exact existing slot range.
 * The range must match a registered slot exactly; otherwise this is a
 * caller bug and -EINVAL is returned. */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
static void kvm_log_start(MemoryListener *listener,
|
|
|
|
MemoryRegionSection *section)
|
2008-11-24 22:36:26 +03:00
|
|
|
{
|
2011-12-18 16:06:05 +04:00
|
|
|
int r;
|
|
|
|
|
|
|
|
r = kvm_dirty_pages_log_change(section->offset_within_address_space,
|
|
|
|
section->size, true);
|
|
|
|
if (r < 0) {
|
|
|
|
abort();
|
|
|
|
}
|
2008-11-24 22:36:26 +03:00
|
|
|
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
static void kvm_log_stop(MemoryListener *listener,
|
|
|
|
MemoryRegionSection *section)
|
2008-11-24 22:36:26 +03:00
|
|
|
{
|
2011-12-18 16:06:05 +04:00
|
|
|
int r;
|
|
|
|
|
|
|
|
r = kvm_dirty_pages_log_change(section->offset_within_address_space,
|
|
|
|
section->size, false);
|
|
|
|
if (r < 0) {
|
|
|
|
abort();
|
|
|
|
}
|
2008-11-24 22:36:26 +03:00
|
|
|
}
|
|
|
|
|
2010-01-27 23:07:21 +03:00
|
|
|
/* Globally enable/disable dirty logging for migration: record the new
 * state, then re-issue KVM_SET_USER_MEMORY_REGION for every in-use slot
 * whose effective logging flag changes.  Returns 0 or the first error. */
static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        /* Per-slot flag already matches the requested state: no ioctl. */
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
|
|
|
|
|
2010-04-23 21:04:14 +04:00
|
|
|
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    /* number of host longs needed to cover one bit per target page */
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            /* bitmap words come from the kernel in little-endian order */
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;     /* index of lowest set bit */
                c &= ~(1ul << j);    /* clear it so the loop terminates */
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
            } while (c != 0);
        }
    }
    return 0;
}
|
|
|
|
|
2010-04-23 21:04:14 +04:00
|
|
|
/* Round x up to the next multiple of y; y must be a power of two. */
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
|
|
|
|
|
2008-11-24 22:36:26 +03:00
|
|
|
/**
|
|
|
|
* kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
|
2011-10-16 20:04:59 +04:00
|
|
|
* This function updates qemu's dirty bitmap using
|
|
|
|
* memory_region_set_dirty(). This means all bits are set
|
|
|
|
* to dirty.
|
2008-11-24 22:36:26 +03:00
|
|
|
*
|
2009-04-17 18:26:29 +04:00
|
|
|
* @start_add: start of logged region.
|
2008-11-24 22:36:26 +03:00
|
|
|
* @end_addr: end of logged region.
|
|
|
|
*/
|
2011-12-19 15:18:13 +04:00
|
|
|
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
|
2008-11-24 22:36:26 +03:00
|
|
|
{
|
|
|
|
KVMState *s = kvm_state;
|
2009-05-01 22:52:47 +04:00
|
|
|
unsigned long size, allocated_size = 0;
|
|
|
|
KVMDirtyLog d;
|
|
|
|
KVMSlot *mem;
|
|
|
|
int ret = 0;
|
2011-12-19 15:18:13 +04:00
|
|
|
target_phys_addr_t start_addr = section->offset_within_address_space;
|
|
|
|
target_phys_addr_t end_addr = start_addr + section->size;
|
2008-11-24 22:36:26 +03:00
|
|
|
|
2009-05-01 22:52:47 +04:00
|
|
|
d.dirty_bitmap = NULL;
|
|
|
|
while (start_addr < end_addr) {
|
|
|
|
mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
|
|
|
|
if (mem == NULL) {
|
|
|
|
break;
|
|
|
|
}
|
2008-11-24 22:36:26 +03:00
|
|
|
|
fix crash in migration, 32-bit userspace on 64-bit host
This change fixes a long-standing immediate crash (memory corruption
and abort in glibc malloc code) in migration on 32bits.
The bug is present since this commit:
commit 692d9aca97b865b0f7903565274a52606910f129
Author: Bruce Rogers <brogers@novell.com>
Date: Wed Sep 23 16:13:18 2009 -0600
qemu-kvm: allocate correct size for dirty bitmap
The dirty bitmap copied out to userspace is stored in a long array,
and gets copied out to userspace accordingly. This patch accounts
for that correctly. Currently I'm seeing kvm crashing due to writing
beyond the end of the alloc'd dirty bitmap memory, because the buffer
has the wrong size.
Signed-off-by: Bruce Rogers
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
- buf = qemu_malloc((slots[i].len / 4096 + 7) / 8 + 2);
+ buf = qemu_malloc(BITMAP_SIZE(slots[i].len));
r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
BITMAP_SIZE is now open-coded in that function, like this:
size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
The problem is that HOST_LONG_BITS in 32bit userspace is 32
but it's 64 in 64bit kernel. So userspace aligns this to
32, and kernel to 64, but since no length is passed from
userspace to kernel on ioctl, kernel uses its size calculation
and copies 4 extra bytes to userspace, corrupting memory.
Here's how it looks like during migrate execution:
our=20, kern=24
our=4, kern=8
...
our=4, kern=8
our=4064, kern=4064
our=512, kern=512
our=4, kern=8
our=20, kern=24
our=4, kern=8
...
our=4, kern=8
our=4064, kern=4064
*** glibc detected *** ./x86_64-softmmu/qemu-system-x86_64: realloc(): invalid next size: 0x08f20528 ***
(our is userspace size above, kern is the size as calculated
by the kernel).
Fix this by always aligning to 64 in a hope that no platform will
have sizeof(long)>8 any time soon, and add a comment describing it
all. It's a small price to pay for bad kernel design.
Alternatively it's possible to fix that in the kernel by using
different size calculation depending on the current process.
But this becomes quite ugly.
Special thanks goes to Stefan Hajnoczi for spotting the fundamental
cause of the issue, and to Alexander Graf for his support in #qemu.
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
CC: Bruce Rogers <brogers@novell.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
2011-04-26 20:13:49 +04:00
|
|
|
/* XXX bad kernel interface alert
|
|
|
|
* For dirty bitmap, kernel allocates array of size aligned to
|
|
|
|
* bits-per-long. But for case when the kernel is 64bits and
|
|
|
|
* the userspace is 32bits, userspace can't align to the same
|
|
|
|
* bits-per-long, since sizeof(long) is different between kernel
|
|
|
|
* and user space. This way, userspace will provide buffer which
|
|
|
|
* may be 4 bytes less than the kernel will use, resulting in
|
|
|
|
* userspace memory corruption (which is not detectable by valgrind
|
|
|
|
* too, in most cases).
|
|
|
|
* So for now, let's align to 64 instead of HOST_LONG_BITS here, in
|
|
|
|
* a hope that sizeof(long) wont become >8 any time soon.
|
|
|
|
*/
|
|
|
|
size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
|
|
|
|
/*HOST_LONG_BITS*/ 64) / 8;
|
2009-05-01 22:52:47 +04:00
|
|
|
if (!d.dirty_bitmap) {
|
2011-08-21 07:09:37 +04:00
|
|
|
d.dirty_bitmap = g_malloc(size);
|
2009-05-01 22:52:47 +04:00
|
|
|
} else if (size > allocated_size) {
|
2011-08-21 07:09:37 +04:00
|
|
|
d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
|
2009-05-01 22:52:47 +04:00
|
|
|
}
|
|
|
|
allocated_size = size;
|
|
|
|
memset(d.dirty_bitmap, 0, allocated_size);
|
2008-11-24 22:36:26 +03:00
|
|
|
|
2009-05-01 22:52:47 +04:00
|
|
|
d.slot = mem->slot;
|
2008-11-24 22:36:26 +03:00
|
|
|
|
2009-07-28 00:23:59 +04:00
|
|
|
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
|
2010-04-18 18:22:14 +04:00
|
|
|
DPRINTF("ioctl failed %d\n", errno);
|
2009-05-01 22:52:47 +04:00
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
2008-11-24 22:36:26 +03:00
|
|
|
|
2011-12-19 15:18:13 +04:00
|
|
|
kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
|
2010-04-23 21:04:14 +04:00
|
|
|
start_addr = mem->start_addr + mem->memory_size;
|
2008-11-24 22:36:26 +03:00
|
|
|
}
|
2011-08-21 07:09:37 +04:00
|
|
|
g_free(d.dirty_bitmap);
|
2009-05-01 22:52:47 +04:00
|
|
|
|
|
|
|
return ret;
|
2008-11-24 22:36:26 +03:00
|
|
|
}
|
|
|
|
|
2009-10-02 01:12:16 +04:00
|
|
|
/* Register [start, start+size) as a coalesced-MMIO zone so the kernel
 * batches writes into the shared ring instead of exiting per access.
 * Returns -ENOSYS when the kernel lacks the capability. */
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
|
|
|
|
|
2009-10-02 01:12:16 +04:00
|
|
|
/* Undo kvm_coalesce_mmio_region() for [start, start+size).
 * Returns -ENOSYS when the kernel lacks the capability. */
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
|
|
|
|
|
2009-05-09 00:33:24 +04:00
|
|
|
/* Probe a KVM capability.  The kernel returns a non-negative value
 * (often a count or limit) when supported; normalize any failure to 0
 * so callers can treat the result as a plain boolean/limit. */
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);

    return ret < 0 ? 0 : ret;
}
|
|
|
|
|
2011-01-10 14:50:05 +03:00
|
|
|
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    /* Try to register 7 ioeventfds; old kernels cap the io bus at 6. */
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    /* Tear down everything we registered during the probe. */
    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
|
|
|
|
|
2011-01-21 23:48:17 +03:00
|
|
|
static const KVMCapabilityInfo *
|
|
|
|
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
|
|
|
|
{
|
|
|
|
while (list->name) {
|
|
|
|
if (!kvm_check_extension(s, list->value)) {
|
|
|
|
return list;
|
|
|
|
}
|
|
|
|
list++;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
/* Register (@add == true) or unregister a memory region section with the
 * kernel.  Existing overlapping slots are torn down first; any prefix or
 * suffix of an old slot that survives the change is re-registered as its
 * own slot.  Registration failures are fatal (abort). */
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* Only RAM-backed regions become KVM slots; MMIO is handled by exits. */
    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        /* Flush pending dirty bits before the slot disappears. */
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot.  We have to maintain the existing fragmentation.  Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one.  If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
/* MemoryListener hook: a section was added to the address space. */
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}
|
|
|
|
|
|
|
|
/* MemoryListener hook: a section was removed from the address space. */
static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}
|
|
|
|
|
|
|
|
/* MemoryListener hook: pull the kernel's dirty bitmap for @section into
 * QEMU's dirty tracking.  Failure is fatal. */
static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
/* MemoryListener hook: enable global (migration) dirty logging. */
static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
/* MemoryListener hook: disable global (migration) dirty logging. */
static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}
|
|
|
|
|
2011-12-18 16:06:05 +04:00
|
|
|
/* Callbacks through which the memory core keeps KVM's slot table and
 * dirty logging in sync with the guest address-space layout. */
static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
};
|
|
|
|
|
2011-04-13 03:32:56 +04:00
|
|
|
/* Raise interrupt-request bits on @env and kick the vcpu thread out of
 * KVM_RUN when the caller is a different thread. */
static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (qemu_cpu_is_self(env)) {
        return;
    }
    qemu_cpu_kick(env);
}
|
|
|
|
|
2011-10-15 13:49:47 +04:00
|
|
|
/* Set the level of an in-kernel irqchip input line.  Uses whichever of
 * KVM_IRQ_LINE / KVM_IRQ_LINE_STATUS the kernel supports (probed at
 * init into s->irqchip_inject_ioctl).  Returns the delivery status when
 * the kernel reports one, else 1.  Only valid with an in-kernel irqchip. */
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_irqchip_in_kernel());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    /* Plain KVM_IRQ_LINE does not fill in event.status. */
    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
|
|
|
|
|
|
|
|
#ifdef KVM_CAP_IRQ_ROUTING
|
|
|
|
/* Mark a global system interrupt number as allocated in the GSI bitmap.
 * The bitmap is stored as an array of 32-bit words. */
static void set_gsi(KVMState *s, unsigned int gsi)
{
    assert(gsi < s->max_gsi);

    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}
|
|
|
|
|
|
|
|
/* Initialize the GSI allocation bitmap and the (initially empty) irq
 * routing table, then let the architecture add its default routes. */
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs.
         * Fix: the previous code computed (gsi_count + 31) / 32, which is
         * the number of 32-bit *words*, not the bit count rounded up to a
         * multiple of 32.  That made the g_malloc0(gsi_bits / 8) bitmap
         * 32x too small and set max_gsi to the word count, so set_gsi()
         * wrote out of bounds for almost every valid GSI. */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}
|
|
|
|
|
|
|
|
/*
 * Append a routing entry to the in-memory route table, growing the table
 * geometrically (min 64 entries) as needed, and mark its GSI as used.
 * The table is only pushed to the kernel by kvm_irqchip_commit_routes().
 */
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        /* Double the capacity; the header plus entries live in one block. */
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    /* Copy field by field; zero first so kernel-visible padding is clean. */
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);
}
|
|
|
|
|
|
|
|
/*
 * Record a GSI -> (irqchip, pin) route in the local table.
 * NOTE(review): `e` is stack-allocated and only the used fields are set;
 * kvm_add_routing_entry() zeroes the destination before copying, so the
 * uninitialized remainder of `e.u` is never handed to the kernel.
 */
void kvm_irqchip_add_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}
|
|
|
|
|
|
|
|
/* Push the accumulated route table to the kernel in one KVM_SET_GSI_ROUTING
 * call.  Returns the ioctl result (0 on success, negative errno on error). */
int kvm_irqchip_commit_routes(KVMState *s)
{
    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
}
|
|
|
|
|
|
|
|
#else /* !KVM_CAP_IRQ_ROUTING */
|
|
|
|
|
|
|
|
/* No-op stub when the headers lack KVM_CAP_IRQ_ROUTING. */
static void kvm_init_irq_routing(KVMState *s)
{
}
|
|
|
|
#endif /* !KVM_CAP_IRQ_ROUTING */
|
|
|
|
|
|
|
|
/*
 * Create the in-kernel irqchip if the user asked for it
 * (-machine kernel_irqchip=on) and the kernel supports it.
 *
 * Returns 0 when the irqchip is created *or* intentionally skipped;
 * a negative errno only when creation was requested but failed.
 */
static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    /* Silently do nothing unless explicitly enabled and supported. */
    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", false) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    /* Prefer the status-reporting inject ioctl when the kernel has it. */
    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;

    kvm_init_irq_routing(s);

    return 0;
}
|
|
|
|
|
2011-01-21 23:48:16 +03:00
|
|
|
/*
 * Global KVM initialization: open /dev/kvm, verify the API version, create
 * the VM, probe capabilities, run arch init, optionally create the in-kernel
 * irqchip, and register the memory listener.
 *
 * Returns 0 on success, negative errno on failure (all acquired fds are
 * closed and the state freed on the error path).
 */
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = g_malloc0(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    /* Pre-assign the fixed kernel slot number for each memory slot. */
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        /* NOTE(review): fprintf() may clobber errno before it is read here;
         * consider saving errno before printing — confirm upstream intent. */
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    /* Refuse to run without the generic and arch-required capabilities. */
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    /* Assume the old slot-splitting bug unless the kernel says otherwise. */
    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    /* Publish the state only after everything above succeeded. */
    kvm_state = s;
    memory_listener_register(&kvm_memory_listener);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}
|
|
|
|
|
2011-02-02 00:16:01 +03:00
|
|
|
/*
 * Complete a KVM_EXIT_IO: perform `count` port accesses of `size` bytes
 * each against `port`, moving data through the run-struct buffer at `data`.
 * `direction` is KVM_EXIT_IO_IN (device -> buffer) or OUT (buffer -> device).
 */
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        /* String I/O: successive elements are packed in the buffer. */
        ptr += size;
    }
}
|
|
|
|
|
2011-01-21 23:48:06 +03:00
|
|
|
/*
 * Report a KVM_EXIT_INTERNAL_ERROR.  Dumps the kernel-provided suberror
 * and extra data when KVM_CAP_INTERNAL_ERROR_DATA is available.
 *
 * Returns EXCP_INTERRUPT if the arch code opted to continue despite an
 * emulation failure, otherwise -1 to stop the VM.
 */
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
|
|
|
|
|
2010-01-26 14:21:16 +03:00
|
|
|
/*
 * Replay buffered coalesced-MMIO writes to the memory subsystem.
 *
 * The in-progress flag guards against reentrancy: replaying a write can
 * trigger device code that calls back into this function.
 */
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* Consume the entry before advancing the kernel-visible index. */
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
|
|
|
|
|
2010-05-04 16:45:23 +04:00
|
|
|
/* run_on_cpu() worker: pull the vcpu register state out of the kernel into
 * env, unless the cached copy is already current (dirty flag set). */
static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        /* Marking dirty means env is authoritative until pushed back. */
        env->kvm_vcpu_dirty = 1;
    }
}
|
|
|
|
|
2010-05-04 16:45:23 +04:00
|
|
|
/* Ensure env holds the current register state, fetching it on the vcpu's
 * own thread via run_on_cpu() if the cached copy is stale. */
void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}
|
|
|
|
|
2010-03-01 21:10:30 +03:00
|
|
|
/* Push the full reset-relevant register state into the kernel after a CPU
 * reset, then mark the cached copy clean. */
void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}
|
|
|
|
|
|
|
|
/* Push the complete register state into the kernel after machine init /
 * state load, then mark the cached copy clean. */
void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
|
|
|
|
|
2008-11-05 19:29:27 +03:00
|
|
|
/*
 * Main vcpu execution loop: run the guest via KVM_RUN and service exits
 * until one requires returning to the caller.
 *
 * Returns EXCP_INTERRUPT/EXCP_HLT to the generic cpu loop, or a negative
 * value on fatal errors (after stopping the VM).
 */
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        /* Flush any user-space register modifications before entering. */
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        /* The iothread lock is dropped while the guest runs. */
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            /* EINTR/EAGAIN are normal: a signal kicked us out of KVM_RUN. */
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}
|
|
|
|
|
2008-11-13 22:21:00 +03:00
|
|
|
/*
 * Issue an ioctl on the main /dev/kvm fd.  The single optional variadic
 * argument is the ioctl payload pointer.
 * Returns the ioctl result, or -errno on failure.
 */
int kvm_ioctl(KVMState *s, int type, ...)
{
    va_list ap;
    void *arg;
    int ret;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    return ret == -1 ? -errno : ret;
}
|
|
|
|
|
2008-11-13 22:21:00 +03:00
|
|
|
/*
 * Issue an ioctl on the VM fd.  The single optional variadic argument is
 * the ioctl payload pointer.
 * Returns the ioctl result, or -errno on failure.
 */
int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    va_list ap;
    void *arg;
    int ret;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    return ret == -1 ? -errno : ret;
}
|
|
|
|
|
2008-11-13 22:21:00 +03:00
|
|
|
/*
 * Issue an ioctl on this vcpu's fd.  The single optional variadic argument
 * is the ioctl payload pointer.
 * Returns the ioctl result, or -errno on failure.
 */
int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    va_list ap;
    void *arg;
    int ret;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    return ret == -1 ? -errno : ret;
}
|
2008-12-04 23:33:06 +03:00
|
|
|
|
|
|
|
/* True if the kernel MMU tracks host mappings synchronously (no need for
 * MADV_DONTFORK on guest RAM). */
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}
|
2009-03-12 23:12:48 +03:00
|
|
|
|
2009-11-25 02:33:03 +03:00
|
|
|
/* Cached KVM_CAP_VCPU_EVENTS probe result from kvm_init(). */
int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}
|
|
|
|
|
2010-03-01 21:10:29 +03:00
|
|
|
/* Cached KVM_CAP_X86_ROBUST_SINGLESTEP probe result from kvm_init(). */
int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}
|
|
|
|
|
2010-03-12 17:20:49 +03:00
|
|
|
/* Cached KVM_CAP_DEBUGREGS probe result from kvm_init(). */
int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}
|
|
|
|
|
2010-06-17 13:53:07 +04:00
|
|
|
/* Cached KVM_CAP_XSAVE probe result from kvm_init(). */
int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}
|
|
|
|
|
|
|
|
/* Cached KVM_CAP_XCRS probe result from kvm_init(). */
int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}
|
|
|
|
|
2011-01-10 14:50:05 +03:00
|
|
|
int kvm_has_many_ioeventfds(void)
|
|
|
|
{
|
|
|
|
if (!kvm_enabled()) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return kvm_state->many_ioeventfds;
|
|
|
|
}
|
|
|
|
|
2011-10-15 13:49:47 +04:00
|
|
|
/* Whether the kernel supports GSI routing; always false when built against
 * headers without KVM_CAP_IRQ_ROUTING. */
int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}
|
|
|
|
|
2011-10-15 16:08:26 +04:00
|
|
|
/* IRQ0 override is safe unless an in-kernel irqchip lacks GSI routing
 * (in which case pin mappings cannot be remapped). */
int kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}
|
|
|
|
|
2009-04-26 20:03:40 +04:00
|
|
|
/*
 * Prepare a guest RAM region: without a synchronous MMU the region must be
 * marked MADV_DONTFORK so a fork()ed child cannot corrupt guest mappings.
 * Fatal (exit(1)) if the madvise fails, since running would be unsafe.
 */
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
|
|
|
|
|
2009-03-12 23:12:48 +03:00
|
|
|
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
|
|
|
/* Linear search of the per-VM software breakpoint list for address `pc`;
 * returns NULL when no breakpoint is installed there. */
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}
|
|
|
|
|
|
|
|
/* Non-zero when at least one software breakpoint is installed for the VM. */
int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
|
|
|
|
|
2009-07-17 01:55:28 +04:00
|
|
|
/* Argument bundle for kvm_invoke_set_guest_debug(), passed via run_on_cpu():
 * dbg is the request, env the target vcpu, err the ioctl result. */
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};
|
|
|
|
|
|
|
|
/* run_on_cpu() worker: apply KVM_SET_GUEST_DEBUG on the vcpu's own thread
 * and stash the result in the shared data block. */
static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}
|
|
|
|
|
2009-03-12 23:12:48 +03:00
|
|
|
/*
 * Recompute and install the guest-debug control state for a vcpu:
 * reinjection flags, single-step, plus arch-specific breakpoint setup.
 * Executed on the vcpu thread via run_on_cpu(); returns the ioctl result.
 */
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}
|
|
|
|
|
|
|
|
/*
 * Install a breakpoint (software or hardware) and refresh the debug state
 * on every vcpu.  Software breakpoints are reference-counted per address.
 *
 * Returns 0 on success, negative errno on failure.
 */
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            /* Already present: just bump the refcount. */
            bp->use_count++;
            return 0;
        }

        /* g_malloc() aborts on allocation failure and never returns NULL,
         * so the former "if (!bp) return -ENOMEM;" check was dead code. */
        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    /* Propagate the new debug configuration to all vcpus. */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Remove a breakpoint (software or hardware) and refresh the debug state
 * on every vcpu.  A software breakpoint is only physically removed when
 * its reference count drops to zero.
 *
 * Returns 0 on success, -ENOENT if no such sw breakpoint, or the arch
 * removal error.
 */
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    /* Propagate the updated debug configuration to all vcpus. */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Remove every breakpoint (software and hardware) and refresh the debug
 * state on all vcpus.  Best-effort: arch removal failures are retried on
 * each vcpu but otherwise ignored.
 */
void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
        /* Previously the entries were left on the list and never freed,
         * leaking them and leaving stale state for the next attach. */
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}
|
|
|
|
|
|
|
|
#else /* !KVM_CAP_SET_GUEST_DEBUG */
|
|
|
|
|
|
|
|
/* Stub when KVM_CAP_SET_GUEST_DEBUG is unavailable: debugging unsupported. */
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}
|
|
|
|
|
|
|
|
/* Stub when KVM_CAP_SET_GUEST_DEBUG is unavailable: breakpoints unsupported. */
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}
|
|
|
|
|
|
|
|
/* Stub when KVM_CAP_SET_GUEST_DEBUG is unavailable: breakpoints unsupported. */
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}
|
|
|
|
|
|
|
|
/* Stub when KVM_CAP_SET_GUEST_DEBUG is unavailable: nothing to remove. */
void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
|
|
|
|
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
|
2010-02-18 01:14:42 +03:00
|
|
|
|
|
|
|
/*
 * Set the signal mask KVM applies while this vcpu executes KVM_RUN.
 * A NULL sigset clears the mask (kernel reverts to the thread's mask).
 *
 * Returns the KVM_SET_SIGNAL_MASK ioctl result.
 */
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    /* kvm_signal_mask carries the set as a flexible trailing array. */
    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    /* NOTE(review): len is hard-coded to 8 — presumably the kernel sigset
     * size on this host; verify against the kernel ABI if ported. */
    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
|
2010-03-17 14:07:54 +03:00
|
|
|
|
2010-07-27 04:10:59 +04:00
|
|
|
/*
 * (De)register an eventfd to be signalled on a 4-byte MMIO write of `val`
 * to `addr` (KVM_IOEVENTFD with datamatch).
 *
 * Returns 0 on success, -ENOSYS without KVM, or the negative errno from
 * the ioctl.
 */
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = val,
        .addr = addr,
        .len = 4,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = fd,
    };

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        /* kvm_vm_ioctl() already returns -errno on failure.  The previous
         * "return -errno;" re-read errno, which may have been clobbered by
         * then, and was inconsistent with kvm_set_ioeventfd_pio_word(). */
        return ret;
    }

    return 0;
}
|
|
|
|
|
2010-03-17 14:07:54 +03:00
|
|
|
/*
 * (De)register an eventfd to be signalled on a 2-byte port write of `val`
 * to `addr` (KVM_IOEVENTFD with datamatch + PIO flag).
 *
 * Returns 0 on success, -ENOSYS without KVM, or the negative errno from
 * the ioctl.
 */
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
|
2011-02-02 00:15:51 +03:00
|
|
|
|
|
|
|
/* Forward a vcpu-thread SIGBUS (e.g. MCE on guest memory) to arch code. */
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}
|
|
|
|
|
|
|
|
/* Forward a main-thread SIGBUS to arch code. */
int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}
|