204204308b
If we used a BIOS image larger than the current 256KB, we would need to move the reserved TSS and EPT identity-mapping pages. Currently the TSS setup supports this, but the EPT identity map does not.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "ioport.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

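/*
 * CPUID feature discovery: KVM_GET_SUPPORTED_CPUID reports which CPUID
 * bits the kernel can virtualize.  The ioctl fails with E2BIG when the
 * buffer is too small, so the caller below retries with a doubled entry
 * count until the whole list fits.
 */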
#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif

#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}
#endif

int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             R_ECX);

    cpuid_i = 0;

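    /* KVM identifies itself to the guest via the hypervisor CPUID range:
     * leaf KVM_CPUID_SIGNATURE (0x40000000) carries the "KVMKVMKVM"
     * signature and leaf KVM_CPUID_FEATURES (0x40000001) carries the
     * paravirtual feature bits gathered by get_para_features() above. */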
#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

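    /* Enumerate all basic CPUID leaves the CPU model exposes.  Most leaves
     * are copied verbatim, but leaf 2 (cache descriptors) is stateful and
     * must be read c->eax[7:0] times, while leaves 4, 0xb and 0xd return a
     * series of entries selected by ECX, so each index gets its own
     * kvm_cpuid_entry2 flagged with KVM_CPUID_FLAG_SIGNIFCANT_INDEX. */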
    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
}

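/*
 * KVM_GET_MSR_INDEX_LIST follows the usual two-call pattern: the first
 * call with nmsrs = 0 fails with E2BIG but fills in the number of MSRs
 * the kernel wants saved/restored; the second call retrieves the list
 * itself, which is then scanned for MSR_STAR.
 */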
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return 0;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

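/*
 * The EPT identity map page lives at 0xfffbc000, one page below the three
 * TSS pages that kvm_arch_init() places at 0xfffbd000; together they fill
 * the 16KB hole just below the 256KB BIOS ROM at the top of the 32-bit
 * address space (see the e820 reservation below).
 */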
static int kvm_init_identity_map_page(KVMState *s)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int ret;
    uint64_t addr = 0xfffbc000;

    if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    if (ret < 0) {
        fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(-ret));
        return ret;
    }
#endif
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, a TSS is needed.  Since this must be part
     * of guest physical memory, we need to allocate it.  Older versions
     * of KVM just assumed that it would be at the end of physical memory
     * but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this?
     */
    /*
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    if (ret < 0) {
        return ret;
    }

    return kvm_init_identity_map_page(s);
}

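/*
 * Segment conversion helpers: QEMU caches segment descriptors in
 * SegmentCache with the attribute bits packed into 'flags', while KVM
 * uses the unpacked struct kvm_segment.  v8086-mode segments do not come
 * from descriptors at all, so they get the fixed real-mode attributes
 * the CPU would synthesize (type 3, DPL 3, present, 16-bit).
 */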
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

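/*
 * General-purpose register transfer is written once and driven in both
 * directions: with set != 0 the QEMU values are copied into the kvm_regs
 * structure for KVM_SET_REGS, otherwise the KVM_GET_REGS output is
 * copied back into env.
 */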
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

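/*
 * FPU state conversion: KVM's kvm_fpu uses the FXSAVE layout, where the
 * top-of-stack index lives in FSW bits 13..11 and the tag word is the
 * abridged one-bit-per-register form (1 = valid).  QEMU keeps fpstt
 * separately and uses fptags[i] == 1 to mean empty, hence the inversions
 * below and in kvm_get_fpu().
 */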
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                    (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

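/*
 * MSRs are written in batches.  The timekeeping MSRs (TSC and the kvm
 * clock MSRs) are only written for a full state sync (reset or incoming
 * migration, level == KVM_PUT_FULL_STATE); rewriting them on every
 * synchronization would needlessly disturb the guest's clock.
 */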
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    if (level == KVM_PUT_FULL_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

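    /* Rebuild env->hflags: QEMU caches derived execution state (CPL,
     * code/stack size, long-mode activity, segment-addition shortcuts) in
     * hflags, so after reading sregs back from KVM these bits must be
     * recomputed from the control registers, EFER and the fresh segment
     * descriptors.  Flags outside the recomputed set are preserved via
     * HFLAG_COPY_MASK. */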
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

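/*
 * KVM_GET_MSRS returns the number of MSRs actually read, so that count
 * is used as the loop bound when copying values back, and the switch
 * keys on the MSR index rather than on the entry's position.
 */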
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}

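/*
 * VCPU event state (pending/injected exceptions, interrupts, NMIs and the
 * SIPI vector) is transferred so that in-flight injections survive a stop,
 * save or migration.  The NMI-pending and SIPI-vector fields are only
 * forced onto the kernel (via events.flags) during reset or full sync,
 * since they would otherwise clobber live state.
 */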
static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}

static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}

int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env, level);
    if (ret < 0)
        return ret;

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0)
            return ret;
    }

    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0)
        return ret;

    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

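/*
 * Guest debugging support: software breakpoints are implemented by
 * patching an int3 (0xcc) opcode into guest memory and restoring the
 * saved instruction byte on removal; hardware breakpoints and
 * watchpoints are tracked in hw_breakpoint[] and mirrored into the
 * guest's debug registers by kvm_arch_update_guest_debug() below.
 */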
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

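/*
 * Debug exit handling: for #DB (exception 1), DR6 bit 14 (BS) indicates a
 * single-step trap, while bits 0-3 identify which of DR0-DR3 triggered;
 * the R/W field for that slot in DR7 (bits 16 + n*4) tells execution
 * breakpoints (0), write watchpoints (1) and access watchpoints (3)
 * apart.  Any debug event we did not set up ourselves is reinjected into
 * the guest.
 */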
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}

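/*
 * Build the kvm_guest_debug state for KVM_SET_GUEST_DEBUG: type_code[]
 * and len_code[] translate GDB breakpoint types and lengths into the
 * R/W and LEN fields of DR7, and each active slot n gets its global
 * enable bit (2 << (n * 2)) set alongside its address in debugreg[n].
 */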
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */