kqemu support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1283 c046a42c-6fe2-441c-8c8c-71466251a162
parent 92a31b1fff
commit 9df217a317
cpu-exec.c | 26
@@ -209,7 +209,33 @@ int cpu_exec(CPUState *env1)
#endif
            }
            env->exception_index = -1;
        }
#ifdef USE_KQEMU
        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
            int ret;
            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
            ret = kqemu_cpu_exec(env);
            /* put eflags in CPU temporary format */
            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            DF = 1 - (2 * ((env->eflags >> 10) & 1));
            CC_OP = CC_OP_EFLAGS;
            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            if (ret == 1) {
                /* exception */
                longjmp(env->jmp_env, 1);
            } else if (ret == 2) {
                /* softmmu execution needed */
            } else {
                if (env->interrupt_request != 0) {
                    /* hardware interrupt will be executed just after */
                } else {
                    /* otherwise, we restart */
                    longjmp(env->jmp_env, 1);
                }
            }
        }
#endif

        T0 = 0; /* force lookup of first TB */
        for(;;) {
#ifdef __sparc__
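For reference, a minimal standalone sketch of the DF conversion used in the hunk above (not taken from the commit; the bit position is the architectural EFLAGS.DF bit 10, and the program below exists only to illustrate the +1/-1 encoding round trip):

/* Illustration only: how EFLAGS.DF (bit 10) maps to QEMU's +1/-1 direction
 * encoding and back. Masks are written out locally, not taken from cpu.h. */
#include <stdio.h>

#define DF_BIT 10                                /* EFLAGS.DF lives in bit 10 */

int main(void)
{
    unsigned int eflags = 1u << DF_BIT;          /* direction flag set */
    int df = 1 - (2 * ((eflags >> DF_BIT) & 1)); /* -1 when DF=1, +1 when DF=0 */
    printf("DF=%d\n", df);                       /* prints -1 */

    /* going back: regenerate bit 10 from the +1/-1 encoding */
    unsigned int out = (df < 0) ? (1u << DF_BIT) : 0;
    printf("eflags.DF=%u\n", out >> DF_BIT);     /* prints 1 */
    return 0;
}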
exec-all.h | 22
@@ -586,3 +586,25 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif


#ifdef USE_KQEMU
extern int kqemu_fd;
extern int kqemu_flushed;

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);

static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->hflags & HF_CPL_MASK) == 3 &&
           (env->eflags & IOPL_MASK) != IOPL_MASK &&
           (env->cr[0] & CR0_PE_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK));
}

#endif
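For reference, a minimal standalone sketch of the gating condition expressed by kqemu_is_ok() above: acceleration is only attempted for protected-mode ring-3 code with interrupts enabled, IOPL below 3, and no vm86 mode. The mask values and the helper name below are written out locally for illustration, not taken from cpu.h:

/* Illustration only: the same predicate as kqemu_is_ok(), on plain integers.
 * Bit positions are the architectural x86 definitions (IOPL = bits 12-13,
 * IF = bit 9, VM = bit 17, CR0.PE = bit 0). */
#include <stdio.h>

#define IOPL_BITS  (3u << 12)
#define IF_BIT     (1u << 9)
#define VM_BIT     (1u << 17)
#define CR0_PE_BIT (1u << 0)

static int would_accelerate(int cpl, unsigned int eflags, unsigned int cr0)
{
    return cpl == 3 &&
           (eflags & IOPL_BITS) != IOPL_BITS && /* IOPL < 3: user code cannot touch IF */
           (cr0 & CR0_PE_BIT) &&                /* protected mode */
           (eflags & IF_BIT) &&                 /* interrupts enabled */
           !(eflags & VM_BIT);                  /* not vm86 */
}

int main(void)
{
    printf("%d\n", would_accelerate(3, IF_BIT, CR0_PE_BIT));          /* 1: typical user code */
    printf("%d\n", would_accelerate(0, IF_BIT, CR0_PE_BIT));          /* 0: kernel code stays on the soft CPU */
    printf("%d\n", would_accelerate(3, IF_BIT | VM_BIT, CR0_PE_BIT)); /* 0: vm86 mode */
    return 0;
}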
kqemu.c | 427 (new file)
@@ -0,0 +1,427 @@
/*
 * KQEMU support
 *
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG

#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "kqemu/kqemu.h"

#define KQEMU_DEVICE "/dev/kqemu"

int kqemu_allowed = 1;
int kqemu_fd = -1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
extern uint32_t **l1_phys_map;

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2;
    if (!is_cpuid_supported()) {
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
    }
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}

int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;

    if (!kqemu_allowed)
        return -1;

    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd < 0) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        close(kqemu_fd);
        kqemu_fd = -1;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = 1;
    nb_pages_to_flush = 0;
    return 0;
}

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[8 * 16];
    uint8_t dummy2[224];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, 8 * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, 8 * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    kenv->cpl = 3;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    nb_pages_to_flush = 0;

    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            restore_native_fp_fxrstor(env);
        else
            restore_native_fp_frstor(env);
    }

    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);

    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            save_native_fp_fxsave(env);
        else
            save_native_fp_fsave(env);
    }

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
#if 0
    /* no need to restore that */
    memcpy(env->ldt, kenv->ldt, sizeof(env->ldt));
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->cr[0] = kenv->cr0;
    env->cr[3] = kenv->cr3;
    env->cr[4] = kenv->cr4;
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

#endif
@@ -39,6 +39,9 @@
#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
#define USE_CODE_COPY
#endif
#if defined(__linux__) && defined(CONFIG_SOFTMMU) && defined(__i386__) && !defined(TARGET_X86_64)
#define USE_KQEMU
#endif

#define R_EAX 0
#define R_ECX 1

@@ -248,6 +251,14 @@
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

#define CPUID_EXT_SS3      (1 << 0)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_CX16     (1 << 13)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_LM      (1 << 29)

#define EXCP00_DIVZ	0
#define EXCP01_SSTP	1
#define EXCP02_NMI	2

@@ -408,6 +419,16 @@ typedef struct CPUX86State {
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;

@@ -431,13 +452,6 @@ typedef struct CPUX86State {
        int64_t i64;
    } fp_convert;

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;

@@ -470,13 +484,10 @@ typedef struct CPUX86State {
    int exception_is_int;
    target_ulong exception_next_eip;
    struct TranslationBlock *current_tb; /* currently executing TB */
    target_ulong cr[5]; /* NOTE: cr1 is unused */
    target_ulong dr[8]; /* debug registers */
    int interrupt_request;
    int user_mode_only; /* user mode only simulation */

    uint32_t a20_mask;

    /* soft mmu support */
    /* in order to avoid passing too many arguments to the memory
       write helpers, we store some rarely used information in the CPU

@@ -501,7 +512,11 @@ typedef struct CPUX86State {
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;

#ifdef USE_KQEMU
    int kqemu_enabled;
#endif
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
@@ -1274,7 +1274,7 @@ void helper_cpuid(void)
    case 1:
        EAX = env->cpuid_version;
        EBX = 0;
        ECX = 0;
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    default:

@@ -1828,6 +1828,12 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */

@@ -2097,11 +2103,25 @@ void helper_iret_protected(int shift, int next_eip)
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_sysenter(void)

@@ -2146,6 +2166,12 @@ void helper_sysexit(void)
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)