2013-07-07 14:07:54 +04:00
|
|
|
/*
|
|
|
|
* x86 gdb server stub
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003-2005 Fabrice Bellard
|
|
|
|
* Copyright (c) 2013 SUSE LINUX Products GmbH
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2020-10-23 15:28:01 +03:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2013-07-07 14:07:54 +04:00
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
2016-01-26 21:17:03 +03:00
|
|
|
#include "qemu/osdep.h"
|
2016-03-15 18:58:45 +03:00
|
|
|
#include "cpu.h"
|
2024-05-07 17:05:48 +03:00
|
|
|
#include "gdbstub/helpers.h"
|
2013-07-07 14:07:54 +04:00
|
|
|
|
|
|
|
#ifdef TARGET_X86_64
/*
 * Map GDB's x86-64 general-purpose register numbering (rax, rbx, rcx,
 * rdx, rsi, rdi, rbp, rsp, r8..r15) onto QEMU's env->regs[] indices.
 * GDB's order differs from the hardware register encoding (which has
 * rcx/rdx before rbx), hence the explicit table for the low eight.
 */
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
/* On 32-bit targets the 8-entry identity map below serves both roles. */
#define gpr_map gpr_map32
#endif
/* GDB's i386 numbering matches env->regs[] directly: identity map. */
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
|
|
|
|
|
2019-01-24 07:04:57 +03:00
|
|
|
/*
 * Layout of the gdbstub register file, i.e. the register numbers ("n")
 * used by x86_cpu_gdb_read_register()/x86_cpu_gdb_write_register().
 *
 * Keep these in sync with assignment to
 * gdb_num_core_regs in target/i386/cpu.c
 * and with the machine description
 */

/*
 * SEG: 6 segments, plus fs_base, gs_base, kernel_gs_base
 */

/*
 * general regs -----> 8 or 16  (CPU_NB_REGS, precedes IDX_IP_REG)
 */
#define IDX_NB_IP 1
#define IDX_NB_FLAGS 1
#define IDX_NB_SEG (6 + 3)
#define IDX_NB_CTL 6
#define IDX_NB_FP 16
/*
 * fpu regs ----------> 8 or 16  (XMM registers, i.e. CPU_NB_REGS of them)
 */
#define IDX_NB_MXCSR 1
/*
 * total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
 */

/* First index of each group: GPRs occupy [0, CPU_NB_REGS). */
#define IDX_IP_REG CPU_NB_REGS
#define IDX_FLAGS_REG (IDX_IP_REG + IDX_NB_IP)
#define IDX_SEG_REGS (IDX_FLAGS_REG + IDX_NB_FLAGS)
#define IDX_CTL_REGS (IDX_SEG_REGS + IDX_NB_SEG)
#define IDX_FP_REGS (IDX_CTL_REGS + IDX_NB_CTL)
#define IDX_XMM_REGS (IDX_FP_REGS + IDX_NB_FP)
#define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)

/* Individual control registers within the IDX_CTL_REGS group. */
#define IDX_CTL_CR0_REG (IDX_CTL_REGS + 0)
#define IDX_CTL_CR2_REG (IDX_CTL_REGS + 1)
#define IDX_CTL_CR3_REG (IDX_CTL_REGS + 2)
#define IDX_CTL_CR4_REG (IDX_CTL_REGS + 3)
#define IDX_CTL_CR8_REG (IDX_CTL_REGS + 4)
#define IDX_CTL_EFER_REG (IDX_CTL_REGS + 5)

/*
 * On a 64-bit build, always present control/base registers as 64-bit to
 * GDB, even when the guest is currently in 32-bit mode (GDB cannot cope
 * with register sizes changing mid-session).
 */
#ifdef TARGET_X86_64
#define GDB_FORCE_64 1
#else
#define GDB_FORCE_64 0
#endif
|
|
|
|
|
2021-03-22 16:27:55 +03:00
|
|
|
/*
 * Append a control/base register value to @buf in the width GDB expects.
 *
 * 64 bits when the guest is running with a 64-bit code segment, or always
 * on a 64-bit build (GDB_FORCE_64); 32 bits otherwise.  Returns the number
 * of bytes appended.
 */
static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
{
    bool wide = GDB_FORCE_64 || (hflags & HF_CS64_MASK);

    return wide ? gdb_get_reg64(buf, val) : gdb_get_reg32(buf, val);
}
|
|
|
|
|
|
|
|
/*
 * Parse a control/base register value written by GDB from @buf into *@val.
 *
 * Reads 8 bytes when the guest's current code segment is 64-bit, else 4.
 * Returns the number of bytes consumed.
 *
 * NOTE(review): unlike gdb_read_reg_cs64() this does not honour
 * GDB_FORCE_64 — verify this asymmetry is intended for the write path.
 */
static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
{
    int len;

    if (hflags & HF_CS64_MASK) {
        *val = ldq_p(buf);
        len = 8;
    } else {
        *val = ldl_p(buf);
        len = 4;
    }
    return len;
}
|
2019-01-24 07:04:57 +03:00
|
|
|
|
2024-09-12 12:28:22 +03:00
|
|
|
/*
 * Append a target_ulong-sized register (e.g. the instruction pointer) to
 * @mem_buf in GDB wire format.
 *
 * A 64-bit target always sends 64 bits (the high half is zeroed when the
 * guest is not executing 64-bit code); a 32-bit target sends 32 bits.
 * Returns the number of bytes appended.
 */
static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val)
{
    if (TARGET_LONG_BITS != 64) {
        return gdb_get_reg32(mem_buf, val);
    }
    if (!(env->hflags & HF_CS64_MASK)) {
        /* Outside 64-bit mode only the low 32 bits are meaningful. */
        val &= 0xffffffffUL;
    }
    return gdb_get_reg64(mem_buf, val);
}
|
|
|
|
|
2020-03-16 20:21:41 +03:00
|
|
|
/*
 * Read register @n of @cs into @mem_buf in GDB wire format.
 *
 * Register numbering follows the IDX_* layout defined above.  Returns the
 * number of bytes appended to @mem_buf, or 0 for an unrecognised register.
 */
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    uint64_t tpr;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        /* General-purpose registers, remapped through gpr_map[]. */
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
            } else if (n < CPU_NB_REGS32) {
                /* 32-bit mode on a 64-bit CPU: report only the low half. */
                return gdb_get_reg64(mem_buf,
                                     env->regs[gpr_map[n]] & 0xffffffffUL);
            } else {
                /* r8..r15 are inaccessible outside 64-bit mode. */
                return gdb_get_regl(mem_buf, 0);
            }
        } else {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /*
         * x87 stack registers.  GDB expects ST0..ST7 (relative to the
         * current stack top), while env->fpregs[] is indexed by absolute
         * register number (R0..R7), so rotate by env->fpstt.  Without this
         * rotation GDB's "info float" shows the stack in the wrong order.
         */
        int st_index = n - IDX_FP_REGS;
        int r_index = (st_index + env->fpstt) % 8;
        floatx80 *fp = &env->fpregs[r_index].d;
        /* 80-bit extended value: 64-bit mantissa, then 16-bit sign/exp. */
        int len = gdb_get_reg64(mem_buf, cpu_to_le64(fp->low));
        len += gdb_get_reg16(mem_buf, cpu_to_le16(fp->high));
        return len;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            return gdb_get_reg128(mem_buf,
                                  env->xmm_regs[n].ZMM_Q(1),
                                  env->xmm_regs[n].ZMM_Q(0));
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            return gdb_get_reg(env, mem_buf, env->eip);
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        /* Segment selectors. */
        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);

        /* Segment bases, width chosen by the current CS size. */
        case IDX_SEG_REGS + 6:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base);
        case IDX_SEG_REGS + 7:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base);

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase);
#else
            return gdb_get_reg32(mem_buf, 0);
#endif

        /* x87 control/status words follow the eight stack registers. */
        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            /* Merge the stack-top pointer into bits 11..13 of FSW. */
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            /* Refresh mxcsr from the live SSE status before reporting it. */
            update_mxcsr_from_sse_status(env);
            return gdb_get_reg32(mem_buf, env->mxcsr);

        /* Control registers. */
        case IDX_CTL_CR0_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]);
        case IDX_CTL_CR2_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]);
        case IDX_CTL_CR3_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]);
        case IDX_CTL_CR4_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]);
        case IDX_CTL_CR8_REG:
            /* CR8 mirrors the APIC task-priority register (system only). */
#ifndef CONFIG_USER_ONLY
            tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
            tpr = 0;
#endif
            return gdb_read_reg_cs64(env->hflags, mem_buf, tpr);

        case IDX_CTL_EFER_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);
        }
    }
    /* Unrecognised register number. */
    return 0;
}
|
|
|
|
|
2021-01-10 02:34:27 +03:00
|
|
|
/*
 * Load segment register @sreg from the 4-byte selector GDB supplied in
 * @mem_buf.  Always consumes and returns 4 bytes.
 *
 * In system emulation the descriptor cache (base/limit/flags) must also be
 * refilled, either synthetically (real/VM86 mode) or from the descriptor
 * tables of the guest.
 */
static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
{
    CPUX86State *env = &cpu->env;
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            /* Real or VM86 mode: synthesize a flat 64K segment.  VM86
               segments run at DPL 3, real-mode ones at DPL 0. */
            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
            base = selector << 4;
            limit = 0xffff;
            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
        } else {
            /* Protected mode: look the descriptor up in the guest's
               GDT/LDT; on failure leave the register untouched. */
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
                                         &flags)) {
                return 4;
            }
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}
|
|
|
|
|
2024-09-12 12:28:22 +03:00
|
|
|
/*
 * Parse a target_ulong-sized register value (e.g. the instruction
 * pointer) from @mem_buf into *@val.
 *
 * A 64-bit target always consumes 8 bytes (truncated to 32 bits when the
 * guest is not in 64-bit mode); a 32-bit target consumes 4.  Returns the
 * number of bytes consumed.
 */
static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val)
{
    if (TARGET_LONG_BITS != 64) {
        *val = (uint32_t)ldl_p(mem_buf);
        return 4;
    }
    *val = ldq_p(mem_buf);
    if (!(env->hflags & HF_CS64_MASK)) {
        /* Outside 64-bit mode only the low 32 bits are writable. */
        *val &= 0xffffffffUL;
    }
    return 8;
}
|
|
|
|
|
2013-06-29 06:18:45 +04:00
|
|
|
/*
 * Write register @n of @cs from the GDB-supplied bytes in @mem_buf.
 *
 * Register numbering follows the IDX_* layout defined above.  Returns the
 * number of bytes consumed from @mem_buf, or 0 for an unrecognised
 * register.
 */
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong tmp;
    int len;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        /* General-purpose registers, remapped through gpr_map[]. */
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            } else if (n < CPU_NB_REGS32) {
                /* 32-bit mode: only the low 32 bits are writable. */
                env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
            }
            /* Writes to r8..r15 outside 64-bit mode are silently eaten. */
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /*
         * x87 stack registers: 64-bit mantissa then 16-bit sign/exponent.
         * NOTE(review): indexed by absolute register number here, while
         * the read path rotates by env->fpstt (ST0..ST7) — confirm the
         * asymmetry is intended.
         */
        floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
        fp->low = le64_to_cpu(* (uint64_t *) mem_buf);
        fp->high = le16_to_cpu(* (uint16_t *) (mem_buf + 8));
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            return gdb_write_reg(env, mem_buf, &env->eip);
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        /* Segment selectors: also refresh the descriptor cache. */
        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);

        /* Segment bases, width chosen by the current CS size. */
        case IDX_SEG_REGS + 6:
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base);
        case IDX_SEG_REGS + 7:
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base);
        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase);
#endif
            /* No kernel_gs_base on 32-bit targets: swallow 4 bytes. */
            return 4;

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            /* Split FSW back into status bits and stack-top pointer. */
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        /* Remaining x87 words are reported as 0 on read; writes are
           accepted and discarded so the packet stays in sync. */
        case IDX_FP_REGS + 10: /* ftag */
            return 4;
        case IDX_FP_REGS + 11: /* fiseg */
            return 4;
        case IDX_FP_REGS + 12: /* fioff */
            return 4;
        case IDX_FP_REGS + 13: /* foseg */
            return 4;
        case IDX_FP_REGS + 14: /* fooff */
            return 4;
        case IDX_FP_REGS + 15: /* fop */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;

        /* Control registers: parse the value, then (system emulation
           only) apply it through the proper update helper. */
        case IDX_CTL_CR0_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr0(env, tmp);
#endif
            return len;

        case IDX_CTL_CR2_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            env->cr[2] = tmp;
#endif
            return len;

        case IDX_CTL_CR3_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr3(env, tmp);
#endif
            return len;

        case IDX_CTL_CR4_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr4(env, tmp);
#endif
            return len;

        case IDX_CTL_CR8_REG:
            /* CR8 mirrors the APIC task-priority register. */
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_set_apic_tpr(cpu->apic_state, tmp);
#endif
            return len;

        case IDX_CTL_EFER_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_load_efer(env, tmp);
#endif
            return len;
        }
    }
    /* Unrecognised register. */
    return 0;
}
|