target-i386: the x86 CPL is stored in CS.selector - auto update hflags accordingly.
Instead of manually calling cpu_x86_set_cpl() when the CPL changes, check
for CPL changes on calls to cpu_x86_load_seg_cache(R_CS). Every location
that called cpu_x86_set_cpl() also called cpu_x86_load_seg_cache(R_CS),
so cpu_x86_set_cpl() is no longer required.

This fixes the SMM handler code as it was not setting/restoring the CPL
level manually.

Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fd460606fd
commit 7848c8d19f
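In isolation, the rule this patch folds into cpu_x86_load_seg_cache() is small. The sketch below restates it outside of QEMU's structures; derive_cpl() and its flag parameters are illustrative names for this note, not part of the patch:

/* Standalone restatement of the CPL rule this patch moves into
 * cpu_x86_load_seg_cache(): CPL is the RPL (low two bits) of the CS
 * selector, except real mode forces CPL 0 and vm86 mode forces CPL 3.
 * derive_cpl() and its parameters are illustrative, not QEMU API. */
static int derive_cpl(unsigned cs_selector, int protected_mode, int vm86)
{
    if (!protected_mode) {
        return 0;               /* real mode: CR0.PE clear */
    }
    if (vm86) {
        return 3;               /* vm86 mode: EFLAGS.VM set */
    }
    return cs_selector & 3;     /* protected/long mode: RPL of CS */
}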
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -1003,8 +1003,6 @@ int main(int argc, char **argv)
     cpu->opaque = ts;
 
 #if defined(TARGET_I386)
-    cpu_x86_set_cpl(env, 3);
-
     env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
     env->hflags |= HF_PE_MASK;
     if (env->features[FEAT_1_EDX] & CPUID_SSE) {
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -4051,8 +4051,6 @@ int main(int argc, char **argv, char **envp)
 #endif
 
 #if defined(TARGET_I386)
-    cpu_x86_set_cpl(env, 3);
-
     env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
     env->hflags |= HF_PE_MASK;
     if (env->features[FEAT_1_EDX] & CPUID_SSE) {
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -124,9 +124,9 @@
 #define ID_MASK                 0x00200000
 
 /* hidden flags - used internally by qemu to represent additional cpu
-   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
-   redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
-   bit positions to ease oring with eflags. */
+   states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We
+   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
+   positions to ease oring with eflags. */
 /* current cpl */
 #define HF_CPL_SHIFT         0
 /* true if soft mmu is being used */
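Since HF_CPL_SHIFT is 0, extracting the current CPL from hflags stays a plain mask. A minimal sketch; the helper name is hypothetical, while the two defines are the ones from target-i386/cpu.h:

#include <stdint.h>

#define HF_CPL_SHIFT 0                     /* from target-i386/cpu.h */
#define HF_CPL_MASK  (3 << HF_CPL_SHIFT)   /* from target-i386/cpu.h */

/* Hypothetical helper: read the current privilege level back out of
 * hflags. With HF_CPL_SHIFT == 0 this compiles to a single AND. */
static inline int cpl_from_hflags(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}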
@@ -974,6 +974,7 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
     /* update the hidden flags */
     {
         if (seg_reg == R_CS) {
+            int cpl = selector & 3;
 #ifdef TARGET_X86_64
             if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                 /* long mode */
@@ -983,11 +984,19 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
 #endif
             {
                 /* legacy / compatibility case */
+                if (!(env->cr[0] & CR0_PE_MASK))
+                    cpl = 0;
+                else if (env->eflags & VM_MASK)
+                    cpl = 3;
                 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                     >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                     new_hflags;
             }
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
         }
         new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
             >> (DESC_B_SHIFT - HF_SS32_SHIFT);
@@ -1031,16 +1040,6 @@ int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                              target_ulong *base, unsigned int *limit,
                              unsigned int *flags);
 
-/* wrapper, just in case memory mappings must be changed */
-static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
-{
-#if HF_CPL_MASK == 3
-    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
-#else
-#error HF_CPL_MASK is hardcoded
-#endif
-}
-
 /* op_helper.c */
 /* used for debug or cpu save/restore */
 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
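With the wrapper removed, code that needs to force a particular privilege level does so by reloading CS with the desired RPL. A hedged sketch of that pattern follows; the helper name is hypothetical, but the cpu_x86_load_seg_cache() signature matches its uses elsewhere in this diff:

/* Hypothetical stand-in for the removed cpu_x86_set_cpl(): reload CS
 * with the desired RPL and let cpu_x86_load_seg_cache() refresh
 * HF_CPL_MASK in env->hflags as a side effect. */
static inline void reload_cs_with_cpl(CPUX86State *env, int cpl)
{
    cpu_x86_load_seg_cache(env, R_CS,
                           (env->segs[R_CS].selector & ~3) | cpl,
                           env->segs[R_CS].base,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
}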
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -409,11 +409,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
         for (i = 0; i < 6; i++) {
             load_seg_vm(env, i, new_segs[i]);
         }
-        /* in vm86, CPL is always 3 */
-        cpu_x86_set_cpl(env, 3);
     } else {
-        /* CPL is set the RPL of CS */
-        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
         /* first just selectors as the rest may trigger exceptions */
         for (i = 0; i < 6; i++) {
             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
@@ -763,7 +759,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
-        cpu_x86_set_cpl(env, dpl);
         env->eip = offset;
     }
 
@@ -928,7 +923,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
-        cpu_x86_set_cpl(env, dpl);
         env->eip = offset;
     }
 #endif
@@ -962,7 +956,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
 
         env->eflags &= ~env->fmask;
         cpu_load_eflags(env, env->eflags, 0);
-        cpu_x86_set_cpl(env, 0);
         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_P_MASK |
@@ -983,7 +976,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
         env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
 
         env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
-        cpu_x86_set_cpl(env, 0);
         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1038,7 +1030,6 @@ void helper_sysret(CPUX86State *env, int dflag)
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_W_MASK | DESC_A_MASK);
-        cpu_x86_set_cpl(env, 3);
     } else {
         env->eflags |= IF_MASK;
         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
@@ -1052,7 +1043,6 @@ void helper_sysret(CPUX86State *env, int dflag)
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_W_MASK | DESC_A_MASK);
-        cpu_x86_set_cpl(env, 3);
     }
 }
 #endif
||||
@ -1905,7 +1895,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
|
||||
get_seg_base(e1, e2),
|
||||
get_seg_limit(e1, e2),
|
||||
e2);
|
||||
cpu_x86_set_cpl(env, dpl);
|
||||
SET_ESP(sp, sp_mask);
|
||||
env->eip = offset;
|
||||
}
|
||||
@ -2134,7 +2123,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
|
||||
get_seg_base(e1, e2),
|
||||
get_seg_limit(e1, e2),
|
||||
e2);
|
||||
cpu_x86_set_cpl(env, rpl);
|
||||
sp = new_esp;
|
||||
#ifdef TARGET_X86_64
|
||||
if (env->hflags & HF_CS64_MASK) {
|
||||
@@ -2185,7 +2173,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                     VIP_MASK);
     load_seg_vm(env, R_CS, new_cs & 0xffff);
-    cpu_x86_set_cpl(env, 3);
     load_seg_vm(env, R_SS, new_ss & 0xffff);
     load_seg_vm(env, R_ES, new_es & 0xffff);
     load_seg_vm(env, R_DS, new_ds & 0xffff);
@@ -2238,7 +2225,6 @@ void helper_sysenter(CPUX86State *env)
         raise_exception_err(env, EXCP0D_GPF, 0);
     }
     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
-    cpu_x86_set_cpl(env, 0);
 
 #ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
@@ -2274,7 +2260,6 @@ void helper_sysexit(CPUX86State *env, int dflag)
     if (env->sysenter_cs == 0 || cpl != 0) {
         raise_exception_err(env, EXCP0D_GPF, 0);
     }
-    cpu_x86_set_cpl(env, 3);
 #ifdef TARGET_X86_64
     if (dflag == 2) {
         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -282,9 +282,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
                           env->vm_vmcb + offsetof(struct vmcb, save.dr7));
     env->dr[6] = ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.dr6));
-    cpu_x86_set_cpl(env, ldub_phys(cs->as,
-                                   env->vm_vmcb + offsetof(struct vmcb,
-                                                           save.cpl)));
 
     /* FIXME: guest state consistency checks */
 
@@ -729,7 +726,6 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
              env->vm_hsave + offsetof(struct vmcb, save.dr7));
 
     /* other setups */
-    cpu_x86_set_cpl(env, 0);
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
              exit_code);
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),