Remove vm86. This simplifies a number of critical places.

Pass 2.
maxv 2017-08-12 07:07:53 +00:00
parent 99813fb172
commit 7a3268ffdb
9 changed files with 133 additions and 331 deletions
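
The change is mechanical across all nine files: each i386 trapframe save/restore path carried an #ifdef VM86 branch that used the tf_vm86_* segment registers, converted eflags through get_vflags()/set_vflags(), and pointed the process at syscall_vm86; with vm86 gone, only the protected-mode branch is kept. As a reading aid, a minimal sketch of the removed pattern follows; the trapframe/sigcontext field names, PSL_VM and get_vflags() are identifiers that appear in the diff, while the surrounding function is hypothetical, invented only for illustration.

/*
 * Editor's sketch, not code from the tree: a generic "save the user context"
 * path showing the #ifdef VM86 branch that this commit deletes in each file.
 */
static void
example_save_context(struct lwp *l, struct trapframe *tf, struct sigcontext *sc)
{
#ifdef VM86
	if (tf->tf_eflags & PSL_VM) {
		/* vm86 task: segment registers live in the vm86 trapframe extension */
		sc->sc_gs = tf->tf_vm86_gs;
		sc->sc_fs = tf->tf_vm86_fs;
		sc->sc_es = tf->tf_vm86_es;
		sc->sc_ds = tf->tf_vm86_ds;
		sc->sc_eflags = get_vflags(l);
	} else
#endif
	{
		/* protected mode: the only path that remains after this commit */
		sc->sc_gs = tf->tf_gs;
		sc->sc_fs = tf->tf_fs;
		sc->sc_es = tf->tf_es;
		sc->sc_ds = tf->tf_ds;
		sc->sc_eflags = tf->tf_eflags;
	}
}
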

compat_13_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: compat_13_machdep.c,v 1.25 2009/11/21 03:11:00 rmind Exp $ */
/* $NetBSD: compat_13_machdep.c,v 1.26 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
@@ -30,11 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: compat_13_machdep.c,v 1.25 2009/11/21 03:11:00 rmind Exp $");
#ifdef _KERNEL_OPT
#include "opt_vm86.h"
#endif
__KERNEL_RCSID(0, "$NetBSD: compat_13_machdep.c,v 1.26 2017/08/12 07:07:53 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -47,10 +43,6 @@ __KERNEL_RCSID(0, "$NetBSD: compat_13_machdep.c,v 1.25 2009/11/21 03:11:00 rmind
#include <compat/sys/signal.h>
#include <compat/sys/signalvar.h>
#ifdef VM86
#include <machine/vm86.h>
#endif
int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
@@ -73,36 +65,24 @@ compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args
/* Restore register context. */
tf = l->l_md.md_regs;
#ifdef VM86
if (context.sc_eflags & PSL_VM) {
void syscall_vm86(struct trapframe *);
tf->tf_vm86_gs = context.sc_gs;
tf->tf_vm86_fs = context.sc_fs;
tf->tf_vm86_es = context.sc_es;
tf->tf_vm86_ds = context.sc_ds;
set_vflags(l, context.sc_eflags);
p->p_md.md_syscall = syscall_vm86;
} else
#endif
{
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_eflags))
return (EINVAL);
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_eflags))
return (EINVAL);
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_eflags & PSL_USER;
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_eflags & PSL_USER;
}
tf->tf_edi = context.sc_edi;
tf->tf_esi = context.sc_esi;
tf->tf_ebp = context.sc_ebp;

compat_16_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: compat_16_machdep.c,v 1.28 2017/08/10 12:49:11 maxv Exp $ */
/* $NetBSD: compat_16_machdep.c,v 1.29 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
@@ -30,10 +30,9 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: compat_16_machdep.c,v 1.28 2017/08/10 12:49:11 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: compat_16_machdep.c,v 1.29 2017/08/12 07:07:53 maxv Exp $");
#ifdef _KERNEL_OPT
#include "opt_vm86.h"
#include "opt_compat_netbsd.h"
#endif
@@ -46,10 +45,6 @@ __KERNEL_RCSID(0, "$NetBSD: compat_16_machdep.c,v 1.28 2017/08/10 12:49:11 maxv
#include <sys/mount.h>
#include <sys/syscallargs.h>
#ifdef VM86
#include <machine/mcontext.h>
#include <machine/vm86.h>
#endif
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>
@@ -93,36 +88,24 @@ compat_16_sys___sigreturn14(struct lwp *l, const struct compat_16_sys___sigretur
/* Restore register context. */
tf = l->l_md.md_regs;
#ifdef VM86
if (context.sc_eflags & PSL_VM) {
void syscall_vm86(struct trapframe *);
tf->tf_vm86_gs = context.sc_gs;
tf->tf_vm86_fs = context.sc_fs;
tf->tf_vm86_es = context.sc_es;
tf->tf_vm86_ds = context.sc_ds;
set_vflags(l, context.sc_eflags);
p->p_md.md_syscall = syscall_vm86;
} else
#endif
{
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_eflags))
return (EINVAL);
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_eflags))
return (EINVAL);
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_eflags & PSL_USER;
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_eflags & PSL_USER;
}
tf->tf_edi = context.sc_edi;
tf->tf_esi = context.sc_esi;
tf->tf_ebp = context.sc_ebp;
@@ -198,23 +181,12 @@ sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
frame.sf_scp = &fp->sf_sc;
/* Save register context. */
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
frame.sf_sc.sc_gs = tf->tf_vm86_gs;
frame.sf_sc.sc_fs = tf->tf_vm86_fs;
frame.sf_sc.sc_es = tf->tf_vm86_es;
frame.sf_sc.sc_ds = tf->tf_vm86_ds;
frame.sf_sc.sc_eflags = get_vflags(l);
(*p->p_emul->e_syscall_intern)(p);
} else
#endif
{
frame.sf_sc.sc_gs = tf->tf_gs;
frame.sf_sc.sc_fs = tf->tf_fs;
frame.sf_sc.sc_es = tf->tf_es;
frame.sf_sc.sc_ds = tf->tf_ds;
frame.sf_sc.sc_eflags = tf->tf_eflags;
}
frame.sf_sc.sc_gs = tf->tf_gs;
frame.sf_sc.sc_fs = tf->tf_fs;
frame.sf_sc.sc_es = tf->tf_es;
frame.sf_sc.sc_ds = tf->tf_ds;
frame.sf_sc.sc_eflags = tf->tf_eflags;
frame.sf_sc.sc_edi = tf->tf_edi;
frame.sf_sc.sc_esi = tf->tf_esi;
frame.sf_sc.sc_ebp = tf->tf_ebp;

i386_trap.S

@@ -1,4 +1,4 @@
/* $NetBSD: i386_trap.S,v 1.8 2017/06/14 17:21:04 maxv Exp $ */
/* $NetBSD: i386_trap.S,v 1.9 2017/08/12 07:07:53 maxv Exp $ */
/*
* Copyright 2002 (c) Wasabi Systems, Inc.
@@ -66,7 +66,7 @@
#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.8 2017/06/14 17:21:04 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.9 2017/08/12 07:07:53 maxv Exp $");
#endif
/*
@@ -383,12 +383,7 @@ calltrap:
_C_LABEL(trapreturn): .globl trapreturn
testb $CHK_UPL,TF_CS(%esp)
jnz .Lalltraps_checkast
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jz 6f
#else
jmp 6f
#endif
.Lalltraps_checkast:
/* Check for ASTs on exit to user mode. */
CLI(%eax)

machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.788 2017/08/10 12:49:11 maxv Exp $ */
/* $NetBSD: machdep.c,v 1.789 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.788 2017/08/10 12:49:11 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.789 2017/08/12 07:07:53 maxv Exp $");
#include "opt_beep.h"
#include "opt_compat_freebsd.h"
@@ -83,7 +83,6 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.788 2017/08/10 12:49:11 maxv Exp $");
#include "opt_physmem.h"
#include "opt_realmem.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_xen.h"
#include "isa.h"
#include "pci.h"
@@ -172,10 +171,6 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.788 2017/08/10 12:49:11 maxv Exp $");
#include <ddb/db_extern.h>
#endif
#ifdef VM86
#include <machine/vm86.h>
#endif
#include "acpica.h"
#include "bioscall.h"
@@ -593,12 +588,7 @@ getframe(struct lwp *l, int sig, int *onstack)
&& (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
if (*onstack)
return (char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size;
#ifdef VM86
if (tf->tf_eflags & PSL_VM)
return (void *)(tf->tf_esp + (tf->tf_ss << 4));
else
#endif
return (void *)tf->tf_esp;
return (void *)tf->tf_esp;
}
/*
@@ -1458,22 +1448,12 @@ cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
__greg_t ras_eip;
/* Save register context. */
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
gr[_REG_GS] = tf->tf_vm86_gs;
gr[_REG_FS] = tf->tf_vm86_fs;
gr[_REG_ES] = tf->tf_vm86_es;
gr[_REG_DS] = tf->tf_vm86_ds;
gr[_REG_EFL] = get_vflags(l);
} else
#endif
{
gr[_REG_GS] = tf->tf_gs;
gr[_REG_FS] = tf->tf_fs;
gr[_REG_ES] = tf->tf_es;
gr[_REG_DS] = tf->tf_ds;
gr[_REG_EFL] = tf->tf_eflags;
}
gr[_REG_GS] = tf->tf_gs;
gr[_REG_FS] = tf->tf_fs;
gr[_REG_ES] = tf->tf_es;
gr[_REG_DS] = tf->tf_ds;
gr[_REG_EFL] = tf->tf_eflags;
gr[_REG_EDI] = tf->tf_edi;
gr[_REG_ESI] = tf->tf_esi;
gr[_REG_EBP] = tf->tf_ebp;
@@ -1545,32 +1525,18 @@ cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
/* Restore register context, if any. */
if ((flags & _UC_CPU) != 0) {
#ifdef VM86
if (gr[_REG_EFL] & PSL_VM) {
tf->tf_vm86_gs = gr[_REG_GS];
tf->tf_vm86_fs = gr[_REG_FS];
tf->tf_vm86_es = gr[_REG_ES];
tf->tf_vm86_ds = gr[_REG_DS];
set_vflags(l, gr[_REG_EFL]);
if (flags & _UC_VM) {
void syscall_vm86(struct trapframe *);
l->l_proc->p_md.md_syscall = syscall_vm86;
}
} else
#endif
{
error = cpu_mcontext_validate(l, mcp);
if (error)
return error;
error = cpu_mcontext_validate(l, mcp);
if (error)
return error;
tf->tf_gs = gr[_REG_GS];
tf->tf_fs = gr[_REG_FS];
tf->tf_es = gr[_REG_ES];
tf->tf_ds = gr[_REG_DS];
/* Only change the user-alterable part of eflags */
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= (gr[_REG_EFL] & PSL_USER);
tf->tf_gs = gr[_REG_GS];
tf->tf_fs = gr[_REG_FS];
tf->tf_es = gr[_REG_ES];
tf->tf_ds = gr[_REG_DS];
/* Only change the user-alterable part of eflags */
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= (gr[_REG_EFL] & PSL_USER);
}
tf->tf_edi = gr[_REG_EDI];
tf->tf_esi = gr[_REG_ESI];
tf->tf_ebp = gr[_REG_EBP];

process_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: process_machdep.c,v 1.89 2017/02/23 03:34:22 kamil Exp $ */
/* $NetBSD: process_machdep.c,v 1.90 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
@@ -75,9 +75,8 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.89 2017/02/23 03:34:22 kamil Exp $");
__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.90 2017/08/12 07:07:53 maxv Exp $");
#include "opt_vm86.h"
#include "opt_ptrace.h"
#include <sys/param.h>
@@ -97,10 +96,6 @@ __KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.89 2017/02/23 03:34:22 kamil E
#include <x86/dbregs.h>
#include <x86/fpu.h>
#ifdef VM86
#include <machine/vm86.h>
#endif
static inline struct trapframe *
process_frame(struct lwp *l)
{
@@ -113,22 +108,12 @@ process_read_regs(struct lwp *l, struct reg *regs)
{
struct trapframe *tf = process_frame(l);
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
regs->r_gs = tf->tf_vm86_gs;
regs->r_fs = tf->tf_vm86_fs;
regs->r_es = tf->tf_vm86_es;
regs->r_ds = tf->tf_vm86_ds;
regs->r_eflags = get_vflags(l);
} else
#endif
{
regs->r_gs = tf->tf_gs & 0xffff;
regs->r_fs = tf->tf_fs & 0xffff;
regs->r_es = tf->tf_es & 0xffff;
regs->r_ds = tf->tf_ds & 0xffff;
regs->r_eflags = tf->tf_eflags;
}
regs->r_gs = tf->tf_gs & 0xffff;
regs->r_fs = tf->tf_fs & 0xffff;
regs->r_es = tf->tf_es & 0xffff;
regs->r_ds = tf->tf_ds & 0xffff;
regs->r_eflags = tf->tf_eflags;
regs->r_edi = tf->tf_edi;
regs->r_esi = tf->tf_esi;
regs->r_ebp = tf->tf_ebp;
@@ -168,41 +153,19 @@ process_write_regs(struct lwp *l, const struct reg *regs)
{
struct trapframe *tf = process_frame(l);
#ifdef VM86
if (regs->r_eflags & PSL_VM) {
void syscall_vm86(struct trapframe *);
/*
* Check for security violations.
*/
if (((regs->r_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(regs->r_cs, regs->r_eflags))
return (EINVAL);
tf->tf_vm86_gs = regs->r_gs;
tf->tf_vm86_fs = regs->r_fs;
tf->tf_vm86_es = regs->r_es;
tf->tf_vm86_ds = regs->r_ds;
set_vflags(l, regs->r_eflags);
/*
* Make sure that attempts at system calls from vm86
* mode die horribly.
*/
l->l_proc->p_md.md_syscall = syscall_vm86;
} else
#endif
{
/*
* Check for security violations.
*/
if (((regs->r_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(regs->r_cs, regs->r_eflags))
return (EINVAL);
tf->tf_gs = regs->r_gs;
tf->tf_fs = regs->r_fs;
tf->tf_es = regs->r_es;
tf->tf_ds = regs->r_ds;
tf->tf_eflags = regs->r_eflags;
tf->tf_gs = regs->r_gs;
tf->tf_fs = regs->r_fs;
tf->tf_es = regs->r_es;
tf->tf_ds = regs->r_ds;
#ifdef VM86
/* Restore normal syscall handler */
if (tf->tf_eflags & PSL_VM)
(*l->l_proc->p_emul->e_syscall_intern)(l->l_proc);
#endif
tf->tf_eflags = regs->r_eflags;
}
tf->tf_edi = regs->r_edi;
tf->tf_esi = regs->r_esi;
tf->tf_ebp = regs->r_ebp;

spl.S

@@ -1,4 +1,4 @@
/* $NetBSD: spl.S,v 1.40 2014/01/11 17:06:15 christos Exp $ */
/* $NetBSD: spl.S,v 1.41 2017/08/12 07:07:53 maxv Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,9 +30,8 @@
*/
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.40 2014/01/11 17:06:15 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.41 2017/08/12 07:07:53 maxv Exp $");
#include "opt_vm86.h"
#include "opt_ddb.h"
#include "opt_spldebug.h"
#include "opt_xen.h"
@@ -276,12 +275,7 @@ IDTVEC(doreti)
5:
testb $CHK_UPL,TF_CS(%esp)
jnz doreti_checkast
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jz 6f
#else
jmp 6f
#endif
.type _C_LABEL(doreti_checkast), @function
LABEL(doreti_checkast)
CHECK_ASTPENDING(%eax)
@@ -417,10 +411,6 @@ IDTVEC(preemptresume)
sti
testb $CHK_UPL, TF_CS(%esp)
jnz 1f
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jnz 1f
#endif
movl TF_EIP(%esp), %eax
pushl %eax
call _C_LABEL(kpreempt) # from kernel

trap.c

@@ -1,5 +1,5 @@
/* $NetBSD: trap.c,v 1.288 2017/07/01 10:44:42 maxv Exp $ */
/* $NetBSD: trap.c,v 1.289 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1998, 2000, 2005, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -69,13 +69,12 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.288 2017/07/01 10:44:42 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.289 2017/08/12 07:07:53 maxv Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_vm86.h"
#include "opt_xen.h"
#include "opt_dtrace.h"
#include "opt_compat_netbsd.h"
@@ -465,12 +464,6 @@ kernelfault:
ksi.ksi_code = BUS_ADRALN;
break;
case T_PROTFLT|T_USER:
#ifdef VM86
if (frame->tf_eflags & PSL_VM) {
vm86_gpfault(l, type & ~T_USER);
goto out;
}
#endif
/*
* If pmap_exec_fixup does something,
* let's retry the trap.

freebsd_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: freebsd_machdep.c,v 1.2 2017/08/08 08:04:06 maxv Exp $ */
/* $NetBSD: freebsd_machdep.c,v 1.3 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@@ -30,11 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: freebsd_machdep.c,v 1.2 2017/08/08 08:04:06 maxv Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
#endif
__KERNEL_RCSID(0, "$NetBSD: freebsd_machdep.c,v 1.3 2017/08/12 07:07:53 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -48,7 +44,6 @@ __KERNEL_RCSID(0, "$NetBSD: freebsd_machdep.c,v 1.2 2017/08/08 08:04:06 maxv Exp
#include <machine/cpufunc.h>
#include <x86/fpu.h>
#include <machine/reg.h>
#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <compat/freebsd/freebsd_machdep.h>
@@ -101,23 +96,12 @@ freebsd_sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
frame.sf_handler = catcher;
/* Save context. */
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
frame.sf_sc.sc_gs = tf->tf_vm86_gs;
frame.sf_sc.sc_fs = tf->tf_vm86_fs;
frame.sf_sc.sc_es = tf->tf_vm86_es;
frame.sf_sc.sc_ds = tf->tf_vm86_ds;
frame.sf_sc.sc_efl = get_vflags(l);
(*p->p_emul->e_syscall_intern)(p);
} else
#endif
{
frame.sf_sc.sc_gs = tf->tf_gs;
frame.sf_sc.sc_fs = tf->tf_fs;
frame.sf_sc.sc_es = tf->tf_es;
frame.sf_sc.sc_ds = tf->tf_ds;
frame.sf_sc.sc_efl = tf->tf_eflags;
}
frame.sf_sc.sc_gs = tf->tf_gs;
frame.sf_sc.sc_fs = tf->tf_fs;
frame.sf_sc.sc_es = tf->tf_es;
frame.sf_sc.sc_ds = tf->tf_ds;
frame.sf_sc.sc_efl = tf->tf_eflags;
frame.sf_sc.sc_edi = tf->tf_edi;
frame.sf_sc.sc_esi = tf->tf_esi;
frame.sf_sc.sc_ebp = tf->tf_ebp;
@@ -192,36 +176,24 @@ freebsd_sys_sigreturn(struct lwp *l, const struct freebsd_sys_sigreturn_args *ua
/* Restore register context. */
tf = l->l_md.md_regs;
#ifdef VM86
if (context.sc_efl & PSL_VM) {
void syscall_vm86(struct trapframe *);
tf->tf_vm86_gs = context.sc_gs;
tf->tf_vm86_fs = context.sc_fs;
tf->tf_vm86_es = context.sc_es;
tf->tf_vm86_ds = context.sc_ds;
set_vflags(l, context.sc_efl);
p->p_md.md_syscall = syscall_vm86;
} else
#endif
{
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_efl ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_efl))
return (EINVAL);
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((context.sc_efl ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(context.sc_cs, context.sc_efl))
return (EINVAL);
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_efl & PSL_USER;
tf->tf_gs = context.sc_gs;
tf->tf_fs = context.sc_fs;
tf->tf_es = context.sc_es;
tf->tf_ds = context.sc_ds;
tf->tf_eflags &= ~PSL_USER;
tf->tf_eflags |= context.sc_efl & PSL_USER;
}
tf->tf_edi = context.sc_edi;
tf->tf_esi = context.sc_esi;
tf->tf_ebp = context.sc_ebp;

linux_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: linux_machdep.c,v 1.163 2017/02/05 10:42:22 maxv Exp $ */
/* $NetBSD: linux_machdep.c,v 1.164 2017/08/12 07:07:53 maxv Exp $ */
/*-
* Copyright (c) 1995, 2000, 2008, 2009 The NetBSD Foundation, Inc.
@@ -30,10 +30,9 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.163 2017/02/05 10:42:22 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.164 2017/08/12 07:07:53 maxv Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#endif
@@ -82,7 +81,6 @@ __KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.163 2017/02/05 10:42:22 maxv Exp
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <x86/fpu.h>
@@ -194,22 +192,12 @@ linux_save_sigcontext(struct lwp *l, struct trapframe *tf,
struct pcb *pcb = lwp_getpcb(l);
/* Save register context. */
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
sc->sc_gs = tf->tf_vm86_gs;
sc->sc_fs = tf->tf_vm86_fs;
sc->sc_es = tf->tf_vm86_es;
sc->sc_ds = tf->tf_vm86_ds;
sc->sc_eflags = get_vflags(l);
} else
#endif
{
sc->sc_gs = tf->tf_gs;
sc->sc_fs = tf->tf_fs;
sc->sc_es = tf->tf_es;
sc->sc_ds = tf->tf_ds;
sc->sc_eflags = tf->tf_eflags;
}
sc->sc_gs = tf->tf_gs;
sc->sc_fs = tf->tf_fs;
sc->sc_es = tf->tf_es;
sc->sc_ds = tf->tf_ds;
sc->sc_eflags = tf->tf_eflags;
sc->sc_edi = tf->tf_edi;
sc->sc_esi = tf->tf_esi;
sc->sc_esp = tf->tf_esp;
@@ -442,39 +430,22 @@ linux_restore_sigcontext(struct lwp *l, struct linux_sigcontext *scp,
tf = l->l_md.md_regs;
DPRINTF(("sigreturn enter esp=0x%x eip=0x%x\n", tf->tf_esp, tf->tf_eip));
#ifdef VM86
if (scp->sc_eflags & PSL_VM) {
void syscall_vm86(struct trapframe *);
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((scp->sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(scp->sc_cs, scp->sc_eflags))
return EINVAL;
tf->tf_vm86_gs = scp->sc_gs;
tf->tf_vm86_fs = scp->sc_fs;
tf->tf_vm86_es = scp->sc_es;
tf->tf_vm86_ds = scp->sc_ds;
set_vflags(l, scp->sc_eflags);
p->p_md.md_syscall = syscall_vm86;
} else
#endif
{
/*
* Check for security violations. If we're returning to
* protected mode, the CPU will validate the segment registers
* automatically and generate a trap on violations. We handle
* the trap, rather than doing all of the checking here.
*/
if (((scp->sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
!USERMODE(scp->sc_cs, scp->sc_eflags))
return EINVAL;
tf->tf_gs = scp->sc_gs;
tf->tf_fs = scp->sc_fs;
tf->tf_es = scp->sc_es;
tf->tf_ds = scp->sc_ds;
tf->tf_eflags = scp->sc_eflags;
tf->tf_gs = scp->sc_gs;
tf->tf_fs = scp->sc_fs;
tf->tf_es = scp->sc_es;
tf->tf_ds = scp->sc_ds;
#ifdef VM86
if (tf->tf_eflags & PSL_VM)
(*p->p_emul->e_syscall_intern)(p);
#endif
tf->tf_eflags = scp->sc_eflags;
}
tf->tf_edi = scp->sc_edi;
tf->tf_esi = scp->sc_esi;
tf->tf_ebp = scp->sc_ebp;