VM86 support, by John Kohl, touched up a bit by me.

mycroft 1996-01-08 13:51:30 +00:00
parent 269329a42b
commit 7149c5ff14
9 changed files with 621 additions and 33 deletions

sys/arch/i386/conf/files.i386

@@ -1,4 +1,4 @@
-# $NetBSD: files.i386,v 1.60 1995/10/11 04:19:29 mycroft Exp $
+# $NetBSD: files.i386,v 1.61 1996/01/08 13:51:30 mycroft Exp $
 #
 # new style config file for i386 architecture
 #
@@ -126,6 +126,9 @@ file arch/i386/pci/pci_machdep.c pci
 # Compatibility modules
 #
 
+# VM86 mode
+file arch/i386/i386/vm86.c vm86
+
 # SVR4 binary compatibility (COMPAT_SVR4)
 include "../../../compat/svr4/files.svr4"
 file arch/i386/i386/svr4_machdep.c compat_svr4

sys/arch/i386/i386/machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.183 1996/01/04 22:22:01 jtc Exp $ */
+/* $NetBSD: machdep.c,v 1.184 1996/01/08 13:51:34 mycroft Exp $ */
 
 /*-
  * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved.
@@ -92,6 +92,10 @@
 #include <i386/isa/isa_machdep.h>
 #include <i386/isa/nvram.h>
 
+#ifdef VM86
+#include <machine/vm86.h>
+#endif
+
 #include "isa.h"
 #include "npx.h"
 #if NNPX > 0
@@ -537,6 +541,8 @@ sendsig(catcher, sig, mask, code)
 	/*
 	 * Build the signal context to be used by sigreturn.
 	 */
+	frame.sf_sc.sc_err = tf->tf_err;
+	frame.sf_sc.sc_trapno = tf->tf_trapno;
 	frame.sf_sc.sc_onstack = oonstack;
 	frame.sf_sc.sc_mask = mask;
 #ifdef VM86
@@ -545,6 +551,9 @@ sendsig(catcher, sig, mask, code)
 		frame.sf_sc.sc_fs = tf->tf_vm86_fs;
 		frame.sf_sc.sc_es = tf->tf_vm86_es;
 		frame.sf_sc.sc_ds = tf->tf_vm86_ds;
+		frame.sf_sc.sc_eflags = tf->tf_eflags;
+		SETFLAGS(frame.sf_sc.sc_eflags, VM86_EFLAGS(p),
+		    VM86_FLAGMASK(p)|PSL_VIF);
 	} else
 #endif
 	{
@@ -552,19 +561,19 @@ sendsig(catcher, sig, mask, code)
 		__asm("movl %%fs,%w0" : "=r" (frame.sf_sc.sc_fs));
 		frame.sf_sc.sc_es = tf->tf_es;
 		frame.sf_sc.sc_ds = tf->tf_ds;
+		frame.sf_sc.sc_eflags = tf->tf_eflags;
 	}
 	frame.sf_sc.sc_edi = tf->tf_edi;
 	frame.sf_sc.sc_esi = tf->tf_esi;
 	frame.sf_sc.sc_ebp = tf->tf_ebp;
 	frame.sf_sc.sc_ebx = tf->tf_ebx;
 	frame.sf_sc.sc_edx = tf->tf_edx;
 	frame.sf_sc.sc_ecx = tf->tf_ecx;
 	frame.sf_sc.sc_eax = tf->tf_eax;
 	frame.sf_sc.sc_eip = tf->tf_eip;
 	frame.sf_sc.sc_cs = tf->tf_cs;
-	frame.sf_sc.sc_eflags = tf->tf_eflags;
 	frame.sf_sc.sc_esp = tf->tf_esp;
 	frame.sf_sc.sc_ss = tf->tf_ss;
 
 	if (copyout(&frame, fp, sizeof(frame)) != 0) {
 		/*
@@ -646,25 +655,28 @@ sys_sigreturn(p, v, retval)
 		tf->tf_vm86_fs = context.sc_fs;
 		tf->tf_vm86_es = context.sc_es;
 		tf->tf_vm86_ds = context.sc_ds;
+		tf->tf_eflags = context.sc_eflags;
+		SETFLAGS(VM86_EFLAGS(p), context.sc_eflags,
+		    VM86_FLAGMASK(p)|PSL_VIF);
 	} else
 #endif
 	{
 		/* %fs and %gs were restored by the trampoline. */
 		tf->tf_es = context.sc_es;
 		tf->tf_ds = context.sc_ds;
+		tf->tf_eflags = context.sc_eflags;
 	}
 	tf->tf_edi = context.sc_edi;
 	tf->tf_esi = context.sc_esi;
 	tf->tf_ebp = context.sc_ebp;
 	tf->tf_ebx = context.sc_ebx;
 	tf->tf_edx = context.sc_edx;
 	tf->tf_ecx = context.sc_ecx;
 	tf->tf_eax = context.sc_eax;
 	tf->tf_eip = context.sc_eip;
 	tf->tf_cs = context.sc_cs;
-	tf->tf_eflags = context.sc_eflags;
 	tf->tf_esp = context.sc_esp;
 	tf->tf_ss = context.sc_ss;
 
 	return (EJUSTRETURN);
 }

sys/arch/i386/i386/sys_machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: sys_machdep.c,v 1.26 1995/10/15 05:48:08 mycroft Exp $ */
+/* $NetBSD: sys_machdep.c,v 1.27 1996/01/08 13:51:36 mycroft Exp $ */
 
 /*-
  * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@@ -66,6 +66,10 @@
 #include <machine/reg.h>
 #include <machine/sysarch.h>
 
+#ifdef VM86
+#include <machine/vm86.h>
+#endif
+
 extern vm_map_t kernel_map;
 
 #ifdef TRACE
@@ -270,6 +274,14 @@ i386_set_ldt(p, args, retval)
 			if (n == fsslot || n == gsslot)
 				return (EBUSY);
 			break;
+		case SDT_MEMEC:
+		case SDT_MEMEAC:
+		case SDT_MEMERC:
+		case SDT_MEMERAC:
+			/* Must be "present" if executable and conforming. */
+			if (desc.sd.sd_p == 0)
+				return (EACCES);
+			break;
 		case SDT_MEMRO:
 		case SDT_MEMROA:
 		case SDT_MEMRW:
@@ -411,6 +423,12 @@ sys_sysarch(p, v, retval)
 		error = i386_set_ioperm(p, SCARG(uap, parms), retval);
 		break;
 
+#ifdef VM86
+	case I386_VM86:
+		error = i386_vm86(p, SCARG(uap, parms), retval);
+		break;
+#endif
+
 	default:
 		error = EINVAL;
 		break;

sys/arch/i386/i386/trap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: trap.c,v 1.91 1995/12/09 05:00:27 mycroft Exp $ */
+/* $NetBSD: trap.c,v 1.92 1996/01/08 13:51:38 mycroft Exp $ */
 
 #undef DEBUG
 #define DEBUG
@@ -262,9 +262,15 @@ trap(frame)
 		frame.tf_eip = resume;
 		return;
 
+	case T_PROTFLT|T_USER: /* protection fault */
+#ifdef VM86
+		if (frame.tf_eflags & PSL_VM) {
+			vm86_gpfault(p, type & ~T_USER);
+			goto out;
+		}
+#endif
 	case T_SEGNPFLT|T_USER:
 	case T_STKFLT|T_USER:
-	case T_PROTFLT|T_USER: /* protection fault */
 	case T_ALIGNFLT|T_USER:
 		trapsignal(p, SIGBUS, type &~ T_USER);
 		goto out;
@@ -521,6 +527,17 @@ syscall(frame)
 #endif
 	params = (caddr_t)frame.tf_esp + sizeof(int);
 
+#ifdef VM86
+	/*
+	 * VM86 mode application found our syscall trap gate by accident; let
+	 * it get a SIGSYS and have the VM86 handler in the process take care
+	 * of it.
+	 */
+	if (frame.tf_eflags & PSL_VM)
+		code = -1;
+	else
+#endif
	switch (code) {
 	case SYS_syscall:
 #ifdef COMPAT_LINUX

sys/arch/i386/i386/vm86.c (new file, 449 lines)

@@ -0,0 +1,449 @@
/* $NetBSD: vm86.c,v 1.1 1996/01/08 13:51:40 mycroft Exp $ */
/*
* Copyright (c) 1995 John T. Kohl
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/device.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
#ifdef SYSVMSG
#include <sys/msg.h>
#endif
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/ktrace.h>
#include <machine/sysarch.h>
#include <machine/vm86.h>
static void return_to_32bit __P((struct proc *, int));
static void fast_intxx __P((struct proc *, int));
#define ADDR(segment, addr) ((caddr_t)((segment << 4) + addr))
#define IP_ADVANCE(x) { ip += (x); tf->tf_eip = ip; }
#define SP_ADJUST(x) { sp += (x); tf->tf_esp = sp; }
#define SETDIRECT ((~(PSL_USERSTATIC|PSL_NT)) & 0xffff)
#define GETDIRECT (SETDIRECT|0x02a) /* add in two MBZ bits */
static __inline__ int
is_bitset(nr, bitmap)
int nr;
caddr_t bitmap;
{
u_int byte; /* bt instruction doesn't do
bytes--it examines ints! */
bitmap += nr / NBBY;
nr = nr % NBBY;
byte = fubyte(bitmap);
__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
:"=r" (nr)
:"r" (byte),"r" (nr));
return (nr);
}
static __inline__ void
set_vif(p)
struct proc *p;
{
VM86_EFLAGS(p) |= PSL_VIF;
if (VM86_EFLAGS(p) & PSL_VIP)
return_to_32bit(p, VM86_STI);
}
static __inline__ void
set_vflags(p, flags)
struct proc *p;
int flags;
{
struct trapframe *tf = p->p_md.md_regs;
SETFLAGS(VM86_EFLAGS(p), flags, VM86_FLAGMASK(p));
SETFLAGS(tf->tf_eflags, flags, SETDIRECT);
if (flags & PSL_I)
set_vif(p);
}
static __inline__ void
set_vflags_short(p, flags)
struct proc *p;
int flags;
{
struct trapframe *tf = p->p_md.md_regs;
SETFLAGS(VM86_EFLAGS(p), flags, VM86_FLAGMASK(p) & 0xffff);
SETFLAGS(tf->tf_eflags, flags, SETDIRECT);
if (flags & PSL_I)
set_vif(p);
}
static __inline__ int
get_vflags(p)
struct proc *p;
{
struct trapframe *tf = p->p_md.md_regs;
int flags = 0;
SETFLAGS(flags, VM86_EFLAGS(p), VM86_FLAGMASK(p));
SETFLAGS(flags, tf->tf_eflags, GETDIRECT);
if (VM86_EFLAGS(p) & PSL_VIF)
flags |= PSL_I;
return (flags);
}
#define V86_AH(regs) (((u_char *)&((regs)->tf_eax))[1])
#define V86_AL(regs) (((u_char *)&((regs)->tf_eax))[0])
static void
fast_intxx(p, intrno)
struct proc *p;
int intrno;
{
struct trapframe *tf = p->p_md.md_regs;
/*
* handle certain interrupts directly by pushing the interrupt
* frame and resetting registers, but only if user said that's ok
* (i.e. not revectored.) Otherwise bump to 32-bit user handler.
*/
struct vm86_struct *u_vm86p;
struct { u_short ip, cs; } ihand;
struct { u_short short1, short2, short3; } threeshorts;
u_short ip, cs, sp, ss;
/*
* Note: u_vm86p points to user-space, we only compute offsets
* and don't deref it. is_revectored() above does fubyte() to
* get stuff from it
*/
u_vm86p = (struct vm86_struct *)p->p_addr->u_pcb.vm86_userp;
/*
* If coming from BIOS segment, or going to BIOS segment, or user
* requested special handling, return to user space with indication
* of which INT was requested.
*/
cs = tf->tf_cs;
if (cs == BIOSSEG || is_bitset(intrno, &u_vm86p->int_byuser[0]))
goto vector;
/*
* If it's interrupt 0x21 (special in the DOS world) and the
* sub-command (in AH) was requested for special handling,
* return to user mode.
*/
if (intrno == 0x21 && is_bitset(V86_AH(tf), &u_vm86p->int21_byuser[0]))
goto vector;
/*
* Fetch intr handler info from "real-mode" IDT based at addr 0 in
* the user address space.
*/
if (copyin((caddr_t)(intrno * sizeof(ihand)), &ihand, sizeof(ihand)))
goto bad;
if (ihand.cs == BIOSSEG)
goto vector;
/*
* Otherwise, push flags, cs, eip, and jump to handler to
* simulate direct INT call.
*/
ip = tf->tf_eip;
sp = tf->tf_esp;
ss = tf->tf_ss;
threeshorts.short1 = ip;
threeshorts.short2 = cs;
SP_ADJUST(-6);
threeshorts.short3 = get_vflags(p);
if (copyout(&threeshorts, ADDR(ss, sp), sizeof(threeshorts)))
goto bad;
tf->tf_eip = ihand.ip;
tf->tf_cs = ihand.cs;
/* disable further "hardware" interrupts, turn off any tracing. */
VM86_EFLAGS(p) &= ~PSL_VIF;
tf->tf_eflags &= ~(PSL_VIF|PSL_T);
return;
vector:
return_to_32bit(p, VM86_MAKEVAL(VM86_INTx, intrno));
return;
bad:
return_to_32bit(p, VM86_UNKNOWN);
return;
}
static void
return_to_32bit(p, retval)
struct proc *p;
int retval;
{
/*
* We can't set the virtual flags in our real trap frame,
* since it's used to jump to the signal handler. Instead we
* let sendsig() pull in the VM86_EFLAGS bits.
*/
if (p->p_sigmask & sigmask(SIGURG)) {
#ifdef DIAGNOSTIC
printf("pid %d killed on VM86 protocol screwup (SIGURG blocked)\n",
p->p_pid);
#endif
sigexit(p, SIGILL);
/* NOTREACHED */
}
trapsignal(p, SIGURG, retval);
}
#define CLI 0xFA
#define STI 0xFB
#define INTxx 0xCD
#define IRET 0xCF
#define OPSIZ 0x66
#define INT3 0xCC /* Actually the process gets 32-bit IDT to handle it */
#define LOCK 0xF0
#define PUSHF 0x9C
#define POPF 0x9D
/*
* Handle a GP fault that occurred while in VM86 mode. Things that are easy
* to handle here are done here (much more efficient than trapping to 32-bit
* handler code and then having it restart VM86 mode).
*/
void
vm86_gpfault(p, type)
struct proc *p;
int type;
{
struct trapframe *tf = p->p_md.md_regs;
/*
* we want to fetch some stuff from the current user virtual
* address space for checking. remember that the frame's
* segment selectors are real-mode style selectors.
*/
u_short tmpshort; /* for fetching */
u_int tmpint; /* for fetching */
struct { u_short short1, short2, short3; } threeshorts;
struct { u_int int1, int2, int3; } threeints;
u_short ip, cs, sp, ss;
ip = tf->tf_eip;
cs = tf->tf_cs;
sp = tf->tf_esp;
ss = tf->tf_ss;
/*
* For most of these, we must set all the registers before calling
* macros/functions which might do a return_to_32bit.
*/
switch (fubyte(ADDR(cs, ip))) {
case CLI:
/* simulate handling of IF */
IP_ADVANCE(1);
VM86_EFLAGS(p) &= ~PSL_VIF;
tf->tf_eflags &= ~PSL_VIF;
return;
case STI:
/* simulate handling of IF.
* XXX the i386 enables interrupts one instruction later.
* code here is wrong, but much simpler than doing it Right.
*/
IP_ADVANCE(1);
set_vif(p);
return;
case INTxx:
/* try fast intxx, or return to 32bit mode to handle it. */
IP_ADVANCE(2);
fast_intxx(p, fubyte(ADDR(cs, ip - 1)));
return;
case POPF:
tmpshort = fusword(ADDR(ss, sp));
SP_ADJUST(2);
IP_ADVANCE(1);
set_vflags_short(p, tmpshort);
return;
case PUSHF:
tmpshort = get_vflags(p);
SP_ADJUST(-2);
IP_ADVANCE(1);
susword(ADDR(ss, sp), tmpshort);
return;
case IRET:
/* pop ip, cs, flags */
if (copyin(ADDR(ss, sp), &threeshorts, sizeof(threeshorts)))
break;
tf->tf_eip = threeshorts.short1;
tf->tf_cs = threeshorts.short2;
SP_ADJUST(6);
set_vflags_short(p, threeshorts.short3);
return;
case OPSIZ:
switch (fubyte(ADDR(cs, ip + 1))) {
case POPF: /* popfd */
tmpint = fuword(ADDR(ss, sp));
SP_ADJUST(4);
IP_ADVANCE(2);
set_vflags(p, tmpint);
return;
case PUSHF: /* pushfd */
tmpint = get_vflags(p);
SP_ADJUST(-4);
IP_ADVANCE(2);
suword(ADDR(ss, sp), tmpint);
return;
case IRET:
if (copyin(ADDR(ss, sp), &threeints, sizeof(threeints)))
break;
tf->tf_eip = threeints.int1;
tf->tf_cs = threeints.int2;
SP_ADJUST(12);
set_vflags(p, threeints.int3);
return;
}
break;
case LOCK:
break;
}
return_to_32bit(p, VM86_UNKNOWN);
return;
}
int
i386_vm86(p, args, retval)
struct proc *p;
char *args;
register_t *retval;
{
struct trapframe *tf = p->p_md.md_regs;
struct vm86_kern vm86s;
int psl, err;
if (err = copyin(args, &vm86s, sizeof(vm86s)))
return err;
p->p_addr->u_pcb.vm86_userp = (void *)args;
#define DOVREG(reg) tf->tf_vm86_##reg = (u_short) vm86s.regs.vmsc.sc_##reg
#define DOREG(reg) tf->tf_##reg = (u_short) vm86s.regs.vmsc.sc_##reg
DOVREG(ds);
DOVREG(es);
DOVREG(fs);
DOVREG(gs);
DOREG(edi);
DOREG(esi);
DOREG(ebp);
DOREG(eax);
DOREG(ebx);
DOREG(ecx);
DOREG(edx);
DOREG(eip);
DOREG(cs);
DOREG(esp);
DOREG(ss);
#undef DOVREG
#undef DOREG
SETFLAGS(VM86_EFLAGS(p), vm86s.regs.vmsc.sc_eflags, VM86_FLAGMASK(p)|PSL_VIF);
SETFLAGS(tf->tf_eflags, vm86s.regs.vmsc.sc_eflags, SETDIRECT);
tf->tf_eflags |= PSL_VM;
/*
* Keep mask of flags we simulate to simulate a particular type of
* processor.
*/
switch (vm86s.ss_cpu_type) {
case VCPU_086:
case VCPU_186:
case VCPU_286:
VM86_FLAGMASK(p) = 0;
break;
case VCPU_386:
VM86_FLAGMASK(p) = PSL_NT|PSL_IOPL;
break;
case VCPU_486:
VM86_FLAGMASK(p) = PSL_AC|PSL_NT|PSL_IOPL;
break;
case VCPU_586:
default:
VM86_FLAGMASK(p) = PSL_ID|PSL_AC|PSL_NT|PSL_IOPL;
break;
}
/* Going into vm86 mode jumps off the signal stack. */
p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
return (EJUSTRETURN);
}
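
The subtle part of vm86.c is the eflags bookkeeping: PSL_I and the emulated IOPL/NT/AC/ID bits never reach the live EFLAGS while the process is in vm86 mode; they live in pcb.vm86_eflags under the per-CPU-type mask, with PSL_VIF standing in for the guest's interrupt flag. The standalone sketch below (illustration only, not part of the commit) walks one POPF/PUSHF round trip through the same SETFLAGS merge; the "direct" mask is a simplified stand-in for the SETDIRECT/GETDIRECT masks, and the PSL_* values are the architectural i386 EFLAGS bit positions.

/*
 * Illustration only: one POPF/PUSHF round trip through the flag
 * virtualization used by set_vflags_short()/get_vflags() above.
 * SETFLAGS is the macro from machine/vm86.h; "direct" is a simplified
 * stand-in for SETDIRECT/GETDIRECT (the real ones also exclude
 * PSL_USERSTATIC and add the MBZ bits back on reads).
 */
#include <stdio.h>

#define SETFLAGS(targ, new, newmask) \
	(targ) = ((targ) & ~(newmask)) | ((new) & (newmask))

#define PSL_C    0x00000001	/* carry */
#define PSL_I    0x00000200	/* interrupt enable */
#define PSL_IOPL 0x00003000	/* I/O privilege level */
#define PSL_NT   0x00004000	/* nested task */
#define PSL_VM   0x00020000	/* virtual 8086 mode */
#define PSL_VIF  0x00080000	/* virtual interrupt flag */

int
main()
{
	unsigned vm86_eflags = 0;			/* pcb.vm86_eflags */
	unsigned vm86_flagmask = PSL_NT|PSL_IOPL;	/* VCPU_386 mask */
	unsigned tf_eflags = PSL_VM|PSL_I;		/* live trapframe */
	unsigned direct = ~(PSL_I|PSL_IOPL|PSL_NT) & 0xffff;
	unsigned popped = PSL_I|PSL_IOPL|PSL_C;		/* guest POPFs this */
	unsigned pushed = 0;				/* guest PUSHFs later */

	/* set_vflags_short(): virtual bits to the pcb, direct bits to the frame */
	SETFLAGS(vm86_eflags, popped, vm86_flagmask & 0xffff);
	SETFLAGS(tf_eflags, popped, direct);
	if (popped & PSL_I)
		vm86_eflags |= PSL_VIF;			/* set_vif() */

	/* get_vflags(): reassemble what the guest is allowed to see */
	SETFLAGS(pushed, vm86_eflags, vm86_flagmask);
	SETFLAGS(pushed, tf_eflags, direct);
	if (vm86_eflags & PSL_VIF)
		pushed |= PSL_I;

	/* Prints 0x83000, 0x20201, 0x3201: the guest believes IF and IOPL
	   changed, but the hardware frame never picked them up. */
	printf("pcb vm86_eflags %#x, real tf_eflags %#x, guest sees %#x\n",
	    vm86_eflags, tf_eflags, pushed);
	return 0;
}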

sys/arch/i386/include/pcb.h

@@ -1,4 +1,4 @@
-/* $NetBSD: pcb.h,v 1.20 1995/10/11 04:20:16 mycroft Exp $ */
+/* $NetBSD: pcb.h,v 1.21 1996/01/08 13:51:42 mycroft Exp $ */
 
 /*-
  * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@@ -53,6 +53,8 @@
 #include <machine/npx.h>
 #include <machine/sysarch.h>
 
+#define NIOPORTS 1024 /* # of ports we allow to be mapped */
+
 struct pcb {
 	struct i386tss pcb_tss;
 #define pcb_cr3 pcb_tss.tss_cr3
@@ -73,7 +75,10 @@ struct pcb {
 	int pcb_flags;
 #define PCB_USER_LDT 0x01 /* has user-set LDT */
 	caddr_t pcb_onfault; /* copyin/out fault recovery */
-	u_long pcb_iomap[1024/32]; /* I/O bitmap */
+	int vm86_eflags; /* virtual eflags for vm86 mode */
+	int vm86_flagmask; /* flag mask for vm86 mode */
+	void *vm86_userp; /* XXX performance hack */
+	u_long pcb_iomap[NIOPORTS/32]; /* I/O bitmap */
 };
 
 /*

sys/arch/i386/include/signal.h

@@ -1,4 +1,4 @@
-/* $NetBSD: signal.h,v 1.5 1995/05/01 14:14:11 mycroft Exp $ */
+/* $NetBSD: signal.h,v 1.6 1996/01/08 13:51:43 mycroft Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1991 Regents of the University of California.
@@ -65,6 +65,7 @@ struct sigcontext {
 	int sc_edx;
 	int sc_ecx;
 	int sc_eax;
+	/* XXX */
 	int sc_eip;
 	int sc_cs;
 	int sc_eflags;
@@ -73,6 +74,9 @@ struct sigcontext {
 	int sc_onstack; /* sigstack state to restore */
 	int sc_mask; /* signal mask to restore */
+
+	int sc_trapno; /* XXX should be above */
+	int sc_err;
 };
 
 #define sc_sp sc_esp

sys/arch/i386/include/sysarch.h

@@ -1,4 +1,4 @@
-/* $NetBSD: sysarch.h,v 1.7 1995/10/11 04:20:26 mycroft Exp $ */
+/* $NetBSD: sysarch.h,v 1.8 1996/01/08 13:51:44 mycroft Exp $ */
 
 #ifndef _I386_SYSARCH_H_
 #define _I386_SYSARCH_H_
@@ -11,6 +11,7 @@
 #define I386_IOPL 2
 #define I386_GET_IOPERM 3
 #define I386_SET_IOPERM 4
+#define I386_VM86 5
 
 struct i386_get_ldt_args {
 	int start;

sys/arch/i386/include/vm86.h (new file, 79 lines)

@@ -0,0 +1,79 @@
/* $NetBSD: vm86.h,v 1.1 1996/01/08 13:51:45 mycroft Exp $ */
/*
* Copyright (c) 1995 John T. Kohl
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#define SETFLAGS(targ, new, newmask) (targ) = ((targ) & ~(newmask)) | ((new) & (newmask))
#define VM86_EFLAGS(p) ((p)->p_addr->u_pcb.vm86_eflags)
#define VM86_FLAGMASK(p) ((p)->p_addr->u_pcb.vm86_flagmask)
#define VM86_TYPE(x) ((x) & 0xff)
#define VM86_ARG(x) (((x) & 0xff00) >> 8)
#define VM86_MAKEVAL(type,arg) ((type) | (((arg) & 0xff) << 8))
#define VM86_STI 0
#define VM86_INTx 1
#define VM86_SIGNAL 2
#define VM86_UNKNOWN 3
struct vm86_regs {
struct sigcontext vmsc;
};
struct vm86_kern { /* kernel uses this stuff */
struct vm86_regs regs;
unsigned long ss_cpu_type;
};
#define cpu_type substr.ss_cpu_type
/*
* Kernel keeps copy of user-mode address of this, but doesn't copy it in.
*/
struct vm86_struct {
struct vm86_kern substr;
unsigned long screen_bitmap; /* not used/supported (yet) */
unsigned long flags; /* not used/supported (yet) */
unsigned char int_byuser[32]; /* 256 bits each: pass control to user */
unsigned char int21_byuser[32]; /* otherwise, handle directly */
};
#define BIOSSEG 0x0f000
#define VCPU_086 0
#define VCPU_186 1
#define VCPU_286 2
#define VCPU_386 3
#define VCPU_486 4
#define VCPU_586 5
#ifdef _KERNEL
int i386_vm86 __P((struct proc *, char *, register_t *));
void vm86_gpfault __P((struct proc *, int));
#else
int i386_vm86 __P((struct vm86_struct *vmcp));
#endif
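
From userland, the whole facility is reached through sysarch(I386_VM86): the kernel half above either drops the process into vm86 execution (EJUSTRETURN) or delivers SIGURG carrying one of the VM86_* codes. A rough sketch of a client follows, assuming a libi386-style i386_vm86() stub matching the non-kernel prototype above, the usual BSD three-argument signal-handler convention, PSL_* constants from <machine/psl.h>, and a real-mode image already mapped into the low megabyte of the caller's address space.

/*
 * Hypothetical client of the new interface -- not part of this commit.
 * Segment/offset values and the INT filtering policy are made up for
 * illustration.
 */
#include <sys/types.h>
#include <signal.h>
#include <string.h>
#include <stdio.h>

#include <machine/psl.h>
#include <machine/vm86.h>

static struct vm86_struct vm;

static void
urg_handler(sig, code, scp)
	int sig, code;
	struct sigcontext *scp;
{
	switch (VM86_TYPE(code)) {
	case VM86_INTx:
		/* an INT we asked to see; vm86 registers are in *scp */
		printf("INT %#x at %04x:%04x\n", VM86_ARG(code),
		    scp->sc_cs, scp->sc_eip);
		break;
	case VM86_STI:
		/* virtual interrupts enabled while one was pending */
		break;
	case VM86_UNKNOWN:
	default:
		/* opcode the kernel would not simulate */
		break;
	}
	/* Returning runs sigreturn(), which drops back into vm86 mode
	   with whatever register values are left in *scp. */
}

int
main()
{
	signal(SIGURG, (void (*)(int))urg_handler);

	vm.substr.ss_cpu_type = VCPU_386;

	/* start the guest at 2000:0000 with a stack at 3000:fffe */
	vm.substr.regs.vmsc.sc_cs = 0x2000;
	vm.substr.regs.vmsc.sc_eip = 0x0000;
	vm.substr.regs.vmsc.sc_ss = 0x3000;
	vm.substr.regs.vmsc.sc_esp = 0xfffe;
	vm.substr.regs.vmsc.sc_ds = vm.substr.regs.vmsc.sc_es = 0x2000;
	vm.substr.regs.vmsc.sc_fs = vm.substr.regs.vmsc.sc_gs = 0x2000;
	vm.substr.regs.vmsc.sc_eflags = PSL_VIF;	/* virtual IF on */

	/* take every INT ourselves except INT 10h, which the kernel may
	   dispatch straight through the real-mode IDT at address 0 */
	memset(vm.int_byuser, 0xff, sizeof(vm.int_byuser));
	vm.int_byuser[0x10 >> 3] &= ~(1 << (0x10 & 7));

	/* On success this call does not return: the process continues in
	   vm86 mode at cs:ip until a SIGURG (or other signal) arrives. */
	return i386_vm86(&vm);
}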