Integration of many changes done by Matthias Pfaller with a few by me.

clock.c		* Removed definition of DELAY.

intr.c		* Removed an unneeded $Id:....$

locore.s	* Moved some of the low level initialization
		  code to machdep.c.
		* Defined proc_trampoline.
		* Changed sigcode to pass scp to SYS_sigreturn.
		* Changed copyin/copyout/fu*/su* to take advantage
		  of the ns32532's dual address instructions.
		* Recoded copyinstr/copyoutstr/copystr in assembler
		  (a C sketch of the copystr contract follows this list).
		* Added a new and faster version of bzero.
		  This makes bzero.s unnecessary.
		* Defined suswintr to make profiling work.
		* Recoded cpu_switch, modelled after the i386 version.
		* Added support for lazy fpu state restore to
		  cpu_switch.
		* Recoded trap handling code to be more readable.
		* Added experimental code for single cacheline
		  invalidation.
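
The assembler routines keep the usual BSD contract. As a reference point, here is a hedged C sketch of what copystr is expected to do (illustrative only; the committed code is ns32k assembler in locore.s, and the _sketch name below is invented for this note):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hedged C rendition of the copystr() contract: copy a NUL-terminated
 * string of at most maxlen bytes (including the NUL) from kfaddr to
 * kdaddr; if lencopied is non-NULL, report how many bytes were copied.
 * Returns 0 on success or ENAMETOOLONG if the string did not fit.
 * copyinstr/copyoutstr layer user/kernel fault handling on the same loop.
 */
int
copystr_sketch(const void *kfaddr, void *kdaddr, size_t maxlen,
    size_t *lencopied)
{
	const char *from = kfaddr;
	char *to = kdaddr;
	size_t copied = 0;

	while (copied < maxlen) {
		copied++;
		if ((*to++ = *from++) == '\0') {
			if (lencopied != NULL)
				*lencopied = copied;
			return 0;
		}
	}
	if (lencopied != NULL)
		*lencopied = copied;
	return ENAMETOOLONG;
}

int
main(void)
{
	char buf[8];
	size_t len;

	printf("%d (copied %lu bytes)\n",
	    copystr_sketch("pc532", buf, sizeof(buf), &len),
	    (unsigned long)len);
	return 0;
}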

machdep.c	* Copied over cpu_startup from i386/i386/machdep.c.
		* Changed sys_sigreturn to take advantage of the
		  argument passed by the trampoline code.
		* Changed boot to call doshutdownhooks and to store
		  machine state in case of a panic (a toy model of the
		  hook mechanism follows this list).
		* Changed setregs to clear the fpu registers.
		* Recoded low_level_init. It's now called init532.
		* cpu_reset: New function, resets the machine.
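
The doshutdownhooks change means boot() now runs registered driver shutdown hooks before halting or resetting. As a rough illustration of the pattern only (the real kernel interface is shutdownhook_establish()/doshutdownhooks(); everything below is a stand-alone toy with invented names):

#include <stdio.h>
#include <stdlib.h>

struct hook {
	void		(*fn)(void *);
	void		*arg;
	struct hook	*next;
};

static struct hook *hooks;

static void
hook_establish(void (*fn)(void *), void *arg)
{
	struct hook *h = malloc(sizeof(*h));

	if (h == NULL)
		abort();
	h->fn = fn;
	h->arg = arg;
	h->next = hooks;		/* most recent registration runs first */
	hooks = h;
}

static void
do_hooks(void)
{
	struct hook *h;

	for (h = hooks; h != NULL; h = h->next)
		(*h->fn)(h->arg);
}

static void
quiesce(void *arg)
{
	printf("shutting down %s\n", (const char *)arg);
}

int
main(void)
{
	hook_establish(quiesce, "scn0");
	hook_establish(quiesce, "ncr0");
	do_hooks();			/* what boot() does before halt/reset */
	return 0;
}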

trap.c		* Pulled over from i386/i386/trap.c.
		* Added support for lazy saved/restored fpu state.
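
Lazy FPU handling means cpu_switch no longer saves and restores the FPU on every context switch; instead the first FPU instruction executed by a process that does not own the FPU raises an undefined-instruction trap, and the T_UND handler in the trap.c hunk below performs the deferred save/restore and updates fpu_proc. A stand-alone simulation of the idea (all names invented, no real FPU involved):

#include <stdio.h>
#include <string.h>

/*
 * fpu_hw stands in for the physical FPU register file, fpu_owner for
 * the kernel's fpu_proc pointer.  cpu_switch would merely disable the
 * FPU; the first FPU use by a non-owner then "traps" and the handler
 * performs the deferred save/restore.
 */
struct proc_sim {
	const char	*name;
	double		fpu_save[4];	/* saved FPU state in the "pcb" */
};

static double		fpu_hw[4];	/* the one physical register file */
static struct proc_sim	*fpu_owner;	/* whose state is live in fpu_hw */

/* The simulated undefined-instruction trap. */
static void
fpu_trap(struct proc_sim *p)
{
	if (fpu_owner == p)
		return;					/* already live */
	if (fpu_owner != NULL)				/* save old owner */
		memcpy(fpu_owner->fpu_save, fpu_hw, sizeof(fpu_hw));
	memcpy(fpu_hw, p->fpu_save, sizeof(fpu_hw));	/* load new owner */
	fpu_owner = p;
}

/* A process touches the FPU: take the lazy trap first, then compute. */
static void
use_fpu(struct proc_sim *p, double v)
{
	fpu_trap(p);
	fpu_hw[0] += v;
}

int
main(void)
{
	struct proc_sim a = { "a", { 0 } }, b = { "b", { 0 } };

	use_fpu(&a, 1.0);	/* a becomes the owner */
	use_fpu(&b, 2.0);	/* a's state saved, b's loaded */
	use_fpu(&a, 3.0);	/* and back again */
	printf("a (live): %g, b (saved): %g\n", fpu_hw[0], b.fpu_save[0]);
	return 0;
}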

vm_machdep.c	* Removed kstack double mapping by pulling over a lot
		  of code from i386/i386/vm_machdep.c (a user-space
		  analogue of the new cpu_fork setup follows this list).
		* Added support for lazy saved/restored fpu state.
		* Moved freeing of process resources from cpu_wait
		  to cpu_exit.
		* Pulled over cpu_coredump, pagemove, vmapbuf and
		  vunmapbuf from i386/i386/vm_machdep.c.
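
With the kstack double mapping gone, cpu_fork primes a switchframe on the child's kernel stack so that cpu_switch "returns" into proc_trampoline, which calls child_return(p) and then drops back to user mode (see the vm_machdep.c hunk below). A user-space analogue of priming a context to start at a chosen function with a chosen argument is makecontext(3); a small sketch, purely illustrative:

#include <stdio.h>
#include <ucontext.h>

/*
 * In the kernel the frame is built by hand and cpu_switch "returns"
 * into proc_trampoline; here makecontext(3) does the equivalent
 * bookkeeping.
 */
static ucontext_t main_ctx, child_ctx;

static void
child_entry(int arg)
{
	printf("\"child\" starts with arg %d\n", arg);
	/* Falling off the end resumes main_ctx via uc_link. */
}

int
main(void)
{
	static char stack[64 * 1024];

	getcontext(&child_ctx);
	child_ctx.uc_stack.ss_sp = stack;
	child_ctx.uc_stack.ss_size = sizeof(stack);
	child_ctx.uc_link = &main_ctx;		/* where to go when it returns */
	makecontext(&child_ctx, (void (*)(void))child_entry, 1, 42);

	swapcontext(&main_ctx, &child_ctx);	/* like cpu_switch picking the child */
	printf("\"parent\" resumes\n");
	return 0;
}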

pmap.c		* Pulled over from i386/i386/pmap.c.

genassym.c	* Removed old and unused definitions, added new ones.

sys_machdep.c	* Moved sys_sysarch from machdep.c to sys_machdep.c.

process_machdep.c	* Changed to work without kstack double mapping.
			* Changed to work with lazy saved/restored fpu state.
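
Because FPU state is now saved lazily, the ptrace/procfs register accessors have to reconcile the live FPU contents with the copy in the pcb before using it, as the process_machdep.c hunk below does. A stand-alone model of that rule (all names invented here):

#include <stdio.h>
#include <string.h>

/*
 * If the target process currently owns the FPU, the live register
 * file must be written back to the pcb before it is read, and
 * ownership must be dropped before the pcb copy is overwritten.
 */
struct proc_sim {
	double	pcb_fpu[2];		/* saved copy in the "pcb" */
};

static double		fpu_hw[2];	/* live FPU registers */
static struct proc_sim	*fpu_owner;

static void
read_fpregs(struct proc_sim *p, double *out)
{
	if (fpu_owner == p) {			/* live state is newer */
		memcpy(p->pcb_fpu, fpu_hw, sizeof(fpu_hw));
		fpu_owner = NULL;
	}
	memcpy(out, p->pcb_fpu, sizeof(p->pcb_fpu));
}

static void
write_fpregs(struct proc_sim *p, const double *in)
{
	if (fpu_owner == p)			/* live state is now stale */
		fpu_owner = NULL;
	memcpy(p->pcb_fpu, in, sizeof(p->pcb_fpu));
}

int
main(void)
{
	struct proc_sim p = { { 0, 0 } };
	double regs[2] = { 1.5, 2.5 };

	write_fpregs(&p, regs);
	memcpy(fpu_hw, p.pcb_fpu, sizeof(fpu_hw));	/* p then uses the FPU... */
	fpu_owner = &p;
	fpu_hw[0] = 9.0;				/* ...and changes a register */
	read_fpregs(&p, regs);
	printf("%g %g\n", regs[0], regs[1]);		/* prints 9 2.5 */
	return 0;
}
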
phil, 1996-01-31 21:33:42 +00:00
parent 9836ea60d6, commit ccbcfbef91
11 changed files with 3122 additions and 3414 deletions


@ -1,97 +0,0 @@
/* $NetBSD: bzero.s,v 1.3 1996/01/26 08:11:47 phil Exp $ */
/*
* Mach Operating System
* Copyright (c) 1992 Carnegie Mellon University
* Copyright (c) 1992 Helsinki University of Technology
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON AND HELSINKI UNIVERSITY OF TECHNOLOGY ALLOW FREE USE
* OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON AND
* HELSINKI UNIVERSITY OF TECHNOLOGY DISCLAIM ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* File: ns532/bzero.s
* Author: Tero Kivinen, Helsinki University of Technology 1992.
*
* $Id: bzero.s,v 1.3 1996/01/26 08:11:47 phil Exp $
*/
/*
* bzero(char * addr, unsigned int length)
*/
.text
ENTRY(bzero)
enter [],0
movd B_ARG0,r1 /* addr */
movd B_ARG1,r2 /* length */
movd r1,r0 /* align addr */
andd 3,r0
cmpqd 0,r0
beq wstart /* already aligned */
negd r0,r0
addqd 4,r0
cmpd r0,r2
bhi bytes /* not enough data to align */
b1loop: movqb 0,0(r1) /* zero bytes */
addqd 1,r1
addqd -1,r2
acbd -1,r0,b1loop
wstart: movd r2,r0 /* length */
lshd -6,r0
cmpqd 0,r0
beq phase2
w1loop: movqd 0,0(r1) /* zero words */
movqd 0,4(r1)
movqd 0,8(r1)
movqd 0,12(r1)
movqd 0,16(r1)
movqd 0,20(r1)
movqd 0,24(r1)
movqd 0,28(r1)
movqd 0,32(r1)
movqd 0,36(r1)
movqd 0,40(r1)
movqd 0,44(r1)
movqd 0,48(r1)
movqd 0,52(r1)
movqd 0,56(r1)
movqd 0,60(r1)
addd 64,r1
acbd -1,r0,w1loop
phase2: movd r2,r0 /* length */
andd 63,r0
lshd -2,r0
cmpqd 0,r0
beq bytes
w2loop: movqd 0,0(r1)
addqd 4,r1
acbd -1,r0,w2loop
bytes: movd r2,r0 /* length */
andd 3,r0
cmpqd 0,r0
beq done
bloop: movqb 0,0(r1) /* zero bytes */
addqd 1,r1
acbb -1,r0,bloop
done: exit []
ret 0
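
The strategy of the removed assembler (and of the new bzero in locore.s, per the log above) is: zero leading bytes until the pointer is word-aligned, clear 64 bytes per iteration with word stores, then finish the remaining words and bytes. A hedged C rendition, assuming 32-bit words as on the ns32532 (illustrative only; the committed code is the assembler above):

#include <stdio.h>
#include <string.h>

static void
bzero_sketch(void *addr, unsigned int length)
{
	unsigned char *p = addr;
	unsigned int *wp;

	while (length > 0 && ((unsigned long)p & 3) != 0) {	/* align */
		*p++ = 0;
		length--;
	}
	wp = (unsigned int *)(void *)p;
	while (length >= 64) {			/* the unrolled loop above */
		wp[0] = wp[1] = wp[2] = wp[3] = 0;
		wp[4] = wp[5] = wp[6] = wp[7] = 0;
		wp[8] = wp[9] = wp[10] = wp[11] = 0;
		wp[12] = wp[13] = wp[14] = wp[15] = 0;
		wp += 16;
		length -= 64;
	}
	while (length >= 4) {			/* remaining whole words */
		*wp++ = 0;
		length -= 4;
	}
	p = (unsigned char *)wp;
	while (length-- > 0)			/* trailing bytes */
		*p++ = 0;
}

int
main(void)
{
	char buf[100];

	memset(buf, 0xff, sizeof(buf));
	bzero_sketch(buf + 1, sizeof(buf) - 2);
	printf("%u %u %u\n", (unsigned char)buf[0],
	    (unsigned char)buf[1], (unsigned char)buf[99]);
	return 0;
}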


@ -1,4 +1,4 @@
/* $NetBSD: clock.c,v 1.11 1995/05/16 07:30:46 phil Exp $ */
/* $NetBSD: clock.c,v 1.12 1996/01/31 21:33:47 phil Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -82,13 +82,6 @@ spinwait(int millisecs)
DELAY(5000 * millisecs);
}
DELAY(n)
{
volatile int N = (n);
while (--N > 0)
;
}
void
setstatclockrate(int dummy)
{


@ -1,4 +1,4 @@
/* $NetBSD: genassym.c,v 1.8 1995/06/09 05:59:58 phil Exp $ */
/* $NetBSD: genassym.c,v 1.9 1996/01/31 21:33:52 phil Exp $ */
/*-
* Copyright (c) 1982, 1990 The Regents of the University of California.
@ -45,80 +45,73 @@
#include <vm/vm.h>
#include <sys/user.h>
#include <machine/cpu.h>
#include <machine/trap.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <stdio.h>
main()
{
struct proc *p = (struct proc *)0;
struct user *up = (struct user *)0;
struct rusage *rup = (struct rusage *)0;
struct uprof *uprof = (struct uprof *)0;
struct pcb *pcb = (struct pcb *)0;
struct on_stack *regs = (struct on_stack *)0;
struct iv *iv = (struct iv *)0;
struct proc *p = 0;
struct vmmeter *vm = 0;
struct pcb *pcb = 0;
struct sigframe *sigf = 0;
struct on_stack *regs = 0;
struct iv *iv = 0;
register unsigned i;
printf("#define\tKERNBASE 0x%x\n", KERNBASE);
printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
printf("#define\tP_FORW %d\n", &p->p_forw);
printf("#define\tP_BACK %d\n", &p->p_back);
printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
printf("#define\tP_ADDR %d\n", &p->p_addr);
printf("#define\tP_PRIORITY %d\n", &p->p_priority);
printf("#define\tP_STAT %d\n", &p->p_stat);
printf("#define\tP_WCHAN %d\n", &p->p_wchan);
printf("#define\tP_FLAG %d\n", &p->p_flag);
printf("#define\tP_PID %d\n", &p->p_pid);
#define def(N,V) printf("#define\t%s %d\n", N, V)
printf("#define\tSSLEEP %d\n", SSLEEP);
printf("#define\tSRUN %d\n", SRUN);
printf("#define\tUPAGES %d\n", UPAGES);
printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
printf("#define\tCLSIZE %d\n", CLSIZE);
printf("#define\tNBPG %d\n", NBPG);
printf("#define\tNPTEPG %d\n", NPTEPG);
printf("#define\tPGSHIFT %d\n", PGSHIFT);
printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE);
printf("#define\tUSRPTSIZE %d\n", USRPTSIZE);
def("SRUN", SRUN);
printf("#define\tKERN_STK_START 0x%x\n",
USRSTACK + UPAGES*NBPG);
printf("#define\tKSTK_SIZE %d\n", UPAGES*NBPG);
printf("#define\tON_STK_SIZE %d\n", sizeof(struct on_stack));
printf("#define\tREGS_USP %d\n", &regs->pcb_usp);
printf("#define\tREGS_FP %d\n", &regs->pcb_fp);
printf("#define\tREGS_SB %d\n", &regs->pcb_sb);
printf("#define\tREGS_PSR %d\n", &regs->pcb_psr);
def("PDSHIFT", PDSHIFT);
def("PGSHIFT", PGSHIFT);
def("PGOFSET", PGOFSET);
def("NBPG", NBPG);
printf("#define\tPCB_ONSTACK %d\n", &pcb->pcb_onstack);
printf("#define\tPCB_FSR %d\n", &pcb->pcb_fsr);
def("PTDPTDI", PTDPTDI);
def("KPTDI", KPTDI);
def("NKPDE", NKPDE);
def("APTDPTDI", APTDPTDI);
def("KERNBASE", KERNBASE);
def("VM_MAXUSER_ADDRESS", VM_MAXUSER_ADDRESS);
def("P_ADDR", &p->p_addr);
def("P_BACK", &p->p_back);
def("P_FORW", &p->p_forw);
def("P_PRIORITY", &p->p_priority);
def("P_STAT", &p->p_stat);
def("P_WCHAN", &p->p_wchan);
def("P_VMSPACE", &p->p_vmspace);
def("P_FLAG", &p->p_flag);
def("P_PID", &p->p_pid);
def("V_INTR", &vm->v_intr);
def("PCB_ONSTACK", &pcb->pcb_onstack);
def("PCB_FSR", &pcb->pcb_fsr);
for (i=0; i<8; i++)
printf("#define\tPCB_F%d %d\n", i, &pcb->pcb_freg[i]);
printf("#define\tPCB_KSP %d\n", &pcb->pcb_ksp);
printf("#define\tPCB_KFP %d\n", &pcb->pcb_kfp);
printf("#define\tPCB_PTB %d\n", &pcb->pcb_ptb);
printf("#define\tPCB_PL %d\n", &pcb->pcb_pl);
printf("#define\tPCB_FLAGS %d\n", &pcb->pcb_flags);
printf("#define\tPCB_ONFAULT %d\n", &pcb->pcb_onfault);
printf("#define\tPCB_F%d %d\n", i, &pcb->pcb_freg[i]);
def("PCB_KSP", &pcb->pcb_ksp);
def("PCB_KFP", &pcb->pcb_kfp);
def("PCB_PTB", &pcb->pcb_ptb);
def("PCB_ONFAULT", &pcb->pcb_onfault);
printf("#define\tV_TRAP %d\n", &vm->v_trap);
printf("#define\tV_INTR %d\n", &vm->v_intr);
def("ON_STK_SIZE", sizeof(struct on_stack));
def("REGS_USP", &regs->pcb_usp);
def("REGS_FP", &regs->pcb_fp);
def("REGS_SB", &regs->pcb_sb);
def("REGS_PSR", &regs->pcb_psr);
printf("#define\tIV_VEC %d\n", &iv->iv_vec);
printf("#define\tIV_ARG %d\n", &iv->iv_arg);
printf("#define\tIV_CNT %d\n", &iv->iv_cnt);
printf("#define\tIV_USE %d\n", &iv->iv_use);
def("SIGF_HANDLER", &sigf->sf_handler);
def("SIGF_SC", &sigf->sf_sc);
def("IV_VEC", &iv->iv_vec);
def("IV_ARG", &iv->iv_arg);
def("IV_CNT", &iv->iv_cnt);
def("IV_USE", &iv->iv_use);
printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
#ifdef SYSVSHM
printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
#endif
printf("#define\tENOENT %d\n", ENOENT);
printf("#define\tEFAULT %d\n", EFAULT);
printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG);
exit(0);
}
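
genassym's job is to emit structure-member offsets as #defines for the assembler sources; the classic trick, visible above, is to take member addresses off a null base pointer and print them as integers. A minimal stand-alone equivalent using offsetof (the struct and names below are invented for illustration):

#include <stddef.h>
#include <stdio.h>

/*
 * The kernel build runs a small host program like this and includes
 * its output from locore.s and friends.
 */
struct pcb_sketch {
	long	pcb_ksp;
	long	pcb_kfp;
	long	pcb_ptb;
};

int
main(void)
{
	printf("#define\tPCB_KSP %lu\n",
	    (unsigned long)offsetof(struct pcb_sketch, pcb_ksp));
	printf("#define\tPCB_KFP %lu\n",
	    (unsigned long)offsetof(struct pcb_sketch, pcb_kfp));
	printf("#define\tPCB_PTB %lu\n",
	    (unsigned long)offsetof(struct pcb_sketch, pcb_ptb));
	return 0;
}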


@ -1,4 +1,4 @@
/* $NetBSD: intr.c,v 1.5 1995/09/26 20:16:26 phil Exp $ */
/* $NetBSD: intr.c,v 1.6 1996/01/31 21:33:53 phil Exp $ */
/*
* Copyright (c) 1994 Matthias Pfaller.
@ -28,8 +28,6 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: intr.c,v 1.5 1995/09/26 20:16:26 phil Exp $
*/
#define DEFINE_SPLX

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,4 +1,4 @@
/* $NetBSD: process_machdep.c,v 1.8 1995/09/26 20:16:32 phil Exp $ */
/* $NetBSD: process_machdep.c,v 1.9 1996/01/31 21:34:02 phil Exp $ */
/*
* Copyright (c) 1993 The Regents of the University of California.
@ -73,17 +73,13 @@
#include <machine/reg.h>
#include <machine/frame.h>
extern struct proc *fpu_proc;
static inline struct reg *
process_regs(p)
struct proc *p;
{
void *ptr;
if ((p->p_flag & P_INMEM) == 0)
return (NULL);
ptr = (char *)p->p_addr + ((char *)p->p_md.md_regs - (char *)USRSTACK);
return (ptr);
return ((struct reg *) p->p_md.md_regs);
}
int
@ -127,6 +123,10 @@ process_read_fpregs(p, regs)
if ((p->p_flag & P_INMEM) == 0)
return (EIO);
if (fpu_proc == p) {
save_fpu_context(&p->p_addr->u_pcb);
fpu_proc = 0;
}
bcopy(&p->p_addr->u_pcb.pcb_fsr, regs, sizeof(*regs));
return (0);
}
@ -139,7 +139,11 @@ process_write_fpregs(p, regs)
if ((p->p_flag & P_INMEM) == 0)
return (EIO);
if (fpu_proc == p)
fpu_proc = 0;
bcopy(regs, &p->p_addr->u_pcb.pcb_fsr, sizeof(*regs));
return (0);
}


@ -1,4 +1,4 @@
/* $NetBSD: sys_machdep.c,v 1.5 1995/09/26 20:16:34 phil Exp $ */
/* $NetBSD: sys_machdep.c,v 1.6 1996/01/31 21:34:03 phil Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -108,3 +108,17 @@ vdoualarm(arg)
nvualarm--;
}
#endif
int
sys_sysarch(p, v, retval)
struct proc *p;
void *v;
register_t *retval;
{
struct sysarch_args /* {
syscallarg(int) op;
syscallarg(char *) parms;
} */ *uap = v;
return ENOSYS;
}


@ -1,6 +1,8 @@
/* $NetBSD: trap.c,v 1.13 1995/06/09 06:00:10 phil Exp $ */
/* $NetBSD: trap.c,v 1.14 1996/01/31 21:34:04 phil Exp $ */
/*-
* Copyright (c) 1996 Matthias Pfaller. All rights reserved.
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
@ -48,6 +50,7 @@
#include <sys/user.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/signal.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
@ -58,131 +61,246 @@
#include <vm/vm_map.h>
#include <machine/cpu.h>
#include <machine/trap.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/trap.h>
struct proc *fpu_proc; /* Process owning the FPU. */
/*
* Define the code needed before returning to user mode, for
* trap and syscall.
*/
static inline void
userret(p, pc, oticks)
register struct proc *p;
int pc;
u_quad_t oticks;
{
int sig, s;
unsigned rcr2();
extern short cpl;
/* take pending signals */
while ((sig = CURSIG(p)) != 0)
postsig(sig);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
* Since we are curproc, a clock interrupt could
* change our priority without changing run queues
* (the running process is not kept on a run queue).
* If this happened after we setrunqueue ourselves but
* before we switch()'ed, we might not be on the queue
* indicated by our priority.
*/
s = splstatclock();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
splx(s);
while ((sig = CURSIG(p)) != 0)
postsig(sig);
}
/*
* If profiling, charge recent system time to the trapped pc.
*/
if (p->p_flag & P_PROFIL) {
extern int psratio;
addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
}
char *trap_type[] = {
"non-vectored interrupt", /* 0 T_NVI */
"non-maskable interrupt", /* 1 T_NMI */
"abort trap", /* 2 T_ABT */
"coprocessor trap", /* 3 T_SLAVE */
"illegal operation in user mode", /* 4 T_ILL */
"supervisor call", /* 5 T_SVC */
"divide by zero", /* 6 T_DVZ */
"flag instruction", /* 7 T_FLG */
"breakpoint instruction", /* 8 T_BPT */
"trace trap", /* 9 T_TRC */
"undefined instruction", /* 10 T_UND */
"restartable bus error", /* 11 T_RBE */
"non-restartable bus error", /* 12 T_NBE */
"integer overflow trap", /* 13 T_OVF */
"debug trap", /* 14 T_DBG */
"reserved trap", /* 15 T_RESERVED */
"unused", /* 16 unused */
"watchpoint", /* 17 T_WATCHPOINT */
"asynchronous system trap" /* 18 T_AST */
};
int trap_types = sizeof trap_type / sizeof trap_type[0];
#ifdef DEBUG
int trapdebug = 0;
#endif
/*
* trap(frame):
* Exception, fault, and trap interface to BSD kernel. This
* common code is called from assembly language IDT gate entry
* common code is called from assembly language trap vector
* routines that prepare a suitable stack frame, and restore this
* frame after the exception has been processed. Note that the
* effect is as if the arguments were passed call by reference.
*/
/*ARGSUSED*/
void
trap(frame)
struct trapframe frame;
{
register int i;
register struct proc *p = curproc;
struct timeval sticks;
int ucode, type, tear, msr;
int type = frame.tf_trapno;
u_quad_t sticks;
struct pcb *pcb;
extern char fusubail[];
#ifdef CINVSMALL
extern char cinvstart[], cinvend[];
#endif
cnt.v_trap++;
type = frame.tf_trapno;
tear = frame.tf_tear;
msr = frame.tf_msr;
if (curpcb->pcb_onfault && frame.tf_trapno != T_ABT) {
copyfault:
frame.tf_pc = (int)curpcb->pcb_onfault;
return;
}
#ifdef DDB
if (curpcb && curpcb->pcb_onfault) {
if (frame.tf_trapno == T_BPTFLT
|| frame.tf_trapno == T_TRCTRAP)
if (kdb_trap (type, 0, &frame))
return;
#ifdef DEBUG
if (trapdebug) {
printf("trap type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n",
type, frame.tf_pc, frame.tf_tear, frame.tf_msr);
printf("curproc %x\n", curproc);
}
#endif
if (curpcb == 0 || curproc == 0) goto we_re_toast;
if ((frame.tf_psr & PSL_USER) == PSL_USER) {
if (USERMODE(frame.tf_psr)) {
type |= T_USER;
#ifdef notdef
sticks = p->p_stime;
#endif
sticks = p->p_sticks;
p->p_md.md_regs = (int *)&(frame.tf_reg);
}
ucode = 0;
switch (type) {
default:
we_re_toast:
#ifdef KDB
if (kdb_trap(&psl))
return;
#endif
#ifdef DDB
if (kdb_trap (type, 0, &frame))
if (kdb_trap(type, 0, &frame))
return;
#endif
printf("bad trap: type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n",
if (frame.tf_trapno < trap_types)
printf("fatal %s", trap_type[frame.tf_trapno]);
else
printf("unknown trap %d", frame.tf_trapno);
printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor");
printf("trap type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n",
type, frame.tf_pc, frame.tf_tear, frame.tf_msr);
panic("trap");
/*NOTREACHED*/
case T_ABT: /* System level pagefault! */
if (((msr & MSR_STT) == STT_SEQ_INS)
|| ((msr & MSR_STT) == STT_NSQ_INS))
{
printf ("System pagefault: pc=0x%x, tear=0x%x, msr=0x%x\n",
frame.tf_pc, frame.tf_tear, frame.tf_msr);
goto we_re_toast;
}
case T_UND | T_USER: { /* undefined instruction fault */
int opcode, cfg;
extern int _have_fpu;
opcode = fubyte((void *)frame.tf_pc);
#ifndef NS381
if (!_have_fpu) {
#ifdef MATH_EMULATE
int rv;
if ((rv = math_emulate(&frame)) == 0) {
if (frame.tf_psr & PSL_T)
goto trace;
return;
}
#endif
} else
#endif
if (opcode == 0x3e || opcode == 0xbe || opcode == 0xfe) {
sprd(cfg, cfg);
if ((cfg & CFG_F) == 0) {
lprd(cfg, cfg | CFG_F);
if (fpu_proc == p)
return;
pcb = &p->p_addr->u_pcb;
if (fpu_proc != 0)
save_fpu_context(&fpu_proc->p_addr->u_pcb);
restore_fpu_context(pcb);
fpu_proc = p;
return;
}
}
}
/* fall into */
case T_ABT | T_USER: /* User level pagefault! */
/* if (type == (T_ABT | T_USER))
printf ("pagefault: pc=0x%x, tear=0x%x, msr=0x%x\n",
frame.tf_pc, frame.tf_tear, frame.tf_msr); */
{
case T_ILL | T_USER: /* privileged instruction fault */
trapsignal(p, SIGILL, type &~ T_USER);
goto out;
case T_AST | T_USER: /* Allow process switch */
cnt.v_soft++;
if (p->p_flag & P_OWEUPC) {
p->p_flag &= ~P_OWEUPC;
ADDUPROF(p);
}
goto out;
case T_OVF | T_USER:
case T_DVZ | T_USER:
trapsignal(p, SIGFPE, type &~ T_USER);
goto out;
case T_SLAVE | T_USER: {
int fsr;
#ifdef MATH_IEEE
int rv;
if ((rv = math_ieee(&frame)) == 0) {
if (frame.tf_psr & PSL_T)
goto trace;
return;
}
#endif
sfsr(fsr);
trapsignal(p, SIGFPE, 0x80000000 | fsr);
goto out;
}
case T_ABT: /* allow page faults in kernel mode */
if ((frame.tf_msr & MSR_STT) == STT_SEQ_INS ||
(frame.tf_msr & MSR_STT) == STT_NSQ_INS ||
(p == 0))
goto we_re_toast;
pcb = &p->p_addr->u_pcb;
/*
* fusubail is used by [fs]uswintr() to prevent page faulting
* from inside the profiling interrupt.
*/
if (pcb->pcb_onfault == fusubail)
goto copyfault;
#ifdef CINVSMALL
/*
* If a address translation for a cache invalidate
* request fails, reset the pc and return.
*/
if ((unsigned int)frame.tf_pc >= (unsigned int)cinvstart &&
(unsigned int)frame.tf_pc < (unsigned int)cinvend) {
frame.tf_pc = (int)cinvend;
return;
}
#endif
/* FALLTHROUGH */
case T_ABT | T_USER: { /* page fault */
register vm_offset_t va;
register struct vmspace *vm = p->p_vmspace;
register vm_map_t map;
int rv;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss,v;
unsigned nss, v;
va = trunc_page((vm_offset_t)tear);
/*
* Avoid even looking at pde_v(va) for high va's. va's
* above VM_MAX_KERNEL_ADDRESS don't correspond to normal
* PDE's (half of them correspond to APDEpde and half to
* an unmapped kernel PDE). va's betweeen 0xFEC00000 and
* VM_MAX_KERNEL_ADDRESS correspond to unmapped kernel PDE's
* (XXX - why are only 3 initialized when 6 are required to
* reach VM_MAX_KERNEL_ADDRESS?). Faulting in an unmapped
* kernel page table would give inconsistent PTD's.
*
* XXX - faulting in unmapped page tables wastes a page if
* va turns out to be invalid.
*
* XXX - should "kernel address space" cover the kernel page
* tables? Might have same problem with PDEpde as with
* APDEpde (or there may be no problem with APDEpde).
*/
if (va > 0xFEBFF000) {
v = KERN_FAILURE; /* becomes SIGBUS */
goto nogo;
}
va = trunc_page((vm_offset_t)frame.tf_tear);
/*
* It is only a kernel address space fault iff:
* 1. (type & T_USER) == 0 and
* 2. pcb_onfault not set or
* 1. (type & T_USER) == 0 and
* 2. pcb_onfault not set or
* 3. pcb_onfault set but supervisor space fault
* The last can occur during an exec() copyin where the
* argument space is lazy-allocated.
@ -191,13 +309,13 @@ copyfault:
map = kernel_map;
else
map = &vm->vm_map;
if ((msr & MSR_DDT) == DDT_WRITE
|| (msr & MSR_STT) == STT_RMW)
if ((frame.tf_msr & MSR_DDT) == DDT_WRITE ||
(frame.tf_msr & MSR_STT) == STT_RMW)
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
ftype = VM_PROT_READ;
#ifdef DEBUG
#ifdef DIAGNOSTIC
if (map == kernel_map && va == 0) {
printf("trap: bad kernel access at %x\n", va);
goto we_re_toast;
@ -206,365 +324,206 @@ copyfault:
nss = 0;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)VM_MAXUSER_ADDRESS
&& map != kernel_map) {
nss = clrnd(btoc((unsigned)vm->vm_maxsaddr
+ MAXSSIZ - (unsigned)va));
&& (caddr_t)va < (caddr_t)VM_MAXUSER_ADDRESS
&& map != kernel_map) {
nss = clrnd(btoc(USRSTACK-(unsigned)va));
if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
/*pg("trap rlimit %d, maxsaddr %x va %x ", nss, vm->vm_maxsaddr, va);*/
rv = KERN_FAILURE;
goto nogo;
}
}
/* check if page table is mapped, if not, fault it first */
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
if (!pde_v(va)) {
if ((PTD[pdei(va)] & PG_V) == 0) {
v = trunc_page(vtopte(va));
rv = vm_fault(map, v, ftype, FALSE);
if (rv != KERN_SUCCESS) goto nogo;
if (rv != KERN_SUCCESS)
goto nogo;
/* check if page table fault, increment wiring */
vm_map_pageable(map, v, round_page(v+1), FALSE);
} else v=0;
rv = vm_fault(map, va, ftype, FALSE);
} else
v = 0;
rv = vm_fault(map, va, ftype, FALSE);
if (rv == KERN_SUCCESS) {
/*
* XXX: continuation of rude stack hack
*/
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
va = trunc_page(vtopte(va));
/* for page table, increment wiring
as long as not a page table fault as well */
/* for page table, increment wiring as long as
not a page table fault as well */
if (!v && map != kernel_map)
vm_map_pageable(map, va, round_page(va+1), FALSE);
vm_map_pageable(map, va, round_page(va+1),
FALSE);
if (type == T_ABT)
return;
goto out;
}
nogo:
nogo:
if (type == T_ABT) {
if (curpcb->pcb_onfault)
goto copyfault;
printf("vm_fault(0x%x, 0x%x, 0x%x, 0) -> 0x%x\n",
map, va, ftype, rv);
printf(" type 0x%x, tear 0x%x msr 0x%x\n",
type, tear, msr);
if (pcb->pcb_onfault != 0) {
copyfault:
frame.tf_pc = (int)curpcb->pcb_onfault;
return;
}
printf("vm_fault(%x, %x, %x, 0) -> %x\n",
map, va, ftype, rv);
goto we_re_toast;
}
i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
trapsignal(p, (rv == KERN_PROTECTION_FAILURE)
? SIGBUS : SIGSEGV, T_ABT);
break;
}
case T_UND | T_USER: /* undefined instruction */
case T_ILL | T_USER: /* Illegal instruction! */
ucode = type &~ T_USER;
i = SIGILL;
break;
case T_NVI | T_USER: /* Non-vectored interrupt */
case T_NMI | T_USER: /* non-maskable interrupt */
case T_FLG | T_USER: /* flag instruction */
goto we_re_toast;
case T_NBE | T_USER: /* non-restartable bus error */
ucode = type &~ T_USER;
i = SIGBUS;
break;
case T_RBE | T_USER: /* restartable bus error */
return;
case T_SLAVE | T_USER: /* coprocessor trap */
ucode = type &~ T_USER;
/* ucode = FPE_INTDIV_TRAP; */
i = SIGFPE;
break;
case T_DVZ | T_USER: /* divide by zero */
ucode = type &~ T_USER;
/* ucode = FPE_INTDIV_TRAP; */
i = SIGFPE;
break;
case T_OVF | T_USER: /* integer overflow trap */
ucode = type &~ T_USER;
/* ucode = FPE_INTOVF_TRAP; */
i = SIGFPE;
break;
}
case T_TRC | T_USER: /* trace trap */
case T_BPT | T_USER: /* breakpoint instruction */
case T_DBG | T_USER: /* debug trap */
trace:
frame.tf_psr &= ~PSL_P;
i = SIGTRAP;
trapsignal(p, SIGTRAP, type &~ T_USER);
break;
case T_INTERRUPT | T_USER: /* Allow Process Switch */
/* if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
p->p_flag &= ~SOWEUPC;
} */
goto out;
case T_NMI: /* non-maskable interrupt */
case T_NMI | T_USER:
#ifdef DDB
/* NMI can be hooked up to a pushbutton for debugging */
printf ("NMI ... going to debugger\n");
if (kdb_trap (type, 0, &frame))
return;
#endif
goto we_re_toast;
}
} /* End of switch */
trapsignal(p, i, ucode);
if ((type & T_USER) == 0)
return;
out:
while (i = CURSIG(p))
postsig(i);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqueue ourselves but
* before we switch()'ed, we might not be on the queue
* indicated by our priority.
*/
(void) splstatclock();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
(void) splnone();
while (i = CURSIG(p))
postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
int ticks;
#ifdef YO_WHAT
struct timeval *tv = &p->p_stime;
ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
addupc(frame.tf_eip, &p->p_stats->p_prof,
ticks * profscale);
#else
/* addupc(frame.tf_pc, &p->p_stats->p_prof, ticks); */
#endif
}
#endif
}
curpriority = p->p_priority;
userret(p, frame.tf_pc, sticks);
}
/*
* syscall(frame):
* System call request from POSIX system call gate interface to kernel.
* Like trap(), argument is call by reference.
*/
/*ARGSUSED*/
void
syscall(frame)
volatile struct syscframe frame;
struct syscframe frame;
{
register caddr_t params;
register int i;
register struct sysent *callp;
register struct proc *p;
struct timeval sticks;
int error, opc, nsys;
int args[8], rval[2];
int code;
size_t argsize;
register_t code, args[8], rval[2];
u_quad_t sticks;
cnt.v_syscall++;
/* is this a user? */
if ((frame.sf_psr & PSL_USER) != PSL_USER)
panic("syscall - process not in user mode.");
if (!USERMODE(frame.sf_psr))
panic("syscall");
p = curproc;
#ifdef notdef
sticks = p->p_stime;
#endif
code = frame.sf_reg[REG_R0];
p->p_md.md_regs = (int *) & (frame.sf_reg);
params = (caddr_t)frame.sf_usp + sizeof (int) ;
callp = p->p_emul->e_sysent;
nsys = p->p_emul->e_nsysent;
/* Set new return address and save old one. */
sticks = p->p_sticks;
p->p_md.md_regs = (int *) &frame.sf_reg;
opc = frame.sf_pc++;
code = frame.sf_reg[REG_R0];
nsys = p->p_emul->e_nsysent;
callp = p->p_emul->e_sysent;
params = (caddr_t)frame.sf_usp + sizeof(int);
switch (code) {
case SYS_syscall:
/*
* Code is first argument, followed by actual args.
*/
code = fuword(params);
params += sizeof(int);
break;
case SYS___syscall:
/*
* Like syscall, but code is a quad, so as to maintain
* quad alignment for the rest of the arguments.
*/
if (callp != sysent)
break;
code = fuword(params + _QUAD_LOWWORD * sizeof(int));
params += sizeof(quad_t);
break;
default:
/* do nothing by default */
break;
}
/* Guard against bad sys call numbers! */
if (code < 0 || code >= nsys)
callp += p->p_emul->e_nosys; /* indir (illegal) */
else
callp += code;
if ((i = callp->sy_argsize) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
frame.sf_reg[REG_R0] = error;
frame.sf_psr |= PSL_C;
if (code < 0 || code >= nsys)
callp += p->p_emul->e_nosys; /* illegal */
else
callp += code;
argsize = callp->sy_argsize;
if (argsize)
error = copyin(params, (caddr_t)args, argsize);
else
error = 0;
#ifdef SYSCALL_DEBUG
scdebug_call(p, code, callp->sy_narg, i, args);
#endif
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, i, &args);
#endif
goto done;
}
#ifdef SYSCALL_DEBUG
scdebug_call(p, code, callp->sy_narg, i, args);
scdebug_call(p, code, args);
#endif
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, i, &args);
ktrsyscall(p->p_tracep, code, argsize, args);
#endif
if (error)
goto bad;
rval[0] = 0;
rval[1] = 0;
rval[1] = frame.sf_reg[REG_R1];
error = (*callp->sy_call)(p, args, rval);
if (error == ERESTART)
frame.sf_pc = opc;
else if (error != EJUSTRETURN) {
if (error) {
frame.sf_reg[REG_R0] = error;
frame.sf_psr |= PSL_C;
} else {
frame.sf_reg[REG_R0] = rval[0];
frame.sf_reg[REG_R1] = rval[1];
frame.sf_psr &= ~PSL_C;
}
}
/* else if (error == EJUSTRETURN) */
/* nothing to do */
done:
/*
* Reinitialize proc pointer `p' as it may be different
* if this is a child returning from fork syscall.
*/
p = curproc;
while (i = CURSIG(p))
postsig(i);
p->p_priority = p->p_usrpri;
if (want_resched) {
switch (error) {
case 0:
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqeue ourselves but before
* we switch()'ed, we might not be on the queue indicated by
* our priority.
* Reinitialize proc pointer `p' as it may be different
* if this is a child returning from fork syscall.
*/
(void) splstatclock();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
(void) splnone();
while (i = CURSIG(p))
postsig(i);
p = curproc;
frame.sf_reg[REG_R0] = rval[0];
frame.sf_reg[REG_R1] = rval[1];
frame.sf_psr &= ~PSL_C; /* carry bit */
break;
case ERESTART:
/*
* Just reset the pc to the SVC instruction.
*/
frame.sf_pc = opc;
break;
case EJUSTRETURN:
/* nothing to do */
break;
default:
bad:
if (p->p_emul->e_errno)
error = p->p_emul->e_errno[error];
frame.sf_reg[REG_R0] = error;
frame.sf_psr |= PSL_C; /* carry bit */
break;
}
if (p->p_stats->p_prof.pr_scale) {
int ticks;
#ifdef YO_WHAT
struct timeval *tv = &p->p_stime;
ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
addupc(frame.sf_pc, &p->p_stats->p_prof,
ticks * profscale);
#else
/* addupc(frame.sf_pc, &p->p_stats->p_prof, ticks); */
#endif
}
#endif
}
curpriority = p->p_priority;
#ifdef SYSCALL_DEBUG
scdebug_ret(p, code, error, rval[0]);
scdebug_ret(p, code, error, rval);
#endif
userret(p, frame.sf_pc, sticks);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}
/* For the child, do the stuff after mi_swtch() in syscall so
low_level_fork does not have to rethread the kernel stack. */
void
ll_fork_sig()
child_return(p, frame)
struct proc *p;
struct syscframe frame;
{
register struct proc *p = curproc;
int i;
frame.sf_reg[REG_R0] = 0;
frame.sf_psr &= ~PSL_C;
(void) splnone();
while (i = CURSIG(p))
postsig(i);
}
/* #define dbg_user */
/* Other stuff.... */
int
check_user_write ( u_long addr, u_long size)
{
int rv;
vm_offset_t va;
#ifdef dbg_user
printf ("ck_ur_wr: addr=0x%x, size=0x%x", addr, size);
#endif
/* check for all possible places! */
va = trunc_page((vm_offset_t) addr);
if (va > VM_MAXUSER_ADDRESS) return (1);
while ((u_long)va < (addr + size)) {
/* check for copy on write access. */
#ifdef dbg_user
printf (" (0x%x:%d)", va, vtopte(va)->pg_prot);
#endif
if (!(vtopte(va)->pg_v) || vtopte(va)->pg_prot != 3 ) {
#ifdef dbg_user
printf (" fault");
#endif
rv = vm_fault(&curproc->p_vmspace->vm_map, va,
VM_PROT_READ | VM_PROT_WRITE, FALSE);
if (rv != KERN_SUCCESS)
#ifdef dbg_user
{ printf (" bad\n");
#endif
return(1);
#ifdef dbg_user
}
#endif
}
va += NBPG;
}
#ifdef dbg_user
printf ("\n");
#endif
return (0);
userret(p, frame.sf_pc, 0);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}


@ -1,6 +1,9 @@
/* $NetBSD: vm_machdep.c,v 1.11 1995/08/29 22:37:54 phil Exp $ */
/* $NetBSD: vm_machdep.c,v 1.12 1996/01/31 21:34:06 phil Exp $ */
/*-
* Copyright (c) 1996 Matthias Pfaller.
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
* Copyright (c) 1993 Philip A. Nelson.
* Copyright (c) 1982, 1986 The Regents of the University of California.
* Copyright (c) 1989, 1990 William Jolitz
* All rights reserved.
@ -54,120 +57,140 @@
#include <vm/vm_kern.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
extern struct proc *fpu_proc;
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the kernel stack and pcb, making the child
* ready to run, and marking it so that it can return differently
* than the parent. Returns 1 in the child process, 0 in the parent.
* We currently double-map the user area so that the stack is at the same
* address in each process; in the future we will probably relocate
* the frame pointers on the stack after copying.
* Copy the pcb and setup the kernel stack for the child.
* Setup the child's stackframe to return to child_return
* via proc_trampoline from cpu_switch.
*/
cpu_fork(p1, p2)
register struct proc *p1, *p2;
{
struct user *up = p2->p_addr;
int foo, offset, addr, i;
register struct pcb *pcb = &p2->p_addr->u_pcb;
register struct syscframe *tf;
register struct switchframe *sf;
extern void proc_trampoline(), child_return();
/* Copy curpcb (which is presumably p1's PCB) to p2. */
*pcb = p1->p_addr->u_pcb;
pcb->pcb_onstack = (struct on_stack *)((u_int)p2->p_addr + USPACE) - 1;
*pcb->pcb_onstack = *p1->p_addr->u_pcb.pcb_onstack;
/* If p1 is holding the FPU, update the FPU context of p2. */
if (fpu_proc == p1)
save_fpu_context(pcb);
pmap_activate(&p2->p_vmspace->vm_pmap, pcb);
/*
* Copy pcb from proc p1 to p2.
* _low_level_init will copy the kernel stack as cheeply as
* possible.
* Copy the syscframe, and arrange for the child to return directly
* through rei().
*/
p2->p_addr->u_pcb = p1->p_addr->u_pcb;
p2->p_addr->u_pcb.pcb_onstack =
(struct on_stack *) p2->p_addr + USPACE
- sizeof (struct on_stack);
/*
* Wire top of address space of child to it's kstack.
* First, fault in a page of pte's to map it.
*/
addr = trunc_page((u_int)vtopte(USRSTACK));
vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+USPACE, FALSE);
for (i=0; i < UPAGES; i++)
pmap_enter(&p2->p_vmspace->vm_pmap, USRSTACK+i*NBPG,
pmap_extract(pmap_kernel(), ((int)p2->p_addr)+i*NBPG),
VM_PROT_READ, TRUE);
pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
/*
* Low_level_fork returns twice! First with a 0 in the
* parent space and Second with a 1 in the child.
*/
return (low_level_fork(up));
tf = (struct syscframe *)((u_int)p2->p_addr + USPACE) - 1;
p2->p_md.md_regs = (int *)&(tf->sf_reg);
sf = (struct switchframe *)tf - 1;
sf->sf_pc = (long) proc_trampoline;
sf->sf_fp = (long) &tf->sf_fp;
sf->sf_reg[REG_R3] = (long) child_return;
sf->sf_reg[REG_R4] = (long) p2;
sf->sf_pl = imask[IPL_ZERO];
pcb->pcb_ksp = (long) sf;
pcb->pcb_kfp = (long) &sf->sf_fp;
}
/*
* cpu_set_kpc:
*
* Arrange for in-kernel execution of a process to continue at the
* named pc, as if the code at that address were called as a function
* with argument, the current process's process pointer.
*
* Note that it's assumed that when the named process returns, rei()
* should be invoked, to return to user mode.
*/
void
cpu_set_kpc(p, pc)
struct proc *p;
u_long pc;
{
struct pcb *pcbp;
struct switchframe *sf;
extern void proc_trampoline();
#ifdef notyet
pcbp = &p->p_addr->u_pcb;
sf = (struct switchframe *) pcbp->pcb_ksp;
sf->sf_pc = (long) proc_trampoline;
sf->sf_reg[REG_R3] = pc;
sf->sf_reg[REG_R4] = (long) p;
}
/*
* cpu_swapout is called immediately before a process's 'struct user'
* and kernel stack are unwired (which are in turn done immediately
* before it's P_INMEM flag is cleared). If the process is the
* current owner of the floating point unit, the FP state has to be
* saved, so that it goes out with the pcb, which is in the user area.
*/
void
cpu_swapout(p)
struct proc *p;
{
/*
* Make sure we save the FP state before the user area vanishes.
*/
if (fpu_proc != p)
return;
save_fpu_context(&p->p_addr->u_pcb);
fpu_proc = 0;
}
/*
* cpu_exit is called as the last action during exit.
*
* We change to an inactive address space and a "safe" stack,
* passing thru an argument to the new stack. Now, safely isolated
* from the resources we're shedding, we release the address space
* and any remaining machine-dependent resources, including the
* memory for the user structure and kernel stack.
*
* Next, we assign a dummy context to be written over by swtch,
* calling it to send this process off to oblivion.
* [The nullpcb allows us to minimize cost in swtch() by not having
* a special case].
* We switch to a temorary stack and address space. Then we release
* release the original address space and machine-dependent resources,
* including the memory for the user structure and kernel stack.
* Once finished, we call cpu_exit, which never returns.
* We block interrupts until cpu_switch has made things safe again.
*/
struct proc *swtch_to_inactive();
void
cpu_exit(p)
register struct proc *p;
cpu_exit(arg)
struct proc *arg;
{
static struct pcb nullpcb; /* pcb to overwrite on last swtch */
register struct proc *p __asm("r3");
cnt.v_swtch++;
/* free cporcessor (if we have it) */
if( p == npxproc) npxproc =0;
/* Copy arg into a register. */
movd(arg, p);
/* move to inactive space and stack, passing arg accross */
p = swtch_to_inactive(p);
/* If we were using the FPU, forget about it. */
if (fpu_proc == p)
fpu_proc = 0;
/* drop per-process resources */
/* Switch to temporary stack and address space. */
lprd(sp, INTSTACK);
load_ptb(PTDpaddr);
/* Free resources. */
vmspace_free(p->p_vmspace);
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
(void) splhigh();
kmem_free(kernel_map, (vm_offset_t)p->p_addr, USPACE);
p->p_addr = (struct user *) &nullpcb;
splstatclock();
/* Don't update pcb in cpu_switch. */
curproc = NULL;
cpu_switch();
/* NOTREACHED */
}
#else
void
cpu_exit(p)
register struct proc *p;
{
splstatclock();
cpu_switch();
/* Not reached. */
panic ("cpu_exit! swtch returned!");
}
void
cpu_wait(p)
struct proc *p;
{
/* drop per-process resources */
vmspace_free(p->p_vmspace);
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
#endif
/*
* Dump the machine specific segment at the start of a core dump.
*/
struct md_core {
struct reg intreg;
struct fpreg freg;
};
int
cpu_coredump(p, vp, cred, chdr)
struct proc *p;
@ -175,38 +198,43 @@ cpu_coredump(p, vp, cred, chdr)
struct ucred *cred;
struct core *chdr;
{
int error;
struct {
struct reg regs;
struct fpreg fpregs;
} cpustate;
struct md_core md_core;
struct coreseg cseg;
int error;
CORE_SETMAGIC(*chdr, COREMAGIC, MID_NS32532, 0);
chdr->c_hdrsize = ALIGN(sizeof(*chdr));
chdr->c_seghdrsize = ALIGN(sizeof(cseg));
chdr->c_cpusize = sizeof(cpustate);
cpustate.regs = *((struct reg *)p->p_md.md_regs);
cpustate.fpregs = *((struct fpreg *)&p->p_addr->u_pcb.pcb_fsr);
chdr->c_cpusize = sizeof(md_core);
/* Save integer registers. */
error = process_read_regs(p, &md_core.intreg);
if (error)
return error;
/* Save floating point registers. */
error = process_read_fpregs(p, &md_core.freg);
if (error)
return error;
CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_NS32532, CORE_CPU);
cseg.c_addr = 0;
cseg.c_size = chdr->c_cpusize;
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
(off_t)chdr->c_hdrsize, UIO_SYSSPACE,
IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
(off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred,
(int *)0, p);
if (error)
return error;
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate, sizeof(cpustate),
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
(off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
IO_NODELOCKED|IO_UNIT, cred, (int *)0, p);
if (error)
return error;
if (!error)
chdr->c_nseg++;
return error;
chdr->c_nseg++;
return 0;
}
@ -236,7 +264,7 @@ pagemove(from, to, size)
register caddr_t from, to;
int size;
{
register struct pte *fpte, *tpte;
int *fpte, *tpte;
if (size % CLBYTES)
panic("pagemove");
@ -249,7 +277,7 @@ pagemove(from, to, size)
to += NBPG;
size -= NBPG;
}
tlbflush();
pmap_update();
}
/*
@ -259,6 +287,7 @@ kvtop(addr)
register caddr_t addr;
{
vm_offset_t va;
va = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
@ -285,62 +314,49 @@ extern vm_map_t phys_map;
* All requests are (re)mapped into kernel VA space via the useriomap
* (a name with only slightly more meaning than "kernelmap")
*/
vmapbuf(bp)
register struct buf *bp;
vmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
{
register int npf;
register caddr_t addr;
register long flags = bp->b_flags;
struct proc *p;
int off;
vm_offset_t kva;
register vm_offset_t pa;
vm_offset_t faddr, taddr, off;
pt_entry_t *fpte, *tpte;
pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
if ((flags & B_PHYS) == 0)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
addr = bp->b_saveaddr = bp->b_un.b_addr;
off = (int)addr & PGOFSET;
p = bp->b_proc;
npf = btoc(round_page(bp->b_bcount + off));
kva = kmem_alloc_wait(phys_map, ctob(npf));
bp->b_un.b_addr = (caddr_t) (kva + off);
while (npf--) {
pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
if (pa == 0)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
VM_PROT_READ|VM_PROT_WRITE, TRUE);
addr += PAGE_SIZE;
kva += PAGE_SIZE;
}
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
taddr = kmem_alloc_wait(phys_map, len);
bp->b_data = (caddr_t)(taddr + off);
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
*/
fpte = pmap_pte(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), faddr);
tpte = pmap_pte(vm_map_pmap(phys_map), taddr);
do {
*tpte++ = *fpte++;
len -= PAGE_SIZE;
} while (len);
}
/*
* Free the io map PTEs associated with this IO operation.
* We also invalidate the TLB entries and restore the original b_addr.
*/
vunmapbuf(bp)
register struct buf *bp;
vunmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
{
register int npf;
register caddr_t addr = bp->b_un.b_addr;
vm_offset_t kva;
vm_offset_t addr, off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
kva = (vm_offset_t)((int)addr & ~PGOFSET);
kmem_free_wakeup(phys_map, kva, ctob(npf));
bp->b_un.b_addr = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}
/*
* (Force reset the processor by invalidating the entire address space!)
* Well, lets just hang!
*/
cpu_reset()
{
splhigh();
while (1);
addr = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
kmem_free_wakeup(phys_map, addr, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}