From ccbcfbef9147c543c39f0e0533869f11a80c6e78 Mon Sep 17 00:00:00 2001 From: phil Date: Wed, 31 Jan 1996 21:33:42 +0000 Subject: [PATCH] Integration of many changes done by Matthias Pfaller with a few by me. clock.c * Removed definition of DELAY. intr.c * Removed an unneeded $Id:....$ locore.s * Moved some of the low level initialization code to machdep.c. * Defined proc_trampoline. * Changed sigcode to pass scp to SYS_sigreturn. * Changed copyin/copyout/fu*/su* to take advantage of the ns32532's dual address instructions. * Recoded copyinstr/copyoutstr/copystr in assembler. * Added a new and faster version of bzero. This makes bzero.s unnecessary. * Defined suswintr to make profiling work. * Recoded cpu_switch modelled after the i386 version of cpu_switch. * Added support for lazy fpu state restore to cpu_switch. * Recoded trap handling code to be more readable. * Added experimental code for single cacheline invalidation. machdep.c * Copied over cpu_startup from i386/i386/machdep.c. * Changed sys_sigreturn to take advantage of the argument passed by the trampoline code. * Changed boot to call doshutdownhooks and to store machine state in case of a panic. * Changed setregs to clear the fpu registers. * Recoded low_level_init. It's now called init532. * cpu_reset: New function, resets the machine. trap.c * Pulled over from i386/i386/trap.c. * Added support for lazy saved/restored fpu state. vm_machdep.c * Removed kstack double mapping by pulling over alot of code from i386/i386/vm_machdep.c. * Added support for lazy saved/restored fpu state. * Moved freeing of process resources from cpu_wait to cpu_exit. * Pulled over cpu_coredump, pagemove, vmapbuf and vunmapbuf from i386/i386/vm_machdep.c. pmap.c * Pulled over from i386/i386/pmap.c. genassym.c * Removed old and unused definitions, added new ones. sys_machdep.c * Moved sys_sysarch from machdep.c to sys_machdep.c. process_machdep.c * Changed to work without ktack double mapping. * Changed to work with lazy saved/restored fpu state. --- sys/arch/pc532/pc532/bzero.s | 97 -- sys/arch/pc532/pc532/clock.c | 9 +- sys/arch/pc532/pc532/genassym.c | 113 +- sys/arch/pc532/pc532/intr.c | 4 +- sys/arch/pc532/pc532/locore.s | 2132 ++++++++++++------------ sys/arch/pc532/pc532/machdep.c | 1211 +++++++------- sys/arch/pc532/pc532/pmap.c | 1961 ++++++++++------------ sys/arch/pc532/pc532/process_machdep.c | 20 +- sys/arch/pc532/pc532/sys_machdep.c | 16 +- sys/arch/pc532/pc532/trap.c | 665 ++++---- sys/arch/pc532/pc532/vm_machdep.c | 308 ++-- 11 files changed, 3122 insertions(+), 3414 deletions(-) delete mode 100644 sys/arch/pc532/pc532/bzero.s diff --git a/sys/arch/pc532/pc532/bzero.s b/sys/arch/pc532/pc532/bzero.s deleted file mode 100644 index c2f418db10db..000000000000 --- a/sys/arch/pc532/pc532/bzero.s +++ /dev/null @@ -1,97 +0,0 @@ -/* $NetBSD: bzero.s,v 1.3 1996/01/26 08:11:47 phil Exp $ */ - -/* - * Mach Operating System - * Copyright (c) 1992 Carnegie Mellon University - * Copyright (c) 1992 Helsinki University of Technology - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON AND HELSINKI UNIVERSITY OF TECHNOLOGY ALLOW FREE USE - * OF THIS SOFTWARE IN ITS "AS IS" CONDITION. 
CARNEGIE MELLON AND - * HELSINKI UNIVERSITY OF TECHNOLOGY DISCLAIM ANY LIABILITY OF ANY KIND - * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon - * the rights to redistribute these changes. - */ -/* - * File: ns532/bzero.s - * Author: Tero Kivinen, Helsinki University of Technology 1992. - * - * $Id: bzero.s,v 1.3 1996/01/26 08:11:47 phil Exp $ - */ - - -/* - * bzero(char * addr, unsigned int length) - */ - - .text -ENTRY(bzero) - enter [],0 - movd B_ARG0,r1 /* addr */ - movd B_ARG1,r2 /* length */ - movd r1,r0 /* align addr */ - andd 3,r0 - cmpqd 0,r0 - beq wstart /* already aligned */ - negd r0,r0 - addqd 4,r0 - cmpd r0,r2 - bhi bytes /* not enough data to align */ -b1loop: movqb 0,0(r1) /* zero bytes */ - addqd 1,r1 - addqd -1,r2 - acbd -1,r0,b1loop -wstart: movd r2,r0 /* length */ - lshd -6,r0 - cmpqd 0,r0 - beq phase2 -w1loop: movqd 0,0(r1) /* zero words */ - movqd 0,4(r1) - movqd 0,8(r1) - movqd 0,12(r1) - movqd 0,16(r1) - movqd 0,20(r1) - movqd 0,24(r1) - movqd 0,28(r1) - movqd 0,32(r1) - movqd 0,36(r1) - movqd 0,40(r1) - movqd 0,44(r1) - movqd 0,48(r1) - movqd 0,52(r1) - movqd 0,56(r1) - movqd 0,60(r1) - addd 64,r1 - acbd -1,r0,w1loop -phase2: movd r2,r0 /* length */ - andd 63,r0 - lshd -2,r0 - cmpqd 0,r0 - beq bytes -w2loop: movqd 0,0(r1) - addqd 4,r1 - acbd -1,r0,w2loop -bytes: movd r2,r0 /* length */ - andd 3,r0 - cmpqd 0,r0 - beq done -bloop: movqb 0,0(r1) /* zero bytes */ - addqd 1,r1 - acbb -1,r0,bloop -done: exit [] - ret 0 diff --git a/sys/arch/pc532/pc532/clock.c b/sys/arch/pc532/pc532/clock.c index 81bcc8f83535..e3a7e8eca70f 100644 --- a/sys/arch/pc532/pc532/clock.c +++ b/sys/arch/pc532/pc532/clock.c @@ -1,4 +1,4 @@ -/* $NetBSD: clock.c,v 1.11 1995/05/16 07:30:46 phil Exp $ */ +/* $NetBSD: clock.c,v 1.12 1996/01/31 21:33:47 phil Exp $ */ /*- * Copyright (c) 1990 The Regents of the University of California. @@ -82,13 +82,6 @@ spinwait(int millisecs) DELAY(5000 * millisecs); } -DELAY(n) -{ - volatile int N = (n); - while (--N > 0) - ; -} - void setstatclockrate(int dummy) { diff --git a/sys/arch/pc532/pc532/genassym.c b/sys/arch/pc532/pc532/genassym.c index 487cd8c72b39..11e97ba28836 100644 --- a/sys/arch/pc532/pc532/genassym.c +++ b/sys/arch/pc532/pc532/genassym.c @@ -1,4 +1,4 @@ -/* $NetBSD: genassym.c,v 1.8 1995/06/09 05:59:58 phil Exp $ */ +/* $NetBSD: genassym.c,v 1.9 1996/01/31 21:33:52 phil Exp $ */ /*- * Copyright (c) 1982, 1990 The Regents of the University of California. 
@@ -45,80 +45,73 @@ #include #include -#include #include +#include +#include #include main() { - struct proc *p = (struct proc *)0; - struct user *up = (struct user *)0; - struct rusage *rup = (struct rusage *)0; - struct uprof *uprof = (struct uprof *)0; - struct pcb *pcb = (struct pcb *)0; - struct on_stack *regs = (struct on_stack *)0; - struct iv *iv = (struct iv *)0; + struct proc *p = 0; struct vmmeter *vm = 0; + struct pcb *pcb = 0; + struct sigframe *sigf = 0; + struct on_stack *regs = 0; + struct iv *iv = 0; register unsigned i; - printf("#define\tKERNBASE 0x%x\n", KERNBASE); - printf("#define\tUDOT_SZ %d\n", sizeof(struct user)); - printf("#define\tP_FORW %d\n", &p->p_forw); - printf("#define\tP_BACK %d\n", &p->p_back); - printf("#define\tP_VMSPACE %d\n", &p->p_vmspace); - printf("#define\tP_ADDR %d\n", &p->p_addr); - printf("#define\tP_PRIORITY %d\n", &p->p_priority); - printf("#define\tP_STAT %d\n", &p->p_stat); - printf("#define\tP_WCHAN %d\n", &p->p_wchan); - printf("#define\tP_FLAG %d\n", &p->p_flag); - printf("#define\tP_PID %d\n", &p->p_pid); +#define def(N,V) printf("#define\t%s %d\n", N, V) - printf("#define\tSSLEEP %d\n", SSLEEP); - printf("#define\tSRUN %d\n", SRUN); - printf("#define\tUPAGES %d\n", UPAGES); - printf("#define\tHIGHPAGES %d\n", HIGHPAGES); - printf("#define\tCLSIZE %d\n", CLSIZE); - printf("#define\tNBPG %d\n", NBPG); - printf("#define\tNPTEPG %d\n", NPTEPG); - printf("#define\tPGSHIFT %d\n", PGSHIFT); - printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE); - printf("#define\tUSRPTSIZE %d\n", USRPTSIZE); + def("SRUN", SRUN); - printf("#define\tKERN_STK_START 0x%x\n", - USRSTACK + UPAGES*NBPG); - printf("#define\tKSTK_SIZE %d\n", UPAGES*NBPG); - printf("#define\tON_STK_SIZE %d\n", sizeof(struct on_stack)); - printf("#define\tREGS_USP %d\n", ®s->pcb_usp); - printf("#define\tREGS_FP %d\n", ®s->pcb_fp); - printf("#define\tREGS_SB %d\n", ®s->pcb_sb); - printf("#define\tREGS_PSR %d\n", ®s->pcb_psr); + def("PDSHIFT", PDSHIFT); + def("PGSHIFT", PGSHIFT); + def("PGOFSET", PGOFSET); + def("NBPG", NBPG); - printf("#define\tPCB_ONSTACK %d\n", &pcb->pcb_onstack); - printf("#define\tPCB_FSR %d\n", &pcb->pcb_fsr); + def("PTDPTDI", PTDPTDI); + def("KPTDI", KPTDI); + def("NKPDE", NKPDE); + def("APTDPTDI", APTDPTDI); + def("KERNBASE", KERNBASE); + + def("VM_MAXUSER_ADDRESS", VM_MAXUSER_ADDRESS); + + def("P_ADDR", &p->p_addr); + def("P_BACK", &p->p_back); + def("P_FORW", &p->p_forw); + def("P_PRIORITY", &p->p_priority); + def("P_STAT", &p->p_stat); + def("P_WCHAN", &p->p_wchan); + def("P_VMSPACE", &p->p_vmspace); + def("P_FLAG", &p->p_flag); + def("P_PID", &p->p_pid); + + def("V_INTR", &vm->v_intr); + + def("PCB_ONSTACK", &pcb->pcb_onstack); + def("PCB_FSR", &pcb->pcb_fsr); for (i=0; i<8; i++) - printf("#define\tPCB_F%d %d\n", i, &pcb->pcb_freg[i]); - printf("#define\tPCB_KSP %d\n", &pcb->pcb_ksp); - printf("#define\tPCB_KFP %d\n", &pcb->pcb_kfp); - printf("#define\tPCB_PTB %d\n", &pcb->pcb_ptb); - printf("#define\tPCB_PL %d\n", &pcb->pcb_pl); - printf("#define\tPCB_FLAGS %d\n", &pcb->pcb_flags); - printf("#define\tPCB_ONFAULT %d\n", &pcb->pcb_onfault); + printf("#define\tPCB_F%d %d\n", i, &pcb->pcb_freg[i]); + def("PCB_KSP", &pcb->pcb_ksp); + def("PCB_KFP", &pcb->pcb_kfp); + def("PCB_PTB", &pcb->pcb_ptb); + def("PCB_ONFAULT", &pcb->pcb_onfault); - printf("#define\tV_TRAP %d\n", &vm->v_trap); - printf("#define\tV_INTR %d\n", &vm->v_intr); + def("ON_STK_SIZE", sizeof(struct on_stack)); + def("REGS_USP", ®s->pcb_usp); + def("REGS_FP", ®s->pcb_fp); + def("REGS_SB", ®s->pcb_sb); + 
def("REGS_PSR", ®s->pcb_psr); - printf("#define\tIV_VEC %d\n", &iv->iv_vec); - printf("#define\tIV_ARG %d\n", &iv->iv_arg); - printf("#define\tIV_CNT %d\n", &iv->iv_cnt); - printf("#define\tIV_USE %d\n", &iv->iv_use); + def("SIGF_HANDLER", &sigf->sf_handler); + def("SIGF_SC", &sigf->sf_sc); + + def("IV_VEC", &iv->iv_vec); + def("IV_ARG", &iv->iv_arg); + def("IV_CNT", &iv->iv_cnt); + def("IV_USE", &iv->iv_use); - printf("#define\tUSRSTACK 0x%x\n", USRSTACK); -#ifdef SYSVSHM - printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS); -#endif - printf("#define\tENOENT %d\n", ENOENT); - printf("#define\tEFAULT %d\n", EFAULT); - printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG); exit(0); } diff --git a/sys/arch/pc532/pc532/intr.c b/sys/arch/pc532/pc532/intr.c index 3d492fedb550..4eaf8ed00eda 100644 --- a/sys/arch/pc532/pc532/intr.c +++ b/sys/arch/pc532/pc532/intr.c @@ -1,4 +1,4 @@ -/* $NetBSD: intr.c,v 1.5 1995/09/26 20:16:26 phil Exp $ */ +/* $NetBSD: intr.c,v 1.6 1996/01/31 21:33:53 phil Exp $ */ /* * Copyright (c) 1994 Matthias Pfaller. @@ -28,8 +28,6 @@ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $Id: intr.c,v 1.5 1995/09/26 20:16:26 phil Exp $ */ #define DEFINE_SPLX diff --git a/sys/arch/pc532/pc532/locore.s b/sys/arch/pc532/pc532/locore.s index 2862bf29997c..ef4bc536e2c6 100644 --- a/sys/arch/pc532/pc532/locore.s +++ b/sys/arch/pc532/pc532/locore.s @@ -1,4 +1,4 @@ -/* $NetBSD: locore.s,v 1.30 1995/11/30 00:59:00 jtc Exp $ */ +/* $NetBSD: locore.s,v 1.31 1996/01/31 21:33:56 phil Exp $ */ /* * Copyright (c) 1993 Philip A. Nelson. @@ -40,259 +40,1152 @@ * * Phil Nelson, Dec 6, 1992 * + * Modified by Matthias Pfaller, Jan 1996. + * */ -/* This is locore.s! */ +/* + * Tell include files that we don't want any C stuff. + */ #define LOCORE -/* Get the defines... */ -#include -#include #include "assym.h" -/* define some labels */ -#define PSR_U 0x100 -#define PSR_S 0x200 -#define PSR_P 0x400 -#define PSR_I 0x800 +#include +#include +#include +#include +#include +#include +#include -#define CFG_IVEC 0x1 -#define CFG_FPU 0x2 -#define CFG_MEM 0x4 -#define CFG_DE 0x100 -#define CFG_DATA 0x200 -#define CFG_DLCK 0x400 -#define CFG_INS 0x800 -#define CFG_ILCK 0x1000 +/* + * PTmap is recursive pagemap at top of virtual address space. + * Within PTmap, the page directory can be found (third indirection). + */ + .globl _PTmap, _PTD, _PTDpde, _Sysmap + .set _PTmap,(PTDPTDI << PDSHIFT) + .set _PTD,(_PTmap + PTDPTDI * NBPG) + .set _PTDpde,(_PTD + PTDPTDI * 4) # XXX 4 == sizeof pde + .set _Sysmap,(_PTmap + KPTDI * NBPG) -/* Initial Kernel stack page and the Idle processes' stack. */ -#define KERN_INT_SP 0xFFC00FFC +/* + * APTmap, APTD is the alternate recursive pagemap. + * It's used when modifying another process's page tables. 
+ */ + .globl _APTmap, _APTD, _APTDpde + .set _APTmap,(APTDPTDI << PDSHIFT) + .set _APTD,(_APTmap + APTDPTDI * NBPG) + .set _APTDpde,(_PTD + APTDPTDI * 4) # XXX 4 == sizeof pde -/* Global Data */ + .globl _proc0paddr, _PTDpaddr +_proc0paddr: .long 0 +_PTDpaddr: .long 0 # paddr of PTD, for libkvm -.data -.globl _cold, __save_sp, __save_fp, __old_intbase -_cold: .long 1 -__save_sp: .long 0 -__save_fp: .long 0 -__old_intbase: .long 0 -__have_fpu: .long 0 +/* + * Initialization + */ + .data + .globl _cold, _esym, _bootdev, _boothowto, _inttab + .globl __save_sp, __save_fp, __old_intbase, __have_fpu +_cold: .long 1 /* cold till we are not */ +_esym: .long 0 /* pointer to end of symbols */ +__save_sp: .long 0 /* Monitor stack pointer */ +__save_fp: .long 0 /* Monitor frame pointer */ +__old_intbase: .long 0 /* Monitor intbase */ +__have_fpu: .long 0 /* Have we an FPU installed? */ -.text -.globl start -start: - br here_we_go + .text + .globl start +start: ints_off # make sure interrupts are off. + bicpsrw PSL_US # make sure we are using sp0. + lprd sb,0 # gcc expects this. - .align 4 /* So the trap table is double aligned. */ -int_base_tab: /* Here is the fixed jump table for traps! */ - .long __int - .long __trap_nmi - .long __trap_abt - .long __trap_slave - .long __trap_ill - .long __trap_svc - .long __trap_dvz - .long __trap_flg - .long __trap_bpt - .long __trap_trc - .long __trap_und - .long __trap_rbe - .long __trap_nbe - .long __trap_ovf - .long __trap_dbg - .long __trap_reserved - -here_we_go: /* This is the actual start of the locore code! */ - - bicpsrw PSR_I /* make sure interrupts are off. */ - bicpsrw PSR_S /* make sure we are using sp0. */ - lprd sb, 0 /* gcc expects this. */ - sprd sp, __save_sp(pc) /* save monitor's sp. */ - sprd fp, __save_fp(pc) /* save monitor's fp. */ - sprd intbase, __old_intbase(pc) /* save monitor's intbase. */ - -.globl _bootdev -.globl _boothowto - /* Save the registers loaded by the boot program ... if the kernel - was loaded by the boot program. */ - cmpd 0xc1e86394, r3 - bne zero_bss - movd r7, _boothowto(pc) - movd r6, _bootdev(pc) - -zero_bss: - /* Zero the bss segment. */ - addr _end(pc),r0 # setup to zero the bss segment. + /* + * Zero the bss segment. + */ + addr _end(pc),r0 # setup to zero the bss segment. addr _edata(pc),r1 - subd r1,r0 # compute _end - _edata - movd r0,tos # push length - addr _edata(pc),tos # push address - bsr _bzero # zero the bss segment + subd r1,r0 # compute _end - _edata + movd r0,tos # push length + addr _edata(pc),tos # push address + bsr _bzero # zero the bss segment - bsr __low_level_init /* Do the low level setup. */ + /* + * Save monitor's sp, fp and intbase. + */ + sprd sp,__save_sp(pc) + sprd fp,__save_fp(pc) + sprd intbase,__old_intbase(pc) - lprd sp, KERN_INT_SP # use the idle/interrupt stack. - lprd fp, KERN_INT_SP # use the idle/interrupt stack. + /* + * The boot program provides us a magic in r3, + * esym in r4, bootdev in r6 and boothowto in r7. + * Set the kernel variables if the magic matches. + */ + cmpd 0xc1e86394,r3 + bne 1f + movd r4,_esym(pc) + movd r6,_bootdev(pc) + movd r7,_boothowto(pc) +1: /* + * Finish machine initialization and start main. + */ + br _init532 - /* Load cfg register is bF6 (IC,DC,DE,M,F) or bF4 */ - sprd cfg, r0 - tbitb 1, r0 /* Test the F bit! */ - bfc cfg_no_fpu - movqd 1, __have_fpu(pc) - lprd cfg, 0xbf6 - br jmphi - -cfg_no_fpu: - lprd cfg, 0xbf4 +ENTRY(proc_trampoline) + movd r4,tos + jsr 0(r3) + cmpqd 0,tos + br rei -/* Now jump to high addresses after starting mapping! 
*/ +/*****************************************************************************/ -jmphi: - addr here(pc), r0 - ord KERNBASE, r0 - jump 0(r0) +/* + * Signal trampoline; copied to top of user stack. + */ -here: - lprd intbase, int_base_tab /* set up the intbase. */ - - /* stack and frame pointer are pointing at high memory. */ - - bsr _init532 /* Set thing up to call main()! */ - - /* Get the proc0 kernel stack and pcb set up. */ - movd KERN_STK_START, r1 /* Standard sp start! */ - lprd sp, r1 /* Load it! */ - lprd fp, USRSTACK /* fp for the user. */ - lprd usp, USRSTACK /* starting stack for the user. */ - - /* Build the "trap" frame to return to address 0 in user space! */ - movw PSR_I|PSR_S|PSR_U, tos /* psr - user/user stack/interrupts */ - movw 0, tos /* mod - 0! */ - movd 0, tos /* pc - 0 after module table */ - enter [],8 /* Extra space is for USP */ - movqd 0, tos /* Zero the registers in the pcb. */ - movqd 0, tos - movqd 0, tos - movqd 0, tos - movqd 0, tos - movqd 0, tos - movqd 0, tos - movqd 0, tos - movqd 0, REGS_SB(sp) - - /* Now things should be ready to start _main! */ - - addr 0(sp), tos - bsr _main /* Start the kernel! */ - movd tos, r0 /* Pop addr */ - - /* We should only get here in proc 1. */ - movd _curproc(pc), r1 - cmpqd 0, r1 - beq main_panic - movd P_PID(r1),r0 - cmpqd 1, r0 - bne main_panic - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -main_panic: - addr main_panic_str(pc), tos - bsr _panic - -main_panic_str: - .asciz "After main -- no curproc or not proc 1." - -/* Signal support */ -.align 2 -.globl _sigcode -.globl _esigcode -_sigcode: - jsr 0(12(sp)) - movd 103, r0 +ENTRY(sigcode) + jsr 0(SIGF_HANDLER(sp)) + addr SIGF_SC(sp),tos /* scp (the call may have clobbered */ + /* the copy at SIGF_SCP(sp)). */ + movqd 0,tos /* Push a fake return address. */ + movd SYS_sigreturn,r0 svc -.align 2 + movd 0,0 /* Illegal instruction. */ + .globl _esigcode _esigcode: -/* To get the ptb0 register set correctly. */ +/*****************************************************************************/ -ENTRY(_load_ptb0) - movd S_ARG0, r0 - andd ~KERNBASE, r0 - lmr ptb0, r0 - ret 0 +/* + * The following primitives are used to fill and copy regions of memory. + */ -ENTRY(_load_ptb1) - movd S_ARG0, r0 - andd ~KERNBASE, r0 - lmr ptb1, r0 - ret 0 +/* + * bzero (void *b, size_t len) + * write len zero bytes to the string b. + */ -ENTRY (_get_ptb0) - smr ptb0, r0 - ret 0 +ENTRY(bzero) + enter [r3],0 -ENTRY (tlbflush) - smr ptb0, r0 - lmr ptb0, r0 - ret 0 + movd B_ARG0,r1 /* b */ + movd B_ARG1,r2 /* len */ + cmpd 19,r2 + bhs 6f /* Not worth the trouble. */ -ENTRY (_get_sp_adr) /* for use in testing.... */ - addr 4(sp), r0 - ret 0 + /* + * Is address aligned? + */ + movd r1,r0 + andd 3,r0 /* r0 = b & 3 */ + cmpqd 0,r0 + beq 0f -ENTRY (_get_ret_adr) - movd 0(sp), r0 - ret 0 + /* + * Align address (if necessary). + */ + movqd 0,0(r1) + addr -4(r0)[r2:b],r2 /* len = len + (r0 - 4) */ + negd r0,r0 + addr 4(r0)[r1:b],r1 /* b = b + (-r0 + 4) */ -ENTRY (_get_fp_ret) - movd 4(fp), r0 - ret 0 +0: /* + * Compute loop start address. 
+ */ + movd r2,r0 + addr 60(r2),r3 + andd 60,r0 /* r0 = len & 60 */ + lshd -6,r3 /* r3 = (len + 60) >> 6 */ + andd 3,r2 /* len &= 3 */ -ENTRY (_get_2fp_ret) - movd 4(0(fp)), r0 - ret 0 + cmpqd 0,r0 + beq 1f -ENTRY (_get_fp) - addr 0(fp), r0 - ret 0 + addr -64(r1)[r0:b],r1 /* b = b - 64 + r0 */ + lshd -2,r0 + addr 0(r0)[r0:w],r0 + negd r0,r0 /* r0 = -3 * r0 / 4 */ -/* reboot the machine :) if possible */ + jump 2f(pc)[r0:b] /* Now enter the loop */ -ENTRY(low_level_reboot) + /* + * Zero 64 bytes per loop iteration. + */ + .align 2 +1: movqd 0,0(r1) + movqd 0,4(r1) + movqd 0,8(r1) + movqd 0,12(r1) + movqd 0,16(r1) + movqd 0,20(r1) + movqd 0,24(r1) + movqd 0,28(r1) + movqd 0,32(r1) + movqd 0,36(r1) + movqd 0,40(r1) + movqd 0,44(r1) + movqd 0,48(r1) + movqd 0,52(r1) + movqd 0,56(r1) + movqd 0,60(r1) +2: addd 64,r1 + acbd -1,r3,1b - movd -1,tos +3: cmpqd 0,r2 + beq 5f + + /* + * Zero out blocks shorter then four bytes. + */ +4: movqb 0,-1(r1)[r2:b] + acbd -1,r2,4b + +5: exit [r3] + ret 0 + + /* + * For blocks smaller then 20 bytes + * this is faster. + */ + .align 2 +6: cmpqd 3,r2 + bhs 3b + + movd r2,r0 + andd 3,r2 + lshd -2,r0 + +7: movqd 0,0(r1) + addqd 4,r1 + acbd -1,r0,7b + br 3b + +/*****************************************************************************/ + +/* + * The following primitives are used to copy data in and out of the user's + * address space. + */ + +/* + * copyout(caddr_t from, caddr_t to, size_t len); + * Copy len bytes into the user's address space. + */ +ENTRY(copyout) + enter [r3,r4],0 + movd _curpcb(pc),r4 + addr _copy_fault(pc),PCB_ONFAULT(r4) + + movd B_ARG0,r1 /* from */ + movd B_ARG1,r2 /* to */ + movd B_ARG2,r0 /* len */ + cmpqd 0,r0 + beq 9f /* anything to do? */ + + /* + * We check that each page of the destination buffer is writable + * with one movsub per page. + */ + /* Compute number of pages. */ + movd r2,r3 + andd PGOFSET,r3 + addd r0,r3 + addqd -1,r3 + lshd -PGSHIFT,r3 + + /* Do an user-write-access for first page. */ + movsub 0(r1),0(r2) + + /* More to do? */ + cmpqd 0,r3 + beq 2f + + /* Bump address to start of next page. */ + addd NBPG,r2 + andd ~PGOFSET,r2 + + /* Do an user-write-acess for all remaining pages. */ +1: movsub 0(r1),0(r2) + addd NBPG,r2 + acbd -1,r3,1b + + /* Reload to argument. */ + movd B_ARG1,r2 + +2: /* And now do the copy. */ + lshd -2,r0 + movsd + movd B_ARG2,r0 + andd 3,r0 + movsb /* This also sets r0 to zero. */ +9: movd r0,PCB_ONFAULT(r4) + exit [r3,r4] + ret 0 + +/* + * copyin(caddr_t from, caddr_t to, size_t len); + * Copy len bytes from the user's address space. + */ +ENTRY(copyin) + enter [r3,r4],0 + movd _curpcb(pc),r4 + addr _copy_fault(pc),PCB_ONFAULT(r4) + + movd B_ARG0,r1 /* from */ + movd B_ARG1,r2 /* to */ + movd B_ARG2,r0 /* len */ + cmpqd 0,r0 + beq 9f /* anything to do? */ + + /* + * We check that the end of the destination buffer is not past the end + * of the user's address space. If it's not, then we only need to + * check that each page is readable, and the CPU will do that for us. + */ + movd r1,r3 + addd r0,r3 + cmpd r3,VM_MAXUSER_ADDRESS + bhi _copy_fault + cmpd r1,r3 + bhs _copy_fault /* check for overflow. */ + + /* And now do the copy. */ + lshd -2,r0 + movsd +1: movd B_ARG2,r0 + andd 3,r0 + movsb /* This also sets r0 to zero. 
*/ +9: movd r0,PCB_ONFAULT(r4) + exit [r3,r4] + ret 0 + +ENTRY(copy_fault) + movqd 0,PCB_ONFAULT(r4) + movd EFAULT,r0 + exit [r3,r4] + ret 0 + +/* + * copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied); + * Copy a NUL-terminated string, at most maxlen characters long, into the + * user's address space. Return the number of characters copied (including the + * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else + * return 0 or EFAULT. + */ +ENTRY(copyoutstr) + enter [r3],0 + movd _curpcb(pc),r3 + addr _copystr_fault(pc),PCB_ONFAULT(r3) + movd B_ARG0,r0 /* from */ + movd B_ARG1,r1 /* to */ + movd B_ARG2,r2 /* maxlen */ + cmpqd 0,r2 + beq 2f /* anything to do? */ + +1: movsub 0(r0),0(r1) + cmpqb 0,0(r0) + beq 3f + addqd 1,r0 + addqd 1,r1 + acbd -1,r2,1b +2: movd ENAMETOOLONG,r0 + br copystr_return +3: addqd -1,r2 + movqd 0,r0 + br copystr_return + +/* + * copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied); + * Copy a NUL-terminated string, at most maxlen characters long, from the + * user's address space. Return the number of characters copied (including the + * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else + * return 0 or EFAULT. + */ +ENTRY(copyinstr) + enter [r3],0 + movd _curpcb(pc),r3 + addr _copystr_fault(pc),PCB_ONFAULT(r3) + movd B_ARG0,r0 /* from */ + movd B_ARG1,r1 /* to */ + movd B_ARG2,r2 /* maxlen */ + cmpqd 0,r2 + beq 2f /* anything to do? */ + +1: movusb 0(r0),0(r1) + cmpqb 0,0(r1) + beq 3f + addqd 1,r0 + addqd 1,r1 + acbd -1,r2,1b +2: movd ENAMETOOLONG,r0 + br copystr_return +3: addqd -1,r2 + movqd 0,r0 + br copystr_return + +ENTRY(copystr_fault) + movd EFAULT,r0 + +copystr_return: + /* Set *lencopied and return r0. */ + movqd 0,PCB_ONFAULT(r3) + movd B_ARG2,r1 + subd r2,r1 + movd B_ARG3,r2 + cmpqd 0,r2 + beq 1f + movd r1,0(r2) +1: exit [r3] + ret 0 + +/* + * copystr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied); + * Copy a NUL-terminated string, at most maxlen characters long. Return the + * number of characters copied (including the NUL) in *lencopied. If the + * string is too long, return ENAMETOOLONG; else return 0. + */ +ENTRY(copystr) + enter [r4],0 + movd B_ARG0,r1 /* from */ + movd B_ARG1,r2 /* to */ + movd B_ARG2,r0 /* maxlen */ + cmpqd 0,r0 + beq 2f /* anything to do? */ + + movqd 0,r4 /* Set match value. */ + movsb u + movd r0,r1 /* Save count. */ + bfs 1f + + /* + * Terminated due to limit count. + * Return ENAMETOOLONG. + */ + movd ENAMETOOLONG,r0 + br 2f + +1: /* + * Terminated due to match. Adjust + * count and transfer final element. + */ + addqd -1,r1 + movqd 0,r0 + movb r0,0(r2) + +2: /* Set *lencopied and return r0. */ + movd B_ARG2,r2 + subd r1,r2 + movd B_ARG3,r1 + cmpqd 0,r1 + beq 3f + movd r2,0(r1) +3: exit [r4] + ret 0 + +/* + * fuword(caddr_t uaddr); + * Fetch an int from the user's address space. + */ +ENTRY(fuword) + movd _curpcb(pc),r2 + addr _fusufault(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + /* + * MACH's locore.s code says that + * due to cpu bugs the destination + * of movusi can't be a register or tos. + */ + movusd 0(r0),S_ARG0 + movd S_ARG0,r0 + movqd 0,PCB_ONFAULT(r2) + ret 0 + +/* + * fuswintr(caddr_t uaddr); + * Fetch a short from the user's address space. Can be called during an + * interrupt. 
+ */ +ENTRY(fuswintr) + movd _curpcb(pc),r2 + addr _fusubail(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + movusw 0(r0),S_ARG0 + movqd 0,r0 + movw S_ARG0,r0 + movqd 0,PCB_ONFAULT(r2) + ret 0 + +/* + * fubyte(caddr_t uaddr); + * Fetch a byte from the user's address space. + */ +ENTRY(fubyte) + movd _curpcb(pc),r2 + addr _fusufault(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + movusb 0(r0),S_ARG0 + movqd 0,r0 + movb S_ARG0,r0 + movqd 0,PCB_ONFAULT(r2) + ret 0 + +/* + * suword(caddr_t uaddr, int x); + * Store an int in the user's address space. + */ +ENTRY(suword) + movd _curpcb(pc),r2 + addr _fusufault(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + movsud S_ARG1,0(r0) + movqd 0,r0 + movd r0,PCB_ONFAULT(r2) + ret 0 + +/* + * suswintr(caddr_t uaddr, short x); + * Store a short in the user's address space. Can be called during an + * interrupt. + */ +ENTRY(suswintr) + movd _curpcb(pc),r2 + addr _fusubail(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + movsuw S_ARG1,0(r0) + movqd 0,r0 + movd r0,PCB_ONFAULT(r2) + ret 0 + +/* + * subyte(caddr_t uaddr, char x); + * Store a byte in the user's address space. + */ +ENTRY(subyte) + movd _curpcb(pc),r2 + addr _fusufault(pc),PCB_ONFAULT(r2) + movd S_ARG0,r0 + movsub S_ARG1,0(r0) + movqd 0,r0 + movd r0,PCB_ONFAULT(r2) + ret 0 + +/* + * Handle faults from [fs]u*(). Clean up and return -1. + */ +ENTRY(fusufault) + movqd 0,PCB_ONFAULT(r2) + movqd -1,r0 + ret 0 + +/* + * Handle faults from [fs]u*(). Clean up and return -1. This differs from + * fusufault() in that trap() will recognize it and return immediately rather + * than trying to page fault. + */ +ENTRY(fusubail) + movqd 0,PCB_ONFAULT(r2) + movqd -1,r0 + ret 0 + +/*****************************************************************************/ + +/* + * The following primitives manipulate the run queues. + * _whichqs tells which of the 32 queues _qs + * have processes in them. Setrq puts processes into queues, Remrq + * removes them from queues. The running process is on no queue, + * other processes are on a queue related to p->p_pri, divided by 4 + * actually to shrink the 0-127 range of priorities into the 32 available + * queues. + */ + .globl _whichqs, _qs, _cnt, _panic + +/* + * setrunqueue(struct proc *p); + * Insert a process on the appropriate queue. Should be called at splclock(). + */ +ENTRY(setrunqueue) + movd S_ARG0,r0 +#ifdef DIAGNOSTIC + cmpqd 0,P_BACK(r0) /* should not be on q already */ + bne 1f + cmpqd 0,P_WCHAN(r0) + bne 1f + cmpb SRUN,P_STAT(r0) + bne 1f +#endif /* DIAGNOSTIC */ + movzbd P_PRIORITY(r0),r1 + lshd -2,r1 + sbitd r1,_whichqs(pc) /* set queue full bit */ + addr _qs(pc)[r1:q],r1 /* locate q hdr */ + movd P_BACK(r1),r2 /* locate q tail */ + movd r1,P_FORW(r0) /* set p->p_forw */ + movd r0,P_BACK(r1) /* update q's p_back */ + movd r0,P_FORW(r2) /* update tail's p_forw */ + movd r2,P_BACK(r0) /* set p->p_back */ + ret 0 +#ifdef DIAGNOSTIC +1: addr 3f(pc),tos /* Was on the list! */ + bsr _panic +3: .asciz "setrunqueue" +#endif + +/* + * remrq(struct proc *p); + * Remove a process from its queue. Should be called at splclock(). + */ +ENTRY(remrq) + movd S_ARG0,r1 + movzbd P_PRIORITY(r1),r0 +#ifdef DIAGNOSTIC + lshd -2,r0 + tbitd r0,_whichqs(pc) + bfc 1f +#endif /* DIAGNOSTIC */ + movd P_BACK(r1),r2 /* Address of prev. item */ + movqd 0,P_BACK(r1) /* Clear reverse link */ + movd P_FORW(r1),r1 /* Addr of next item. */ + movd r1,P_FORW(r2) /* Unlink item. 
*/ + movd r2,P_BACK(r1) + cmpd r1,r2 /* r1 = r2 => empty queue */ + bne 2f +#ifndef DIAGNOSTIC + lshd -2,r0 +#endif + cbitd r0,_whichqs(pc) /* mark q as empty */ +2: ret 0 +#ifdef DIAGNOSTIC +1: addr 3f(pc),tos /* No queue entry! */ + bsr _panic +3: .asciz "remrq" +#endif + +/* + * When no processes are on the runq, cpu_switch() branches to here to wait for + * something to come ready. + */ +ENTRY(idle) + ints_off + cmpqd 0,_whichqs(pc) + bne sw1 + ints_on /* We may lose a tick here ... */ + wait /* Wait for interrupt. */ + br _idle + +#ifdef DIAGNOSTIC +ENTRY(switch_error) + addr 1f(pc),tos + bsr _panic +1: .asciz "cpu_switch" +#endif /* DIAGNOSTIC */ + +/* + * cpu_switch(void); + * Find a runnable process and switch to it. Wait if necessary. + */ +ENTRY(cpu_switch) + enter [r3,r4,r5,r6,r7],0 + + movd _curproc(pc),r4 + + /* + * Clear curproc so that we don't accumulate system time while idle. + * This also insures that schedcpu() will move the old process to + * the correct queue if it happens to get called from the spl0() + * below and changes the priority. (See corresponding comment in + * userret()). + */ + movqd 0,_curproc(pc) + + movd _imask(pc),tos + bsr _splx /* spl0 - process pending interrupts */ + + /* + * First phase: find new process. + * + * Registers: + * r0 - queue number + * r1 - queue head + * r2 - new process + * r3 - next process in queue + * r4 - old process + */ + ints_off + +sw1: movqd 0,r0 + ffsd _whichqs(pc),r0 /* find a full q */ + bfs _idle /* if none, idle */ + + /* Get the process and unlink it from the queue. */ + addr _qs(pc)[r0:q],r1 /* address of qs entry! */ + + movd P_FORW(r1),r2 /* unlink from front of process q */ +#ifdef DIAGNOSTIC + cmpd r2,r1 /* linked to self (i.e. nothing queued? */ + beq _switch_error /* not possible */ +#endif /* DIAGNOSTIC */ + movd P_FORW(r2),r3 + movd r3,P_FORW(r1) + movd r1,P_BACK(r3) + + cmpd r1,r3 /* q empty? */ + bne 3f + + cbitd r0,_whichqs(pc) /* queue is empty, turn off whichqs. */ + +3: movqd 0,_want_resched(pc) /* We did a resched! */ + +#ifdef DIAGNOSTIC + cmpqd 0,P_WCHAN(r2) /* Waiting for something? */ + bne _switch_error /* Yes; shouldn't be queued. */ + cmpb SRUN,P_STAT(r2) /* In run state? */ + bne _switch_error /* No; shouldn't be queued. */ +#endif /* DIAGNOSTIC */ + /* Isolate process. XXX Is this necessary? */ + movqd 0,P_BACK(r2) + + /* Record new process. */ + movd r2,_curproc(pc) + + /* It's okay to take interrupts here. */ + ints_on + + /* Skip context switch if same process. */ + cmpd r2,r4 + beq switch_return + + /* If old process exited, don't bother. */ + cmpqd 0,r4 + beq switch_exited + + /* + * Second phase: save old context. + * + * Registers: + * r4 - old process, then old pcb + * r2 - new process + */ + + movd P_ADDR(r4),r4 + + /* save stack and frame pointer registers. */ + sprd sp,PCB_KSP(r4) + sprd fp,PCB_KFP(r4) + +switch_exited: + /* + * Third phase: restore saved context. + * + * Registers: + * r1 - new pcb + * r2 - new process + */ + + /* No interrupts while loading new state. */ + ints_off + movd P_ADDR(r2),r1 + + /* Switch address space. */ + lmr ptb0,PCB_PTB(r1) + lmr ptb1,PCB_PTB(r1) + + /* Restore stack and frame pointer registers. */ + lprd sp,PCB_KSP(r1) + lprd fp,PCB_KFP(r1) + + /* Record new pcb. */ + movd r1,_curpcb(pc) + + /* + * Disable the FPU. + */ + sprw cfg,r0 + andw ~CFG_F,r0 + lprw cfg,r0 + + /* Interrupts are okay again. */ + ints_on + +switch_return: + /* + * Restore old priority level from stack. + */ bsr _splx cmpqd 0,tos - ints_off /* Stop things! 
*/ - addr xxxlow(pc), r0 /* jump to low memory */ - andd ~KERNBASE, r0 - movd r0, tos - ret 0 -xxxlow: - lmr mcr, 0 /* Turn off mapping. */ - lprd sp, __save_sp(pc) /* get monitor's sp. */ - jump 0x10000032 /* Jump to the ROM! */ + movd r2,r0 /* return(p); */ + exit [r3,r4,r5,r6,r7] + ret 0 + +/*****************************************************************************/ + +/* + * FPU handling. + * Normally the FPU state is saved and restored in trap. + */ + +/* + * Check if we have a FPU installed. + */ +#ifdef NS381 +#define FPU_INSTALLED +#else +#define FPU_INSTALLED cmpqd 0,__have_fpu(pc); beq 9f +#endif + +/* + * void save_fpu_context(struct pcb *p); + * Save FPU context. + */ +ENTRY(save_fpu_context) + FPU_INSTALLED + movd S_ARG0,r0 + sprw cfg,r1 + orw CFG_F,r1 + lprw cfg,r1 + sfsr PCB_FSR(r0) + movl f0,PCB_F0(r0) + movl f1,PCB_F1(r0) + movl f2,PCB_F2(r0) + movl f3,PCB_F3(r0) + movl f4,PCB_F4(r0) + movl f5,PCB_F5(r0) + movl f6,PCB_F6(r0) + movl f7,PCB_F7(r0) +9: ret 0 + +/* + * void restore_fpu_context(struct pcb *p); + * Restore FPU context. + */ +ENTRY(restore_fpu_context) + FPU_INSTALLED + movd S_ARG0,r0 + sprw cfg,r1 + orw CFG_F,r1 + lprw cfg,r1 + lfsr PCB_FSR(r0) + movl PCB_F0(r0),f0 + movl PCB_F1(r0),f1 + movl PCB_F2(r0),f2 + movl PCB_F3(r0),f3 + movl PCB_F4(r0),f4 + movl PCB_F5(r0),f5 + movl PCB_F6(r0),f6 + movl PCB_F7(r0),f7 +9: ret 0 + +/*****************************************************************************/ + +/* + * Trap and fault vector routines + * + * On exit from the kernel to user mode, we always need to check for ASTs. In + * addition, we need to do this atomically; otherwise an interrupt may occur + * which causes an AST, but it won't get processed until the next kernel entry + * (possibly the next clock tick). Thus, we disable interrupt before checking, + * and only enable them again on the final `rett' or before calling the AST + * handler. + */ + +/* + * First some macro definitions. + */ + +/* + * Enter the kernel. Save r0-r7, usp and sb + */ +#define KENTER \ + enter [r0,r1,r2,r3,r4,r5,r6,r7],8; \ + sprd usp,REGS_USP(sp); \ + sprd sb,REGS_SB(sp) + +/* + * Exit the kernel. Restore sb, sp and r0-r7. + */ +#define KEXIT \ + lprd sb,REGS_SB(sp); \ + lprd usp,REGS_USP(sp); \ + exit [r0,r1,r2,r3,r4,r5,r6,r7]; \ + rett 0 + +/* + * Check for AST. CPU interrupts have to be disabled. + */ +#define CHECKAST \ + tbitw 8,REGS_PSR(sp); \ + bfc 9f; \ + cmpqd 0,_astpending(pc); \ + beq 9f; \ + movqd 0,_astpending(pc); \ + ints_on; \ + movd T_AST,tos; \ + movqd 0,tos; \ + movqd 0,tos; \ + bsr _trap; \ + adjspd -12; 9: + +#define TRAP(label, code) \ + .align 2; CAT(label,:); KENTER; \ + movd code,tos; \ + br handle_trap + +TRAP(trap_nmi, T_NMI) /* 1 non-maskable interrupt */ +TRAP(trap_abt, T_ABT) /* 2 abort */ +TRAP(trap_slave, T_SLAVE) /* 3 coprocessor trap */ +TRAP(trap_ill, T_ILL) /* 4 illegal operation in user mode */ +TRAP(trap_dvz, T_DVZ) /* 6 divide by zero */ +TRAP(trap_bpt, T_BPT) /* 8 breakpoint instruction */ +TRAP(trap_trc, T_TRC) /* 9 trace trap */ +TRAP(trap_und, T_UND) /* 10 undefined instruction */ +TRAP(trap_rbe, T_RBE) /* 11 restartable bus error */ +TRAP(trap_nbe, T_NBE) /* 12 non-restartable bus error */ +TRAP(trap_ovf, T_OVF) /* 13 integer overflow trap */ +TRAP(trap_dbg, T_DBG) /* 14 debug trap */ +TRAP(trap_reserved, T_RESERVED) /* 15 reserved */ + +/* + * The following handles all synchronous traps and non maskable interupts. + */ + .align 2 +handle_trap: + lprd sb,0 /* Kernel code expects sb to be 0 */ + /* + * Store the mmu status. 
+ * This is needed for abort traps. + */ + smr tear,tos + smr msr,tos + bsr _trap + adjspd -12 /* Pop off software part of frame. */ + ints_off + CHECKAST + KEXIT + +/* + * We abuse the flag trap to flush the instruction cache. + * r0 contains the start address and r1 the len of the + * region to flush. The start address of the handler is + * cache line aligned. + */ + .align 4 +#ifndef CINVSMALL +trap_flg: + cinv ia,r0 + addqd 1,tos + rett 0 +#else + .globl _cinvstart, _cinvend +trap_flg: + addqd 1,tos /* Increment return address */ + addd r0,r1 + movd r1,tos /* Save address of second line. */ + sprw psr,tos /* Push psr. */ + movqw 0,tos /* Push mod. */ + bsr 1f /* Invalidate first line. */ + /* + * Restore address of second cachline and + * fall through to do the invalidation. + */ + movd tos,r0 +1: movd r0,r1 + andd PGOFSET,r0 + lshd -PGSHIFT,r1 +_cinvstart: + movd @_PTmap[r1:d],r1 + andd ~PGOFSET,r1 + addd r0,r1 + cinv i,r1 +_cinvend: + rett 0 +#endif + +/* + * The system call trap handler. + */ + .align 2 +trap_svc: + KENTER + lprd sb,0 /* Kernel code expects sb to be 0 */ + bsr _syscall +rei: ints_off + CHECKAST + KEXIT + +/* + * The handler for all asynchronous interrupts. + */ + .align 2 +trap_int: + KENTER + lprd sb,0 /* Kernel code expects sb to be 0 */ + movd _Cur_pl(pc),tos + movb @ICU_ADR+HVCT,r0 /* fetch vector */ + andd 0x0f,r0 + movd r0,tos + movqd 1,r1 + lshd r0,r1 + orw r1,_Cur_pl(pc) /* or bit to Cur_pl */ + orw r1,@ICU_ADR+IMSK /* and to IMSK */ + /* bits set by idisabled in IMSK */ + /* have to be preserved */ + ints_off /* flush pending writes */ + ints_on /* and now turn ints on */ + addqd 1,_intrcnt(pc)[r0:d] + lshd 4,r0 + addqd 1,_cnt+V_INTR(pc) + addqd 1,_ivt+IV_CNT(r0) /* increment counters */ + movd _ivt+IV_ARG(r0),r1 /* get argument */ + cmpqd 0,r1 + bne 1f + addr 0(sp),r1 /* NULL -> push frame address */ +1: movd r1,tos + movd _ivt+IV_VEC(r0),r0 /* call the handler */ + jsr 0(r0) + + adjspd -8 /* Remove arg and vec from stack */ + bsr _splx_di /* Restore Cur_pl */ + cmpqd 0,tos + CHECKAST + KEXIT + +/* + * Finally the interrupt vector table. + */ + .align 2 +_inttab: + .long trap_int + .long trap_nmi + .long trap_abt + .long trap_slave + .long trap_ill + .long trap_svc + .long trap_dvz + .long trap_flg + .long trap_bpt + .long trap_trc + .long trap_und + .long trap_rbe + .long trap_nbe + .long trap_ovf + .long trap_dbg + .long trap_reserved + +/*****************************************************************************/ + +/* + * void *ram_size(void *start); + * Determine RAM size. + * + * First attempt: write-and-read-back (WRB) each page from start + * until WRB fails or get a parity error. This didn't work because + * address decoding wraps around. + * + * New algorithm: + * + * ret = round-up-page (start); + * loop: + * if (!WRB or parity or wrap) return ret; + * ret += pagesz; (* check end of RAM at powers of two *) + * goto loop; + * + * Several things make this tricky. First, the value read from + * an address will be the same value written to the address if + * the cache is on -- regardless of whether RAM is located at + * the address. Hence the cache must be disabled. Second, + * reading an unpopulated RAM address is likely to produce a + * parity error. Third, a value written to an unpopulated address + * can be held by capacitance on the bus and can be correctly + * read back if there is no intervening bus cycle. Hence, + * read and write two patterns. 
+ * + * Registers: + * r0 - current page, return value + * r1 - old config register + * r2 - temp config register + * r3 - pattern0 + * r4 - pattern1 + * r5 - old nmi vector + * r6 - save word at @0 + * r7 - save word at @4 + * sb - pointer to intbase + */ + +pattern0 = 0xa5a5a5a5 +pattern1 = 0x5a5a5a5a +parity_clr = 0x28000050 + +ENTRY(ram_size) + enter [r1,r2,r3,r4,r5,r6,r7],0 + sprd sb,tos + sprd intbase,r0 + lprd sb,r0 /* load intbase into sb */ + /* + * Initialize things. + */ + movd @0,r6 /* save 8 bytes of first page */ + movd @4,r7 + movd 0,@0 /* zero 8 bytes of first page */ + movd 0,@4 + sprw cfg,r1 /* turn off data cache */ + movw r1,r2 /* r1 = old config */ + andw ~CFG_DC,r2 + lprw cfg,r2 + movd 4(sb),r5 /* save old NMI vector */ + addr tmp_nmi(pc),4(sb) /* tmp NMI vector */ + cinv ia,r0 /* Vector reads go through the icache */ + movd 8(fp),r0 /* r0 = start */ + addr PGOFSET(r0),r0 /* round up to page */ + andd ~PGOFSET,r0 + movd pattern0,r3 + movd pattern1,r4 +rz_loop: + movd r3,0(r0) /* write 8 bytes */ + movd r4,4(r0) + lprw cfg,r2 /* flush write buffer */ + cmpd r3,0(r0) /* read back and compare */ + bne rz_exit + cmpd r4,4(r0) + bne rz_exit + cmpqd 0,@0 /* check for address wrap */ + bne rz_exit + cmpqd 0,@4 /* check for address wrap */ + bne rz_exit + addr NBPG(r0),r0 /* next page */ + br rz_loop +rz_exit: + movd r6,@0 /* restore 8 bytes of first page */ + movd r7,@4 + lprw cfg,r1 /* turn data cache back on */ + movd r5,4(sb) /* restore NMI vector */ + cinv ia,r0 /* Vector reads go through the icache */ + movd parity_clr,r2 + movb 0(r2),r2 /* clear parity status */ + lprd sb,tos + exit [r1,r2,r3,r4,r5,r6,r7] + ret 0 + +tmp_nmi: /* come here if parity error */ + addr rz_exit(pc),0(sp) /* modify return addr to exit */ + rett 0 /* To get back to the rom monitor .... */ ENTRY(bpt_to_monitor) /* Switch to monitor's stack. */ ints_off - bicpsrw PSR_S /* make sure we are using sp0. */ + bicpsrw PSL_US /* make sure we are using sp0. */ sprd psr, tos /* Push the current psl. */ save [r1,r2,r3,r4] sprd sp, r1 /* save kernel's sp */ sprd fp, r2 /* save kernel's fp */ sprd intbase, r3 /* Save current intbase. */ - smr ptb0, r4 /* Save current ptd! */ + smr ptb0, r4 /* Save current ptd! */ /* Change to low addresses */ - lmr ptb0, _IdlePTD(pc) /* Load the idle ptd */ + lmr ptb0, _PTDpaddr(pc) /* Load the idle ptd */ addr low(pc), r0 andd ~KERNBASE, r0 movd r0, tos @@ -322,837 +1215,18 @@ highagain: ints_on ret 0 - -/*===========================================================================* - * ram_size * - *===========================================================================* - - char * - ram_size (start) - char *start; - - Determines RAM size. - - First attempt: write-and-read-back (WRB) each page from start - until WRB fails or get a parity error. This didn't work because - address decoding wraps around. - - New algorithm: - - ret = round-up-page (start); - loop: - if (!WRB or parity or wrap) return ret; - ret += pagesz; (* check end of RAM at powers of two *) - goto loop; - - Several things make this tricky. First, the value read from - an address will be the same value written to the address if - the cache is on -- regardless of whether RAM is located at - the address. Hence the cache must be disabled. Second, - reading an unpopulated RAM address is likely to produce a - parity error. Third, a value written to an unpopulated address - can be held by capacitance on the bus and can be correctly - read back if there is no intervening bus cycle. 
Hence, - read and write two patterns. - -*/ - -cfg_dc = 0x200 -pagesz = 0x1000 -pattern0 = 0xa5a5a5a5 -pattern1 = 0x5a5a5a5a -nmi_vec = 0x44 -parity_clr = 0x28000050 - -/* - r0 current page, return value - r1 old config register - r2 temp config register - r3 pattern0 - r4 pattern1 - r5 old nmi vector - r6 save word at @0 - r7 save word at @4 -*/ -.globl _ram_size -_ram_size: - enter [r1,r2,r3,r4,r5,r6,r7],0 - # initialize things - movd @0,r6 #save 8 bytes of first page - movd @4,r7 - movd 0,@0 #zero 8 bytes of first page - movd 0,@4 - sprw cfg,r1 #turn off data cache - movw r1,r2 #r1 = old config - andw ~cfg_dc,r2 # was: com cfg_dc,r2 - lprw cfg,r2 - movd @nmi_vec,r5 #save old NMI vector - addr tmp_nmi(pc),@nmi_vec #tmp NMI vector - movd 8(fp),r0 #r0 = start - addr pagesz-1(r0),r0 #round up to page - andd ~(pagesz-1),r0 # was: com (pagesz-1),r0 - movd pattern0,r3 - movd pattern1,r4 -rz_loop: - movd r3,0(r0) #write 8 bytes - movd r4,4(r0) - lprw cfg,r2 #flush write buffer - cmpd r3,0(r0) #read back and compare - bne rz_exit - cmpd r4,4(r0) - bne rz_exit - cmpqd 0,@0 #check for address wrap - bne rz_exit - cmpqd 0,@4 #check for address wrap - bne rz_exit - addr pagesz(r0),r0 #next page - br rz_loop -rz_exit: - movd r6,@0 #restore 8 bytes of first page - movd r7,@4 - lprd cfg,r1 #turn data cache back on - movd r5,@nmi_vec #restore NMI vector - movd parity_clr,r2 - movb 0(r2),r2 #clear parity status - exit [r1,r2,r3,r4,r5,r6,r7] - ret 0 - -tmp_nmi: #come here if parity error - addr rz_exit(pc),0(sp) #modify return addr to exit - rett 0 - -/* Low level kernel support routines. */ - -/* External symbols that are needed. */ -/* .globl EX(cnt) */ -.globl EX(curproc) -.globl EX(curpcb) -.globl EX(qs) -.globl EX(whichqs) -.globl EX(want_resched) -.globl EX(Cur_pl) - -/* - User/Kernel copy routines ... {fu,su}{word,byte} and copyin/coyinstr - - These are "Fetch User" or "Save user" word or byte. They return -1 if - a page fault occurs on access. -*/ - -ENTRY(fuword) -ENTRY(fuiword) - enter [r2],0 - movd _curpcb(pc), r2 - addr fusufault(pc), PCB_ONFAULT(r2) - movd 0(B_ARG0), r0 - br fusu_ret - -ENTRY(fubyte) -ENTRY(fuibyte) - enter [r2],0 - movd _curpcb(pc), r2 - addr fusufault(pc), PCB_ONFAULT(r2) - movzbd 0(B_ARG0), r0 - br fusu_ret - -ENTRY(suword) -ENTRY(suiword) - enter [r2],0 - movqd 4, tos - movd B_ARG0, tos - bsr _check_user_write - adjspd -8 - cmpqd 0, r0 - bne fusufault - movd _curpcb(pc), r2 - addr fusufault(pc), PCB_ONFAULT(r2) - movqd 0, r0 - movd B_ARG1,0(B_ARG0) - br fusu_ret - -ENTRY(subyte) -ENTRY(suibyte) - enter [r2],0 - movqd 1, tos - movd B_ARG0, tos - bsr _check_user_write - adjspd -8 - cmpqd 0, r0 - bne fusufault - movd _curpcb(pc), r2 - addr fusufault(pc), PCB_ONFAULT(r2) - movqd 0, r0 - movb B_ARG1, 0(B_ARG0) - br fusu_ret - -fusufault: - movqd -1, r0 -fusu_ret: - movqd 0, PCB_ONFAULT(r2) - exit [r2] - ret 0 - -/* Two more fu/su routines .... for now ... just return -1. */ -ENTRY(fuswintr) -ENTRY(suswintr) - movqd -1, r0 - ret 0 - -/* C prototype: copyin ( int *usrc, int *kdst, u_int i) - C prototype: copyout ( int *ksrc, int *udst, u_int i) - - i is the number of Bytes! to copy! - - Similar code.... - */ - -ENTRY(copyout) - enter [r2,r3],0 -# Check for copying priviledges! i.e. copy on write! - movd B_ARG2, tos /* Length */ - movd B_ARG1, tos /* adr */ - bsr _check_user_write - adjspd -8 - cmpqd 0, r0 - bne cifault - br docopy - -ENTRY(copyin) - enter [r2,r3],0 -docopy: - movd _curpcb(pc), r3 - addr cifault(pc), PCB_ONFAULT(r3) - movd B_ARG2, r0 /* Length! 
*/ - movd B_ARG0, r1 /* Src adr */ - movd B_ARG1, r2 /* Dst adr */ - movsb /* Move it! */ - movqd 0, r0 - movqd 0, PCB_ONFAULT(r3) - exit [r2,r3] - ret 0 - -cifault: - movd EFAULT, r0 - movd _curpcb(pc), r3 - movqd 0, PCB_ONFAULT(r3) - exit [r2,r3] - ret 0 - +/* Include all other .s files. */ +#include "bcopy.s" /*****************************************************************************/ /* - * The following primitives manipulate the run queues. - * _whichqs tells which of the 32 queues _qs - * have processes in them. Setrq puts processes into queues, Remrq - * removes them from queues. The running process is on no queue, - * other processes are on a queue related to p->p_pri, divided by 4 - * actually to shrink the 0-127 range of priorities into the 32 available - * queues. - */ - .globl _whichqs,_qs,_cnt,_panic - -/* - * setrunqueue(struct proc *p); - * Insert a process on the appropriate queue. Should be called at splclock(). - */ -ENTRY(setrunqueue) - movd S_ARG0, r0 - movd r2, tos - - cmpqd 0, P_BACK(r0) /* should not be on q already */ - bne 1f - cmpqd 0, P_WCHAN(r0) - bne 1f - cmpb SRUN, P_STAT(r0) - bne 1f - - movzbd P_PRIORITY(r0),r1 - lshd -2,r1 - sbitd r1,_whichqs(pc) /* set queue full bit */ - addr _qs(pc)[r1:q], r1 /* locate q hdr */ - movd P_BACK(r1),r2 /* locate q tail */ - movd r1, P_FORW(r0) /* set p->p_forw */ - movd r0, P_BACK(r1) /* update q's p_back */ - movd r0, P_FORW(r2) /* update tail's p_forw */ - movd r2, P_BACK(r0) /* set p->p_back */ - movd tos, r2 - ret 0 - -1: addr 2f(pc),tos /* Was on the list! */ - bsr _panic -2: .asciz "setrunqueue problem!" - -/* - * remrq(struct proc *p); - * Remove a process from its queue. Should be called at splclock(). - */ -ENTRY(remrq) - movd S_ARG0, r1 - movd r2, tos - movzbd P_PRIORITY(r1), r0 - - lshd -2, r0 - tbitd r0, _whichqs(pc) - bfc 1f - - movd P_BACK(r1), r2 /* Address of prev. item */ - movqd 0, P_BACK(r1) /* Clear reverse link */ - movd P_FORW(r1), r1 /* Addr of next item. */ - movd r1, P_FORW(r2) /* Unlink item. */ - movd r2, P_BACK(r1) - cmpd r1, r2 /* r1 = r2 => empty queue */ - bne 2f - - cbitd r0, _whichqs(pc) /* mark q as empty */ - -2: movd tos, r2 - ret 0 - -1: addr 2f(pc),tos /* No queue entry! */ - bsr _panic -2: .asciz "remrq problem!" - -/* Switch to another process from kernel code... */ - -ENTRY(cpu_switch) - ints_off /* to make sure cpu_switch runs to completion. */ - enter [r0,r1,r2,r3,r4,r5,r6,r7],0 -/* addqd 1, _cnt+V_SWTCH(pc) */ - - movd _curproc(pc), r0 - cmpqd 0, r0 - beq sw1 - - /* Save "kernel context" - - user context is saved at trap/svc. - Kernel registers are saved at entry to swtch. */ - - movd P_ADDR(r0), r0 - sprd sp, PCB_KSP(r0) - sprd fp, PCB_KFP(r0) - smr ptb0, PCB_PTB(r0) - - /* Save the Cur_pl. */ - movd _Cur_pl(pc), PCB_PL(r0) - - movqd 0, _curproc(pc) /* no current proc! */ - -sw1: /* Get something from a Queue! */ - ints_off /* Just in case we came from Idle. */ - movqd 0, r0 - ffsd _whichqs(pc), r0 - bfs Idle - - /* Get the process and unlink it from the queue. */ - addr _qs(pc)[r0:q], r1 /* address of qs entry! */ - movd 0(r1), r2 /* get process pointer! */ - movd P_FORW(r2), r3 /* get address of next entry. */ - - /* Test code */ - cmpqd 0, r3 - bne notzero - bsr _dump_qs -notzero: - - /* unlink the entry. */ - movd r3, 0(r1) /* New head pointer. */ - movd r1, P_BACK(r3) /* New reverse pointer. */ - cmpd r1, r3 /* Empty? */ - bne restart - - /* queue is empty, turn off whichqs. */ - cbitd r0, _whichqs(pc) - -restart: /* r2 has pointer to new proc.. 
*/ - - /* Reload the new kernel context ... r2 points to proc entry. */ - movqd 0, P_BACK(r2) /* NULL p_forw */ - movqd 0, _want_resched(pc) /* We did a resched! */ - movd P_ADDR(r2), r3 /* get new pcb pointer */ - - - /* Do we need to reload floating point here? */ - - lmr ptb0, PCB_PTB(r3) - lprd sp, PCB_KSP(r3) - lprd fp, PCB_KFP(r3) - movw PCB_FLAGS(r3), r4 /* Get the flags. */ - - movd r2, _curproc(pc) - movd r3, _curpcb(pc) - - /* Restore the previous processor level. */ - movd PCB_PL(r3), tos - bsr _splx - cmpqd 0,tos - /* Return to the caller of swtch! */ - exit [r0,r1,r2,r3,r4,r5,r6,r7] - ret 0 - -/* - * The idle process! - */ -Idle: - lprd sp, KERN_INT_SP /* Set up the "interrupt" stack. */ - movqd 0, r0 - ffsd _whichqs(pc), r0 - bfc sw1 - movd _imask(pc),tos - bsr _splx - cmpqd 0,tos - wait /* Wait for interrupt. */ - br sw1 - -/* As part of the fork operation, we need to prepare a user are for - execution, to be resumed by swtch()... - - C proto is low_level_fork (struct user *up) - - up is a pointer the the "user" struct in the child. - We copy the kernel stack and update the pcb of the child to - return from low_level_fork twice. - - The first return should return a 0. The "second" return should - be because of a swtch() and should return a 1. - -*/ - -ENTRY(low_level_fork) - enter [r0,r1,r2,r3,r4,r5,r6,r7],0 - - /* Save "kernel context" - - user context is saved at trap/svc. - Kernel registers are saved at entry to swtch. */ - - movd B_ARG0, r2 /* Gets the paddr field of child. */ - sprd sp, PCB_KSP(r2) - sprd fp, PCB_KFP(r2) - /* Don't save ptb0 because child has a different ptb0! */ - movd _Cur_pl(pc), PCB_PL(r2) - - /* Copy the kernel stack from this process to new stack. */ - addr 0(sp), r1 /* Source address */ - movd r1, r3 /* Calculate the destination address */ - subd USRSTACK, r3 /* Get the offset */ - addd r3, r2 /* r2 had B_ARG0 in it. now the dest addr */ - movd r2, r5 /* Save the destination address */ - movd KSTK_SIZE, r0 /* Calculate the length of the kernel stack. */ - subd r3, r0 - - movd r0, r4 /* Check for a double alligned stack. */ - andd 3, r4 - cmpqd 0, r4 - beq kcopy - addr m_ll_fork(pc),tos /* panic if not double alligned. */ - bsr _panic - -kcopy: - lshd -2,r0 /* Divide by 4 to get # of doubles. */ - movsd /* Copy the stack! */ - - /* Set parent to return 0. */ - movqd 0,28(sp) - - /* Set child to return 1. */ - movqd 1,28(r5) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - ret 0 - -m_ll_fork: .asciz "_low_level_fork: kstack not double alligned." - -/* - * savectx(struct pcb *pcb, int altreturn); - * Update pcb, saving current processor state and arranging for alternate - * return in cpu_switch() if altreturn is true. 
- */ -ENTRY(savectx) - enter [r0,r1,r2,r3,r4,r5,r6,r7],0 - movd B_ARG0, r2 - sprd sp,PCB_KSP(r2) - sprd fp,PCB_KFP(r2) - movd _Cur_pl(pc),PCB_PL(r2) - exit [r0,r1,r2,r3,r4,r5,r6,r7] - ret 0 - -ENTRY(_trap_nmi) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movqd 1, tos - br all_trap - -ENTRY(_trap_abt) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movqd 2, tos - smr tear, tos - smr msr, tos - br abt_trap - -ENTRY(_trap_slave) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movqd 3, tos - br all_trap - -ENTRY(_trap_ill) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movqd 4, tos - br all_trap - -ENTRY(_trap_svc) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - lprd sb, 0 /* for the kernel */ - - /* Have an fpu? */ - cmpqd 0, __have_fpu(pc) - beq svc_no_fpu - - /* Save the FPU registers. */ - movd _curpcb(pc), r3 - sfsr PCB_FSR(r3) - movl f0,PCB_F0(r3) - movl f1,PCB_F1(r3) - movl f2,PCB_F2(r3) - movl f3,PCB_F3(r3) - movl f4,PCB_F4(r3) - movl f5,PCB_F5(r3) - movl f6,PCB_F6(r3) - movl f7,PCB_F7(r3) - - /* Call the system. */ - bsr _syscall - - /* Restore the FPU registers. */ - movd _curpcb(pc), r3 - lfsr PCB_FSR(r3) - movl PCB_F0(r3),f0 - movl PCB_F1(r3),f1 - movl PCB_F2(r3),f2 - movl PCB_F3(r3),f3 - movl PCB_F4(r3),f4 - movl PCB_F5(r3),f5 - movl PCB_F6(r3),f6 - movl PCB_F7(r3),f7 - - /* Restore the usp and sb. */ - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -svc_no_fpu: - /* Call the system. */ - bsr _syscall - - /* Restore the usp and sb. */ - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -ENTRY(_trap_dvz) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movqd 6, tos - br all_trap - -ENTRY(_trap_flg) - cinv ia, r0 - addqd 1, tos /* Increment return address */ - rett 0 - -ENTRY(_trap_bpt) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 8, tos - br all_trap - -ENTRY(_trap_trc) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 9, tos - br all_trap - -ENTRY(_trap_und) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 10, tos - br all_trap - -ENTRY(_trap_rbe) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 11, tos - br all_trap - -ENTRY(_trap_nbe) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 12, tos - br all_trap - -ENTRY(_trap_ovf) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 13, tos - br all_trap - -ENTRY(_trap_dbg) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 14, tos - br all_trap - -ENTRY(_trap_reserved) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp, REGS_USP(sp) - sprd sb, REGS_SB(sp) - movd 15, tos -all_trap: - movqd 0,tos /* Add 2 zeros for msr,tear in frame. */ - movqd 0,tos - -abt_trap: - lprd sb, 0 /* for the kernel */ - - /* Was this a real process? */ - cmpqd 0, _curproc(pc) - beq trap_no_fpu - - /* Have an fpu? */ - cmpqd 0, __have_fpu(pc) - beq trap_no_fpu - - /* Save the FPU registers. */ - movd _curpcb(pc), r3 /* R3 is saved by gcc. 
*/ - sfsr PCB_FSR(r3) - movl f0,PCB_F0(r3) - movl f1,PCB_F1(r3) - movl f2,PCB_F2(r3) - movl f3,PCB_F3(r3) - movl f4,PCB_F4(r3) - movl f5,PCB_F5(r3) - movl f6,PCB_F6(r3) - movl f7,PCB_F7(r3) - - bsr _trap - adjspd -12 /* Pop off software part of trap frame. */ - - /* Restore the FPU registers. */ - lfsr PCB_FSR(r3) - movl PCB_F0(r3),f0 - movl PCB_F1(r3),f1 - movl PCB_F2(r3),f2 - movl PCB_F3(r3),f3 - movl PCB_F4(r3),f4 - movl PCB_F5(r3),f5 - movl PCB_F6(r3),f6 - movl PCB_F7(r3),f7 - - /* Reload the usp and sb just in case anything has changed. */ - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -trap_no_fpu: - bsr _trap - adjspd -12 /* Pop off software part of trap frame. */ - - /* Reload the usp and sb just in case anything has changed. */ - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -/* Interrupt service routines.... */ -ENTRY(_int) - enter [r0,r1,r2,r3,r4,r5,r6,r7],8 - sprd usp,REGS_USP(sp) - sprd sb,REGS_SB(sp) - lprd sb,0 /* for the kernel */ - movd _Cur_pl(pc), tos - movb @ICU_ADR+HVCT,r0 /* fetch vector */ - andd 0x0f,r0 - movd r0,tos - movqd 1,r1 - lshd r0,r1 - orw r1,_Cur_pl(pc) /* or bit to Cur_pl */ - orw r1,@ICU_ADR+IMSK /* and to IMSK */ - /* bits set by idisabled in IMSK */ - /* have to be preserved */ - ints_off /* flush pending writes */ - ints_on /* and now turn ints on */ - addqd 1,_intrcnt(pc)[r0:d] - lshd 4,r0 - addqd 1,_cnt+V_INTR(pc) - addqd 1,_ivt+IV_CNT(r0) /* increment counters */ - movd _ivt+IV_ARG(r0),r1 /* get argument */ - cmpqd 0,r1 - bne 1f - addr 0(sp),r1 /* NULL -> push frame address */ -1: movd r1,tos - movd _ivt+IV_VEC(r0),r0 /* call the handler */ - jsr 0(r0) - - adjspd -8 /* Remove arg and vec from stack */ - bsr _splx_di /* Restore Cur_pl */ - cmpqd 0,tos - - tbitw 8, REGS_PSR(sp) /* In system mode? */ - bfs do_user_intr /* branch if yes! */ - - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -do_user_intr: - /* Do "user" mode interrupt processing, including preemption. */ - ints_off - movd _curproc(pc), r2 - cmpqd 0,r2 - beq intr_panic - - /* Have an fpu? */ - cmpqd 0, __have_fpu(pc) - beq intr_no_fpu - - /* Save the FPU registers. */ - movd _curpcb(pc), r3 /* R3 is saved by gcc. */ - sfsr PCB_FSR(r3) - movl f0,PCB_F0(r3) - movl f1,PCB_F1(r3) - movl f2,PCB_F2(r3) - movl f3,PCB_F3(r3) - movl f4,PCB_F4(r3) - movl f5,PCB_F5(r3) - movl f6,PCB_F6(r3) - movl f7,PCB_F7(r3) - -intr_no_fpu: - /* turn on interrupts! */ - ints_on - - cmpqd 0, _want_resched(pc) - beq do_usr_ret - movd 18, tos - movqd 0,tos - movqd 0,tos - bsr _trap - adjspd -12 /* Pop off software part of trap frame. */ - -do_usr_ret: - - /* Have an fpu? */ - cmpqd 0, __have_fpu(pc) - beq intr_ret_no_fpu - - /* Restore the FPU registers. r3 should be as set before. */ - lfsr PCB_FSR(r3) - movl PCB_F0(r3),f0 - movl PCB_F1(r3),f1 - movl PCB_F2(r3),f2 - movl PCB_F3(r3),f3 - movl PCB_F4(r3),f4 - movl PCB_F5(r3),f5 - movl PCB_F6(r3),f6 - movl PCB_F7(r3),f7 - -intr_ret_no_fpu: - lprd usp, REGS_USP(sp) - lprd sb, REGS_SB(sp) - exit [r0,r1,r2,r3,r4,r5,r6,r7] - rett 0 - -intr_panic: - addr intr_panic_msg(pc),tos /* panic if not double alligned. */ - bsr _panic - -intr_panic_msg: - .asciz "user mode interrupt with no current process!" - -/* Include all other .s files. */ -#include "bcopy.s" -#include "bzero.s" - - -/* pmap support??? ..... */ - -/* - * Note: This version greatly munged to avoid various assembler errors - * that may be fixed in newer versions of gas. 
Perhaps newer versions - * will have more pleasant appearance. - */ - - .set IDXSHIFT,10 - .set SYSTEM,0xFE000000 # virtual address of system start - /*note: gas copys sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */ - .set SYSPDROFF,0x3F8 # Page dir index of System Base - -/* - * PTmap is recursive pagemap at top of virtual address space. - * Within PTmap, the page directory can be found (third indirection). - */ -#define PDRPDROFF 0x03F7 /* page dir index of page dir */ - .globl _PTmap, _PTD, _PTDpde, _Sysmap - .set _PTmap,0xFDC00000 - .set _PTD,0xFDFF7000 - .set _Sysmap,0xFDFF8000 - .set _PTDpde,0xFDFF7000+4*PDRPDROFF - -/* - * APTmap, APTD is the alternate recursive pagemap. - * It's used when modifying another process's page tables. - */ -#define APDRPDROFF 0x03FE /* page dir index of page dir */ - .globl _APTmap, _APTD, _APTDpde - .set _APTmap,0xFF800000 - .set _APTD,0xFFBFE000 - .set _APTDpde,0xFDFF7000+4*APDRPDROFF - -/* - * Access to each processes kernel stack is via a region of - * per-process address space (at the beginning), immediatly above - * the user process stack. - */ -#if 0 - .set _kstack, USRSTACK - .globl _kstack -#endif - .set PPDROFF,0x3F6 -/* # .set PPTEOFF,0x400-UPAGES # 0x3FE */ - .set PPTEOFF,0x3FE - -.data -.globl _PDRPDROFF -_PDRPDROFF: - .long PDRPDROFF - -/* vmstat -i uses the following labels and __int even increments the - * counters. This information is also availiable from ivt[n].iv_use + * vmstat -i uses the following labels and trap_int even increments the + * counters. This information is also availiable from ivt[n].iv_use * and ivt[n].iv_cnt in much better form. */ .globl _intrnames, _eintrnames, _intrcnt, _eintrcnt + .text _intrnames: .asciz "int 0" .asciz "int 1" @@ -1171,6 +1245,8 @@ _intrnames: .asciz "int 14" .asciz "int 15" _eintrnames: + + .data _intrcnt: .long 0 .long 0 diff --git a/sys/arch/pc532/pc532/machdep.c b/sys/arch/pc532/pc532/machdep.c index 09d9d43eae62..be92e963b4dc 100644 --- a/sys/arch/pc532/pc532/machdep.c +++ b/sys/arch/pc532/pc532/machdep.c @@ -1,6 +1,10 @@ -/* $NetBSD: machdep.c,v 1.43 1996/01/15 05:30:47 phil Exp $ */ +/* $NetBSD: machdep.c,v 1.44 1996/01/31 21:33:58 phil Exp $ */ /*- + * Copyright (c) 1996 Matthias Pfaller. + * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved. + * Copyright (c) 1993 Philip A. Nelson. + * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * @@ -38,10 +42,6 @@ * @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ -/* - * Modified for the pc532 by Phil Nelson. 2/3/93 - */ - static char rcsid[] = "/b/source/CVS/src/sys/arch/pc532/pc532/machdep.c,v 1.2 1993/09/13 07:26:49 phil Exp"; #include @@ -65,30 +65,38 @@ static char rcsid[] = "/b/source/CVS/src/sys/arch/pc532/pc532/machdep.c,v 1.2 19 #include #include #include +#ifdef SYSVMSG +#include +#endif +#ifdef SYSVSEM +#include +#endif +#ifdef SYSVSHM +#include +#endif #include -#include - #include #include #include -#include -#include #include +#include +#include #include -#include -extern vm_offset_t avail_end; -extern struct user *proc0paddr; - -vm_map_t buffer_map; - -/* A local function... */ -void reboot_cpu(); -void dumpsys __P((void)); +/* + * Support for VERY low debugging ... in case we get NO output. + * e.g. in case pmap does not work and can't do regular mapped + * output. In this case use umprintf to display debug messages. + */ +#if VERYLOWDEBUG +#include +/* Inform scncnputc the state of mapping. 
*/ +int _mapped = 0; +#endif /* the following is used externally (sysctl_hw) */ char machine[] = "pc532"; @@ -108,298 +116,56 @@ int bufpages = BUFPAGES; #else int bufpages = 0; #endif -int msgbufmapped = 0; /* set when safe to use msgbuf */ +int physmem; +int boothowto; -/* Real low level initialization. This is called in unmapped mode and - sets up the inital page directory and page tables for the kernel. - This routine is the first to be called by locore.s to get the - kernel running at the correct place in memory. - */ +struct msgbuf *msgbufp; +int msgbufmapped; -extern char end[], _edata[]; -extern vm_offset_t avail_start; -extern vm_offset_t avail_end; +vm_map_t buffer_map; -int physmem = 0; -int maxmem = 0; - -vm_offset_t KPTphys; - -int IdlePTD; -int start_page; -int _istack; - -int low_mem_map; - -/* Support for VERY low debugging ... in case we get NO output. - (e.g. in case pmap does not work and can't do regular mapped - output. */ -#if VERYLOWDEBUG -#include -#endif - -void -_low_level_init () -{ - int ix, ix1, ix2; - int p0, p1, p2; - extern int _mapped; - -#if VERYLOWDEBUG - umprintf ("starting low level init\n"); -#endif - - mem_size = ram_size(end); - physmem = btoc(mem_size); - start_page = (((int)&end + NS532_PAGE_SIZE) & ~(NS532_PAGE_SIZE-1)) - & 0xffffff; - avail_start = start_page; - avail_end = mem_size - NS532_PAGE_SIZE; - -#if VERYLOWDEBUG - umprintf ("mem_size = 0x%x\nphysmem=%x\nstart_page=0x%x\navail_end=0x%x\n", - mem_size, physmem, start_page, avail_end); -#endif - - - /* Initialize the mmu with a simple memory map. */ - - /* A new interrupt stack, i.e. not the rom monitor's. */ - _istack = avail_start; - avail_start += NS532_PAGE_SIZE; - - /* The page directory that starts the entire mapping. */ - p0 = (int) avail_start; - IdlePTD = p0; - KPTphys = p0; - avail_start += NS532_PAGE_SIZE; - - /* First clear out the page table directory. */ - bzero((char *)p0, NS532_PAGE_SIZE); - - /* Now for the memory mapped I/O, the ICU and the eprom. */ - p1 = (int) avail_start; - avail_start += NS532_PAGE_SIZE; - bzero ((char *)p1, NS532_PAGE_SIZE); - - /* Addresses here start at FFC00000... */ - - /* Map the interrupt stack to FFC00000 - FFC00FFF */ - WR_ADR(int, p1, _istack+3); - - /* All futhur entries are cache inhibited. => 0x4? in low bits. */ - - /* The Duarts and Parity. Addresses FFC80000 */ - WR_ADR(int, p1+4*0x80, 0x28000043); - - /* SCSI Polled (Reduced space.) Addresses FFD00000 - FFDFFFFF */ - for (ix = 0x100; ix < 0x200; ix++) - WR_ADR(int, p1 + ix*4, 0x30000043 + ((ix - 0x100)<<12)); - - /* SCSI "DMA" (Reduced space.) Addresses FFE00000 - FFEEFFFF */ - for (ix = 0x200; ix < 0x2ff; ix++) - WR_ADR(int, p1 + ix*4, 0x38000043 + ((ix - 0x200)<<12)); - - /* SCSI "DMA" With A22 (EOP) Addresses FFEFF000 - FFEFFFFF */ - WR_ADR(int, p1 + 0x2ff*4, 0x38400043); - - /* The e-prom Addresses FFF00000 - FFF3FFFF */ - for (ix = 0x300; ix < 0x340; ix++) - WR_ADR(int, p1 + ix*4, 0x10000043 + ((ix - 0x300)<<12)); - - /* Finally the ICU! Addresses FFFFF000 - FFFFFFFF */ - WR_ADR(int, p1+4*0x3ff, 0xFFFFF043); - - /* Add the memory mapped I/O entry in the directory. */ - WR_ADR(int, p0+4*1023, p1 + 0x43); - - /* Map the kernel pages starting at FE00000 and at 0. - It also maps any pages past the end of the kernel, - up to the value of avail_start at this point. 
- These pages currently are: - 1 - interrupt stack - 2 - Top level page table - 3 - 2nd level page table for I/O - 4 - 2nd level page table for the kernel & low memory - 5-7 will be allocated as 2nd level page tables by pmap_bootstrap. - */ - - low_mem_map = p2 = (int) avail_start; - avail_start += NS532_PAGE_SIZE; - bzero ((char *)p2, NS532_PAGE_SIZE); - WR_ADR(int,p0+4*pdei(KERNBASE), p2 + 3); - WR_ADR(int,p0, p2+3); - - for (ix = 0; ix < (avail_start)/NS532_PAGE_SIZE; ix++) { - WR_ADR(int, p2 + ix*4, NS532_PAGE_SIZE * ix + 3); - } - - /* Load the ptb0 register and start mapping. */ - - _mapped = 1; - _load_ptb0 (p0); - asm(" lmr mcr, 3"); /* Start the machine mapping, 1 vm space. */ - -} - -extern void icu_init(); - -/* init532 is the first procedure called in mapped mode by locore.s - */ - -init532() -{ - int free_pages; - void (**int_tab)(); - extern int _save_sp; - -/*#include "ddb.h" */ -#if NDDB > 0 - kdb_init(); - if (boothowto & RB_KDB) - Debugger(); -#endif - - /* Initialize the pmap stuff.... */ - pmap_bootstrap (avail_start, 0); - /* now running on new page tables, configured, and u/iom is accessible */ - - /* Set up the proc0paddr struct. */ - proc0paddr->u_pcb.pcb_flags = 0; - proc0paddr->u_pcb.pcb_pl = 0xffffffff; - proc0paddr->u_pcb.pcb_ptb = IdlePTD; - proc0paddr->u_pcb.pcb_onstack = - (struct on_stack *) proc0paddr + UPAGES*NBPG - - sizeof (struct on_stack); - - /* Set up the ICU. */ - icu_init(); - intr_init(); -} +extern vm_offset_t avail_start, avail_end; +caddr_t allocsys __P((caddr_t)); +void dumpsys __P((void)); +void cpu_reset __P((void)); /* * Machine-dependent startup code */ -int boothowto = 0, Maxmem = 0; -long dumplo; -int physmem, maxmem; - -extern int bootdev; -extern cyloffset; - -/* pmap_enter prototype */ -void pmap_enter __P((register pmap_t, vm_offset_t, register vm_offset_t, - vm_prot_t, boolean_t)); - void -cpu_startup(void) +cpu_startup() { - register int unixsize; - register unsigned i; - register struct pte *pte; - int mapaddr, j; - register caddr_t v; - int maxbufs, base, residual; - extern long Usrptsize; + unsigned i; + caddr_t v; + int sz; + int base, residual; vm_offset_t minaddr, maxaddr; vm_size_t size; - int firstaddr; + int x; /* * Initialize error message buffer (at end of core). */ - /* avail_end was pre-decremented in pmap_bootstrap to compensate */ - for (i = 0; i < btoc(sizeof (struct msgbuf)); i++) - pmap_enter(pmap_kernel(), (vm_offset_t) msgbufp, - avail_end + i * NBPG, VM_PROT_ALL, TRUE); + for (i = 0; i < btoc(sizeof(struct msgbuf)); i++) + pmap_enter(pmap_kernel(), + (vm_offset_t)((caddr_t)msgbufp + i * NBPG), + avail_end + i * NBPG, VM_PROT_ALL, TRUE); msgbufmapped = 1; -#ifdef KDB - kdb_init(); /* startup kernel debugger */ -#endif - /* - * Good {morning,afternoon,evening,night}. - */ printf(version); - printf("\nreal mem = 0x%x\n", ctob(physmem)); + printf("real mem = %d\n", ctob(physmem)); /* - * Allocate space for system data structures. - * The first available kernel virtual address is in "v". - * As pages of kernel virtual memory are allocated, "v" is incremented. - * As pages of memory are allocated and cleared, - * "firstaddr" is incremented. - * An index into the kernel page table corresponding to the - * virtual memory address maintained in "v" is kept in "mapaddr". + * Find out how much space we need, allocate it, + * and then give everything true virtual addresses. */ - - /* - * Make two passes. The first pass calculates how much memory is - * needed and allocates it. 
The second pass assigns virtual - * addresses to the various data structures. - */ - firstaddr = 0; -again: - v = (caddr_t)firstaddr; - -#define valloc(name, type, num) \ - (name) = (type *)v; v = (caddr_t)((name)+(num)) -#define valloclim(name, type, num, lim) \ - (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) - valloc(callout, struct callout, ncallout); - valloc(swapmap, struct map, nswapmap = maxproc * 2); -#ifdef SYSVSHM - valloc(shmsegs, struct shmid_ds, shminfo.shmmni); -#endif - /* - * Determine how many buffers to allocate. - * Use 10% of memory for the first 2 Meg, 5% of the remaining - * memory. Insure a minimum of 16 buffers. - * We allocate 1/2 as many swap buffer headers as file i/o buffers. - */ - if (bufpages == 0) - if (physmem < (2 * 1024 * 1024)) - bufpages = physmem / 10 / CLSIZE; - else - bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE; - - bufpages = min(NKMEMCLUSTERS*2/5, bufpages); /* XXX ? - cgd */ - - if (nbuf == 0) { - nbuf = bufpages / 2; - if (nbuf < 16) { - nbuf = 16; - /* XXX (cgd) -- broken vfs_bio currently demands this */ - bufpages = 32; - } - } - - if (nswbuf == 0) { - nswbuf = (nbuf / 2) &~ 1; /* force even */ - if (nswbuf > 256) - nswbuf = 256; /* sanity */ - } - valloc(swbuf, struct buf, nswbuf); - valloc(buf, struct buf, nbuf); - - /* - * End of first pass, size has been calculated so allocate memory - */ - if (firstaddr == 0) { - size = (vm_size_t)(v - firstaddr); - firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); - if (firstaddr == 0) - panic("startup: no room for tables"); - goto again; - } - - /* - * End of second pass, addresses have been assigned - */ - if ((vm_size_t)(v - firstaddr) != size) + sz = (int)allocsys((caddr_t)0); + if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0) + panic("startup: no room for tables"); + if (allocsys(v) - v != sz) panic("startup: table size inconsistency"); /* @@ -413,13 +179,12 @@ again: if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0, &minaddr, size, FALSE) != KERN_SUCCESS) panic("startup: cannot allocate buffers"); + if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { + /* don't want to alloc more physical mem than needed */ + bufpages = btoc(MAXBSIZE) * nbuf; + } base = bufpages / nbuf; residual = bufpages % nbuf; - if (base >= MAXBSIZE) { - /* don't want to alloc more physical mem than needed */ - base = MAXBSIZE; - residual = 0; - } for (i = 0; i < nbuf; i++) { vm_size_t curbufsize; vm_offset_t curbuf; @@ -437,13 +202,12 @@ again: vm_map_simplify(buffer_map, curbuf); } - /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. */ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, - 16*NCARGS, TRUE); + 16*NCARGS, TRUE); /* * Allocate a submap for physio @@ -459,7 +223,8 @@ again: M_MBUF, M_NOWAIT); bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES); mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, - VM_MBUF_SIZE, FALSE); + VM_MBUF_SIZE, FALSE); + /* * Initialize callouts */ @@ -467,7 +232,7 @@ again: for (i = 1; i < ncallout; i++) callout[i-1].c_next = &callout[i]; - printf("avail mem = 0x%x\n", ptoa(cnt.v_free_count)); + printf("avail mem = %d\n", ptoa(cnt.v_free_count)); printf("using %d buffers containing %d bytes of memory\n", nbuf, bufpages * CLBYTES); @@ -482,23 +247,101 @@ again: configure(); } -#ifdef PGINPROF /* - * Return the difference (in microseconds) - * between the current time and a previous - * time as represented by the arguments. 
- * If there is a pending clock interrupt - * which has not been serviced due to high - * ipl, return error code. + * Allocate space for system data structures. We are given + * a starting virtual address and we return a final virtual + * address; along the way we set each data structure pointer. + * + * We call allocsys() with 0 to find out how much space we want, + * allocate that much and fill it with zeroes, and then call + * allocsys() again with the correct base virtual address. */ -/*ARGSUSED*/ -vmtime(otime, olbolt, oicr) - register int otime, olbolt, oicr; +caddr_t +allocsys(v) + register caddr_t v; { - return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667); -} +#define valloc(name, type, num) \ + v = (caddr_t)(((name) = (type *)v) + (num)) +#ifdef REAL_CLISTS + valloc(cfree, struct cblock, nclist); #endif + valloc(callout, struct callout, ncallout); + valloc(swapmap, struct map, nswapmap = maxproc * 2); +#ifdef SYSVSHM + valloc(shmsegs, struct shmid_ds, shminfo.shmmni); +#endif +#ifdef SYSVSEM + valloc(sema, struct semid_ds, seminfo.semmni); + valloc(sem, struct sem, seminfo.semmns); + /* This is pretty disgusting! */ + valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int)); +#endif +#ifdef SYSVMSG + valloc(msgpool, char, msginfo.msgmax); + valloc(msgmaps, struct msgmap, msginfo.msgseg); + valloc(msghdrs, struct msg, msginfo.msgtql); + valloc(msqids, struct msqid_ds, msginfo.msgmni); +#endif + + /* + * Determine how many buffers to allocate. We use 10% of the + * first 2MB of memory, and 5% of the rest, with a minimum of 16 + * buffers. We allocate 1/2 as many swap buffer headers as file + * i/o buffers. + */ + if (bufpages == 0) + if (physmem < btoc(2 * 1024 * 1024)) + bufpages = physmem / (10 * CLSIZE); + else + bufpages = (btoc(2 * 1024 * 1024) + physmem) / + (20 * CLSIZE); + if (nbuf == 0) { + nbuf = bufpages; + if (nbuf < 16) + nbuf = 16; + } + if (nswbuf == 0) { + nswbuf = (nbuf / 2) &~ 1; /* force even */ + if (nswbuf > 256) + nswbuf = 256; /* sanity */ + } + valloc(swbuf, struct buf, nswbuf); + valloc(buf, struct buf, nbuf); + return v; +} + +/* + * machine dependent system variables. + */ +cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + dev_t consdev; + + /* all sysctl names at this level are terminal */ + if (namelen != 1) + return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case CPU_CONSDEV: + if (cn_tab != NULL) + consdev = cn_tab->cn_dev; + else + consdev = NODEV; + return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev, + sizeof consdev)); + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ +} /* * Send an interrupt to process. @@ -510,7 +353,6 @@ vmtime(otime, olbolt, oicr) * frame pointer, it returns to the user * specified pc, psl. */ - void sendsig(catcher, sig, mask, code) sig_t catcher; @@ -536,8 +378,7 @@ sendsig(catcher, sig, mask, code) ps->ps_sigstk.ss_size - sizeof(struct sigframe)); ps->ps_sigstk.ss_flags |= SS_ONSTACK; } else { - fp = (struct sigframe *)(regs[REG_SP] - - sizeof(struct sigframe)); + fp = (struct sigframe *)regs[REG_SP] - 1; } if ((unsigned)fp <= (unsigned)p->p_vmspace->vm_maxsaddr + MAXSSIZ - ctob(p->p_vmspace->vm_ssize)) @@ -548,13 +389,8 @@ sendsig(catcher, sig, mask, code) * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. 
*/ - SIGACTION(p, SIGILL) = SIG_DFL; - sig = sigmask(SIGILL); - p->p_sigignore &= ~sig; - p->p_sigcatch &= ~sig; - p->p_sigmask &= ~sig; - psignal(p, SIGILL); - return; + sigexit(p, SIGILL); + /* NOTREACHED */ } /* @@ -565,19 +401,21 @@ sendsig(catcher, sig, mask, code) fp->sf_scp = &fp->sf_sc; fp->sf_handler = catcher; - /* save registers */ - bcopy (regs, fp->sf_scp->sc_reg, 8*sizeof(int)); - /* * Build the signal context to be used by sigreturn. */ fp->sf_sc.sc_onstack = oonstack; fp->sf_sc.sc_mask = mask; - fp->sf_sc.sc_sp = regs[REG_SP]; fp->sf_sc.sc_fp = regs[REG_FP]; + fp->sf_sc.sc_sp = regs[REG_SP]; fp->sf_sc.sc_pc = regs[REG_PC]; fp->sf_sc.sc_ps = regs[REG_PSR]; fp->sf_sc.sc_sb = regs[REG_SB]; + bcopy (regs, fp->sf_sc.sc_reg, 8*sizeof(int)); + + /* + * Build context to run handler in. + */ regs[REG_SP] = (int)fp; regs[REG_PC] = (int)(((char *)PS_STRINGS) - (esigcode - sigcode)); } @@ -602,56 +440,62 @@ sys_sigreturn(p, v, retval) syscallarg(struct sigcontext *) sigcntxp; } */ *uap = v; register struct sigcontext *scp; - register struct sigframe *fp; register int *regs = p->p_md.md_regs; - fp = (struct sigframe *) regs[REG_SP] ; - if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0) + /* + * The trampoline code hands us the context. + * It is unsafe to keep track of it ourselves, in the event that a + * program jumps out of a signal handler. + */ + scp = SCARG(uap, sigcntxp); + if (useracc((caddr_t)scp, sizeof (*scp), B_READ) == 0) return(EINVAL); - /* restore registers */ - bcopy (fp->sf_scp->sc_reg, regs, 8*sizeof(int)); + /* + * Check for security violations. + */ + if (((scp->sc_ps ^ regs[REG_PSR]) & PSL_USERSTATIC) != 0) + return (EINVAL); - scp = fp->sf_scp; - if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0) - return(EINVAL); -#ifdef notyet - if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) { - return(EINVAL); - } -#endif if (scp->sc_onstack & 01) p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK; else p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK; - p->p_sigmask = scp->sc_mask &~ - (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP)); - regs[REG_FP] = scp->sc_fp; - regs[REG_SP] = scp->sc_sp; - regs[REG_PC] = scp->sc_pc; + p->p_sigmask = scp->sc_mask & ~sigcantmask; + + /* + * Restore signal context. + */ + regs[REG_FP] = scp->sc_fp; + regs[REG_SP] = scp->sc_sp; + regs[REG_PC] = scp->sc_pc; regs[REG_PSR] = scp->sc_ps; - regs[REG_SB] = scp->sc_sb; + regs[REG_SB] = scp->sc_sb; + bcopy (scp->sc_reg, regs, 8*sizeof(int)); + return(EJUSTRETURN); } int waittime = -1; struct pcb dumppcb; +struct on_stack dumppcb_onstack; void boot(howto) int howto; { - register int devtype; /* r10 == major of root dev */ - extern const char *panicstr; + int s; extern int cold; - int nomsg = 1; + extern const char *panicstr; - if(cold) { - printf("cold boot: hit reset please"); - for(;;); + /* If system is cold, just halt. */ + if (cold) { + howto |= RB_HALT; + goto haltsys; } + boothowto = howto; - if ((howto&RB_NOSYNC) == 0 && waittime < 0) { + if ((howto & RB_NOSYNC) == 0 && waittime < 0) { waittime = 0; vfs_shutdown(); /* @@ -660,224 +504,67 @@ boot(howto) */ if (panicstr == 0) resettodr(); - - DELAY(10000); /* wait for printf to finish */ } - splhigh(); - devtype = major(rootdev); - if (howto&RB_HALT) { - printf ("\nThe operating system has halted.\n\n"); - cpu_reset(); - for(;;) ; - /*NOTREACHED*/ - } else { - if (howto & RB_DUMP) { + /* Disable interrupts. */ + s = splhigh(); + + /* If rebooting and a dump is requested do it. 
*/ + if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) { #if STACK_DUMP - /* dump the stack! */ - { int *fp = (int *)_get_fp(); - int i=0; - while ((u_int)fp < (u_int)UPT_MIN_ADDRESS-40) { - printf ("0x%x (@0x%x), ", fp[1], fp); - fp = (int *)fp[0]; - if (++i == 3) { printf ("\n"); i=0; } - } - } + /* dump the stack! */ + { + int *fp; + u_int limit = ns532_round_page(fp) - 40; + int i=0; + sprd(fp, fp); + while ((u_int)fp < limit) { + printf ("0x%x (@0x%x), ", fp[1], fp); + fp = (int *)fp[0]; + if (++i == 3) { + printf("\n"); + i=0; + } + } + } #endif - savectx(&dumppcb, 0); - dumppcb.pcb_ptb = _get_ptb0(); - dumpsys(); - } - doshutdownhooks(); + di(); + sprd(sp, dumppcb.pcb_ksp); + sprd(fp, dumppcb.pcb_kfp); + smr(ptb0, dumppcb.pcb_ptb); + dumppcb.pcb_onstack = &dumppcb_onstack; + sprw(psr, dumppcb_onstack.pcb_psr); + sprw(mod, dumppcb_onstack.pcb_mod); + lprd(sp, &dumppcb_onstack.pcb_mod); + __asm __volatile("bsr 1f; 1: enter [r0,r1,r2,r3,r4,r5,r6,r7],8"); + lprd(sp, dumppcb.pcb_ksp); + lprd(fp, dumppcb.pcb_kfp); + sprd(sb, dumppcb_onstack.pcb_sb); + sprd(usp, dumppcb_onstack.pcb_usp); + ei(); + dumpsys(); } - printf("rebooting ..."); - reboot_cpu(); - for(;;) ; +haltsys: + + /* + * Call shutdown hooks. Do this _before_ anything might be + * asked to the user in case nobody is there.... + */ + doshutdownhooks(); + + if (howto & RB_HALT) { + printf("\n"); + printf ("The operating system has halted.\n"); + printf("Please press any key to reboot.\n\n"); + cngetc(); + } + + printf("rebooting...\n"); + cpu_reset(); /*NOTREACHED*/ } -void -microtime(tvp) - register struct timeval *tvp; -{ - int s = splhigh(); - - *tvp = time; - tvp->tv_usec += tick; - while (tvp->tv_usec > 1000000) { - tvp->tv_sec++; - tvp->tv_usec -= 1000000; - } - splx(s); -} - -/* - * Strange exec values! (Do we want to support a minix a.out header?) - */ -int -cpu_exec_aout_makecmds() -{ - return ENOEXEC; -}; - -/* - * Clear registers on exec - */ -void -setregs(p, entry, stack, retval) - struct proc *p; - struct exec_package *entry; - u_long stack; - register_t *retval; -{ - struct on_stack *r = (struct on_stack *)p->p_md.md_regs; - int i; - -/* printf ("Setregs: entry = %x, stack = %x, (usp = %x)\n", entry, stack, - r->pcb_usp); */ - - /* Start fp at stack also! 
*/ - r->pcb_usp = stack; - r->pcb_fp = stack; - r->pcb_pc = entry->ep_entry; - r->pcb_psr = PSL_USERSET; - r->pcb_reg[0] = (int)PS_STRINGS; - for (i=1; i<8; i++) r->pcb_reg[i] = 0; - - p->p_addr->u_pcb.pcb_flags = 0; -} - - -extern struct pte *CMAP1, *CMAP2; -extern caddr_t CADDR1, CADDR2; -/* - * zero out physical memory - * specified in relocation units (NBPG bytes) - */ -clearseg(n) -{ - /* map page n in to virtual address CADDR2 */ - *(int *)CMAP2 = PG_V | PG_KW | ctob(n); - tlbflush(); - bzero(CADDR2,NBPG); - *(int *) CADDR2 = 0; -} - -/* - * copy a page of physical memory - * specified in relocation units (NBPG bytes) - */ -copyseg(frm, n) -{ - /* map page n in to virtual address CADDR2 */ - *(int *)CMAP2 = PG_V | PG_KW | ctob(n); - tlbflush(); - bcopy((void *)frm, (void *)CADDR2, NBPG); -} - -/* - * copy a page of physical memory - * specified in relocation units (NBPG bytes) - */ -physcopyseg(frm, to) -{ - /* map page frm in to virtual address CADDR1 */ - *(int *)CMAP1 = PG_V | PG_KW | ctob(frm); - /* map page to in to virtual address CADDR2 */ - *(int *)CMAP2 = PG_V | PG_KW | ctob(to); - tlbflush(); - bcopy(CADDR1, CADDR2, NBPG); -} - -/* - * insert an element into a queue - */ -#undef insque -_insque(element, head) - register struct prochd *element, *head; -{ - element->ph_link = head->ph_link; - head->ph_link = (struct proc *)element; - element->ph_rlink = (struct proc *)head; - ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element; -} - -/* - * remove an element from a queue - */ -#undef remque -_remque(element) - register struct prochd *element; -{ - ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink; - ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link; - element->ph_rlink = (struct proc *)0; -} - -vmunaccess() {printf ("vmunaccess!\n");} - -/* - * Below written in C to allow access to debugging code - */ -copyinstr(fromaddr, toaddr, maxlength, lencopied) size_t *lencopied, maxlength; - void *toaddr, *fromaddr; { - int c,tally; - - tally = 0; - while (maxlength--) { - c = fubyte(fromaddr++); - if (c == -1) { - if(lencopied) *lencopied = tally; - return(EFAULT); - } - tally++; - *(char *)toaddr++ = (char) c; - if (c == 0){ - if(lencopied) *lencopied = (u_int)tally; - return(0); - } - } - if(lencopied) *lencopied = (u_int)tally; - return(ENAMETOOLONG); -} - -copyoutstr(fromaddr, toaddr, maxlength, lencopied) size_t *lencopied, maxlength; - void *fromaddr, *toaddr; { - int c; - int tally; - - tally = 0; - while (maxlength--) { - c = subyte(toaddr++, *(char *)fromaddr); - if (c == -1) return(EFAULT); - tally++; - if (*(char *)fromaddr++ == 0){ - if(lencopied) *lencopied = tally; - return(0); - } - } - if(lencopied) *lencopied = tally; - return(ENAMETOOLONG); -} - -copystr(fromaddr, toaddr, maxlength, lencopied) size_t *lencopied, maxlength; - void *fromaddr, *toaddr; { - u_int tally; - - tally = 0; - while (maxlength--) { - *(u_char *)toaddr = *(u_char *)fromaddr++; - tally++; - if (*(u_char *)toaddr++ == 0) { - if(lencopied) *lencopied = tally; - return(0); - } - } - if(lencopied) *lencopied = tally; - return(ENAMETOOLONG); -} - /* * These variables are needed by /sbin/savecore */ @@ -975,7 +662,7 @@ dumpsys() while (sget() != NULL); /*syscons and pccons differ */ #endif - bytes = mem_size; + bytes = ctob(physmem); maddr = 0; blkno = dumplo; dump = bdevsw[major(dumpdev)].d_dump; @@ -1036,98 +723,368 @@ dumpsys() break; } printf("\n\n"); - delay(5000000); /* 5 seconds */ + DELAY(5000000); /* 5 seconds */ } -/* Stub function for 
reboot_cpu. */ - -void reboot_cpu() +void +microtime(tvp) + register struct timeval *tvp; { - extern void low_level_reboot(); - - /* Point Low MEMORY to Kernel Memory! */ - *((int *)PTD) = low_mem_map+3; /* PTD[pdei(KERNBASE)]; */ - low_level_reboot(); + int s = splhigh(); + *tvp = time; + tvp->tv_usec += tick; + splx(s); + while (tvp->tv_usec > 1000000) { + tvp->tv_sec++; + tvp->tv_usec -= 1000000; + } } -int -sys_sysarch(p, v, retval) +/* + * Clear registers on exec + */ +void +setregs(p, pack, stack, retval) struct proc *p; - void *v; + struct exec_package *pack; + u_long stack; register_t *retval; { - struct sysarch_args /* { - syscallarg(int) op; - syscallarg(char *) parms; - } */ *uap = v; + struct on_stack *r = (struct on_stack *)p->p_md.md_regs; + struct pcb *pcbp = &p->p_addr->u_pcb; + extern struct proc *fpu_proc; - return ENOSYS; + if (p == fpu_proc) + fpu_proc = 0; + + r->pcb_usp = stack; + r->pcb_fp = 0; + r->pcb_pc = pack->ep_entry; + r->pcb_psr = PSL_USERSET; + bzero(r->pcb_reg, sizeof(r->pcb_reg)); + r->pcb_reg[0] = (int)PS_STRINGS; + + pcbp->pcb_fsr = 0; + bzero(pcbp->pcb_freg, sizeof(pcbp->pcb_freg)); + + retval[1] = 0; +} + +/* + * Allocate memory pages. + */ +static vm_offset_t +alloc_pages(pages) + int pages; +{ + vm_offset_t p = avail_start; + avail_start += pages * NBPG; + bzero((caddr_t) p, pages * NBPG); + return(p); +} + +/* + * Map physical to virtual addresses in the kernel page table directory. + * If -1 is passed as physical address, empty second level page tables + * are allocated for the selected virtual address range. + */ +static void +map(pd, virtual, physical, protection, size) + pd_entry_t *pd; + vm_offset_t virtual, physical; + int protection, size; +{ + u_int ix1 = pdei(virtual); + u_int ix2 = ptei(virtual); + pt_entry_t *pt = (pt_entry_t *) (pd[ix1] & PG_FRAME); + + while (size > 0) { + if (pt == 0) { + pt = (pt_entry_t *) alloc_pages(1); + pd[ix1] = (pd_entry_t) pt | PG_V | PG_KW; + } + if (physical != (vm_offset_t) -1) { + pt[ix2] = (pt_entry_t) (physical | protection | PG_V); + physical += NBPG; + size -= NBPG; + } else { + size -= (NPTEPD - ix2) * NBPG; + ix2 = NPTEPD - 1; + } + if (++ix2 == NPTEPD) { + ix1++; + ix2 = 0; + pt = (pt_entry_t *) (pd[ix1] & PG_FRAME); + } + } +} + +/* + * init532 is the first (and last) procedure called by locore.s. + * + * Level one and level two page tables are initialized to create + * the following mapping: + * 0xfdbfe000-0xfdbfffff: UAREA of process 0 + * 0xfdc00000-0xfdffefff: Kernel level two page tables + * 0xfdfff000-0xfdffffff: Kernel level one page table + * 0xfe000000-0xff7fffff: Kernel code and data + * 0xffc00000-0xffc00fff: Kernel temporary stack + * 0xffc80000-0xffc80fff: Duarts and Parity control + * 0xffd00000-0xffdfffff: SCSI polled + * 0xffe00000-0xffefefff: SCSI DMA + * 0xffeff000-0xffefffff: SCSI DMA with EOP + * 0xfff00000-0xfff3ffff: EPROM + * + * 0xfe000000-0xfe400000 is (temporary) mirrored at address 0. + * + * The intbase register is initialized to point to the interrupt + * vector table in locore.s. + * + * The cpu config register gets set. + * + * avail_start, avail_end, physmem, PTDpaddr and proc0paddr are set + * to the correct values. + * + * The last action is to switch stacks and call main. 
+ */ + +extern struct user *proc0paddr; + +#define kppa(x) (ns532_round_page(x) & 0xffffff) +#define kvpa(x) (ns532_round_page(x)) + +void +init532() +{ + extern void icu_init(); + extern int inttab[]; + extern char etext[], edata[], end[]; + pd_entry_t *pd; + + +#if VERYLOWDEBUG + umprintf ("Starting init532\n"); +#endif + +#ifndef NS381 + { + /* Check if we have a FPU installed. */ + extern int _have_fpu; + int cfg; + sprd(cfg, cfg); + if (cfg & CFG_F) + _have_fpu = 1; + } +#endif + + /* + * Setup the cfg register. + * We enable instruction cache, data cache + * the memory management instruction set and + * direct exception mode. + */ + lprd(cfg, CFG_ONE | CFG_IC | CFG_DC | CFG_DE | CFG_M); + + /* Setup memory allocation variables. */ + avail_start = kppa(end); + avail_end = ram_size(avail_start); + physmem = btoc(avail_end); + +#if VERYLOWDEBUG + umprintf ("avail_start = 0x%x\navail_end=0x%x\nphysmem=0x%x\n", + avail_start, avail_end, physmem); +#endif + + /* + * Load the address of the kernel's + * trap/interrupt vector table. + */ + lprd(intbase, inttab); + + /* Allocate page table directory */ + pd = (pd_entry_t *) alloc_pages(1); + PTDpaddr = (int)pd; + + /* Recursively map in the page directory */ + pd[PTDPTDI] = (pd_entry_t)pd | PG_V | PG_KW; + + /* Map interrupt stack. */ + map(pd, 0xffc00000, alloc_pages(1), PG_KW, 0x001000); + + /* Map Duarts and Parity. */ + map(pd, 0xffc80000, 0x28000000, PG_KW | PG_N, 0x001000); + + /* Map SCSI Polled (Reduced space). */ + map(pd, 0xffd00000, 0x30000000, PG_KW | PG_N, 0x100000); + + /* Map SCSI DMA (Reduced space). */ + map(pd, 0xffe00000, 0x38000000, PG_KW | PG_N, 0x0ff000); + + /* Map SCSI DMA (With A22 "EOP"). */ + map(pd, 0xffeff000, 0x38400000, PG_KW | PG_N, 0x001000); + + /* Map EPROM (for realtime clock). */ + map(pd, 0xfff00000, 0x10000000, PG_KW | PG_N, 0x040000); + + /* Map the ICU. */ + map(pd, 0xfffff000, 0xfffff000, PG_KW | PG_N, 0x001000); + + /* Map UAREA for proc0. */ + proc0paddr = (struct user *)alloc_pages(UPAGES); + proc0paddr->u_pcb.pcb_ptb = (int) pd; + proc0paddr = (struct user *) ((vm_offset_t)proc0paddr + KERNBASE); + proc0.p_addr = proc0paddr; + + /* Allocate second level page tables for kernel virtual address space */ + map(pd, VM_MIN_KERNEL_ADDRESS, (vm_offset_t)-1, 0, + VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS); + + /* Map monitor scratch area R/W. */ + map(pd, KERNBASE, 0x00000000, PG_KW, 0x2000); + /* Map kernel text R/O. */ + map(pd, KERNBASE+0x2000, 0x00002000, PG_KR, kppa(etext) - 0x2000); + /* Map kernel data+bss R/W. */ + map(pd, kvpa(etext), kppa(etext), PG_KW, avail_start - kppa(etext)); + + /* Alias the mapping at KERNBASE to 0 */ + pd[pdei(0)] = pd[pdei(KERNBASE)]; + + /* Load the ptb registers and start mapping. */ + load_ptb(pd); + lmr(mcr, 3); + +#if VERYLOWDEBUG + umprintf ("Just before jump to high memory.\n"); +#endif + + /* Jump to high memory */ + __asm __volatile("jump @1f; 1:"); + +#if VERYLOWDEBUG + /* Let scncnputc know which form to use. */ + _mapped = 1; +#endif + + /* Set up the ICU. */ + icu_init(); + intr_init(); + +#ifdef DDB + ddb_init(); + if (boothowto & RB_KDB) + Debugger(); +#endif +#ifdef KGDB + if (boothowto & RB_KDB) + kgdb_connect(0); +#endif + + /* Initialize the pmap module. */ + pmap_bootstrap(avail_start + KERNBASE); + + /* Construct an empty syscframe for proc0. */ + curpcb = &proc0.p_addr->u_pcb; + curpcb->pcb_onstack = (struct on_stack *) + ((u_int)proc0.p_addr + USPACE) - 1; + + /* Switch to proc0's stack. 
*/ + lprd(sp, curpcb->pcb_onstack); + lprd(fp, 0); + + main(curpcb->pcb_onstack); + panic("main returned to init532\n"); +} + +/* + * insert an element into a queue + */ +void +_insque(element, head) + register struct prochd *element, *head; +{ + element->ph_link = head->ph_link; + head->ph_link = (struct proc *)element; + element->ph_rlink = (struct proc *)head; + ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element; +} + +/* + * remove an element from a queue + */ +void +_remque(element) + register struct prochd *element; +{ + ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink; + ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link; + element->ph_rlink = (struct proc *)0; +} + +/* + * cpu_exec_aout_makecmds(): + * cpu-dependent a.out format hook for execve(). + * + * Determine of the given exec package refers to something which we + * understand and, if so, set up the vmcmds for it. + * + * On the ns532 there are no binary compatibility options (yet), + * Any takers for Sinix, Genix, SVR2/32000 or Minix? + */ +int +cpu_exec_aout_makecmds(p, epp) + struct proc *p; + struct exec_package *epp; +{ + return ENOEXEC; } /* * consinit: * initialize the system console. - * XXX - shouldn't deal with this cons_initted thing, but then, - * it shouldn't be called from init386 either. + * XXX - shouldn't deal with this initted thing. */ -static int cons_initted; - void consinit() { - if (!cons_initted) { - cninit(); - cons_initted = 1; - } + static int initted; + if (initted) + return; + initted = 1; + cninit(); } -/* DEBUG routine */ - -void dump_qs() -{ int ix; - struct proc *ptr; - - for (ix=0; ixp_forw, ptr->p_back); - ptr = ptr->p_forw; - } while (ptr != (struct proc *)0 && ptr != qs[ix].ph_link); - } - panic("nil P_BACK"); -} - -/* - * machine dependent system variables. - */ -cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) - int *name; - u_int namelen; - void *oldp; - size_t *oldlenp; - void *newp; - size_t newlen; - struct proc *p; +void +cpu_reset() { - dev_t consdev; + /* Mask all ICU interrupts. */ + splhigh(); - /* all sysctl names at this level are terminal */ - if (namelen != 1) - return (ENOTDIR); /* overloaded */ + /* Disable CPU interrupts. */ + di(); - switch (name[0]) { - case CPU_CONSDEV: - if (cn_tab != NULL) - consdev = cn_tab->cn_dev; - else - consdev = NODEV; - return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev, - sizeof consdev)); - default: - return (EOPNOTSUPP); - } - /* NOTREACHED */ + /* Alias kernel memory at 0. */ + PTD[0] = PTD[pdei(KERNBASE)]; + pmap_update(); + + /* Jump to low memory. */ + __asm __volatile( + "addr 1f(pc),r0;" + "andd ~%0,r0;" + "jump 0(r0);" + "1:" + : : "i" (KERNBASE) : "r0" + ); + + /* Turn off mapping. */ + lmr(mcr, 0); + + /* Use monitor scratch area as stack. */ + lprd(sp, 0x2000); + + /* Copy start of ROM. */ + bcopy((void *)0x10000000, (void *)0, 0x1f00); + + /* Jump into ROM copy. */ + __asm __volatile("jump @0"); } diff --git a/sys/arch/pc532/pc532/pmap.c b/sys/arch/pc532/pc532/pmap.c index bf6089666fab..07b35968c9f5 100644 --- a/sys/arch/pc532/pc532/pmap.c +++ b/sys/arch/pc532/pc532/pmap.c @@ -1,6 +1,7 @@ -/* $NetBSD: pmap.c,v 1.10 1995/08/25 07:49:13 phil Exp $ */ +/* $NetBSD: pmap.c,v 1.11 1996/01/31 21:34:00 phil Exp $ */ -/* +/* + * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved. * Copyright (c) 1991 Regents of the University of California. * All rights reserved. 
* @@ -40,21 +41,15 @@ */ /* - * Derived from hp300 version by Mike Hibler, this version by William - * Jolitz uses a recursive map [a pde points to the page directory] to - * map the page tables using the pagetables themselves. This is done to - * reduce the impact on kernel virtual memory for lots of sparse address - * space, and to reduce the cost of memory to each process. - * - * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90 - */ - -/* - * Reno i386 version, from Mike Hibler's hp300 version. - */ - -/* - * Most recently made to be a pc532 pmap! (Phil Nelson, 1/14/93) + * Derived originally from an old hp300 version by Mike Hibler. The version + * by William Jolitz has been heavily modified to allow non-contiguous + * mapping of physical memory by Wolfgang Solfrank, and to fix several bugs + * and greatly speedup it up by Charles Hannum. + * + * A recursive map [a pde which points to the page directory] is used to map + * the page tables using the pagetables themselves. This is done to reduce + * the impact on kernel virtual memory for lots of sparse address space, and + * to reduce the cost of memory to each process. */ /* @@ -89,22 +84,16 @@ #include #include -#include #include #include -/* Prototypes of routines used here. */ - -vm_offset_t pmap_extract(pmap_t, vm_offset_t); -void pmap_activate(register pmap_t, struct pcb *); -extern vm_offset_t reserve_dumppages __P((vm_offset_t)); +#include /* * Allocate various and sundry SYSMAPs used in the days of old VM * and not yet converted. XXX. */ - -#define BSDVM_COMPAT 1 +#define BSDVM_COMPAT 1 #ifdef DEBUG struct { @@ -131,88 +120,74 @@ struct { int sflushes; } remove_stats; -int debugmap = 0; -int pmapdebug = 0; /* 0xffff */ -#define PDB_FOLLOW 0x0001 -#define PDB_INIT 0x0002 -#define PDB_ENTER 0x0004 -#define PDB_REMOVE 0x0008 -#define PDB_CREATE 0x0010 -#define PDB_PTPAGE 0x0020 -#define PDB_CACHE 0x0040 -#define PDB_BITS 0x0080 -#define PDB_COLLECT 0x0100 -#define PDB_PROTECT 0x0200 -#define PDB_PDRTAB 0x0400 -#define PDB_PARANOIA 0x2000 -#define PDB_WIRING 0x4000 -#define PDB_PVDUMP 0x8000 - -int pmapvacflush = 0; -#define PVF_ENTER 0x01 -#define PVF_REMOVE 0x02 -#define PVF_PROTECT 0x04 -#define PVF_TOTAL 0x80 +int pmapdebug = 0 /* 0xffff */; +#define PDB_FOLLOW 0x0001 +#define PDB_INIT 0x0002 +#define PDB_ENTER 0x0004 +#define PDB_REMOVE 0x0008 +#define PDB_CREATE 0x0010 +#define PDB_PTPAGE 0x0020 +#define PDB_CACHE 0x0040 +#define PDB_BITS 0x0080 +#define PDB_COLLECT 0x0100 +#define PDB_PROTECT 0x0200 +#define PDB_PDRTAB 0x0400 +#define PDB_PARANOIA 0x2000 +#define PDB_WIRING 0x4000 +#define PDB_PVDUMP 0x8000 #endif /* * Get PDEs and PTEs for user/kernel address space */ -#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023])) +#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PDSHIFT)&1023])) -#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME) - -#define pmap_pde_v(pte) ((pte)->pd_v) -#define pmap_pte_w(pte) ((pte)->pg_w) -/* #define pmap_pte_ci(pte) ((pte)->pg_ci) */ -#define pmap_pte_m(pte) ((pte)->pg_m) -#define pmap_pte_u(pte) ((pte)->pg_u) -#define pmap_pte_v(pte) ((pte)->pg_v) -#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v)) -#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v)) - - -/* for debug output */ -#define pg printf +/* + * Empty PTEs and PDEs are always 0, but checking only the valid bit allows + * the compiler to generate `testb' rather than `testl'. 
+ */ +#define pmap_pde_v(pde) (*(pde) & PG_V) +#define pmap_pte_pa(pte) (*(pte) & PG_FRAME) +#define pmap_pte_w(pte) (*(pte) & PG_W) +#define pmap_pte_m(pte) (*(pte) & PG_M) +#define pmap_pte_u(pte) (*(pte) & PG_U) +#define pmap_pte_v(pte) (*(pte) & PG_V) +#define pmap_pte_set_w(pte, v) ((v) ? (*(pte) |= PG_W) : (*(pte) &= ~PG_W)) +#define pmap_pte_set_prot(pte, v) ((*(pte) &= ~PG_PROT), (*(pte) |= (v))) /* * Given a map and a machine independent protection code, * convert to a ns532 protection code. */ -#define pte_prot(m, p) (protection_codes[p]) -int protection_codes[8]; +pt_entry_t protection_codes[8]; -struct user *proc0paddr; struct pmap kernel_pmap_store; vm_offset_t avail_start; /* PA of first available physical page */ vm_offset_t avail_end; /* PA of last available physical page */ -vm_size_t mem_size; /* memory size in bytes */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ vm_offset_t vm_first_phys; /* PA of first managed page */ vm_offset_t vm_last_phys; /* PA just past last managed page */ -int ns532pagesperpage; /* PAGE_SIZE / NS532_PAGE_SIZE */ +int npages; + boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ short *pmap_attributes; /* reference and modify bits */ +TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; +int pv_nfree; -boolean_t pmap_testbit(); -void pmap_clear_modify(); +pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t)); #if BSDVM_COMPAT -#include "msgbuf.h" +#include /* * All those kernel PT submaps that BSD is so fond of */ -struct pte *CMAP1, *CMAP2, *xxx_mmap; +pt_entry_t *CMAP1, *CMAP2, *XXX_mmap; caddr_t CADDR1, CADDR2, vmmap; -struct pte *msgbufmap; -struct msgbuf *msgbufp; -#endif - -vm_offset_t KPTphys; -extern int PDRPDROFF; +pt_entry_t *msgbufmap; +#endif /* BSDVM_COMPAT */ /* * Bootstrap memory allocator. 
This function allows for early dynamic @@ -233,8 +208,8 @@ pmap_bootstrap_alloc(size) vm_offset_t val; if (vm_page_startup_initialized) - panic("pmap_bootstrap_alloc: called after startup initialized")\ -; + panic("pmap_bootstrap_alloc: called after startup initialized"); + size = round_page(size); val = virtual_avail; @@ -242,211 +217,73 @@ pmap_bootstrap_alloc(size) avail_start + size, VM_PROT_READ|VM_PROT_WRITE); avail_start += size; - blkclr ((caddr_t) val, size); + bzero((caddr_t) val, size); return ((void *) val); } - - -/* static */ -void -v_probe(pmap_t pmap, vm_offset_t va) -{ - int *ptr; - struct pde *pde_entry; - struct pte *pte_entry; - - /* get a pointer to the top level page table entry */ - pde_entry = pmap_pde(pmap, va); - if (!pmap_pde_v(pde_entry)) { - printf("va 0x%x, no top-level entry\n", va); - return; - } - ptr = (int *) pde_entry; - pte_entry = ((struct pte *) ((*ptr & PG_FRAME) | KERNBASE)) + ptei(va); - ptr = (int *) pte_entry; - if (!pmap_pte_v(pte_entry)) { - printf("pte_entry 0x%x *pte_entry 0x%x\n", ptr, *ptr); - printf("va 0x%x, no 2nd-level entry\n", va); - return; - } - /* print the page table entry */ - printf("v_probe: va 0x%x pa 0x%x entry 0x%x\n", - va, (*ptr & PG_FRAME) | (va & ~PG_FRAME), *ptr); -} - -static -void -map_page(pmap_t pmap, vm_offset_t va, vm_offset_t pa) -{ - int *ptr; - struct pde *pde_entry; - struct pte *pte_entry; - - /* get a pointer to the top level page table entry */ - pde_entry = pmap_pde(pmap, va); - if (!pmap_pde_v(pde_entry)) { - printf("map_page(0x%x, 0x%x, 0x%x) failed\n", pmap, va, pa); - panic("missing 2nd level page table"); - } - /* get a pointer to the 2nd level table entry */ - ptr = (int *) pde_entry; - pte_entry = ((struct pte *) (*ptr & PG_FRAME)) + ptei(va); - if (pmap_pte_v(pte_entry)) { - printf("map_page(0x%x, 0x%x, 0x%x) failed\n", pmap, va, pa); - panic("2nd level page table entry already valid"); - } - /* make the page table entry */ - ptr = (int *) pte_entry; - *ptr = pa | PG_V | PG_KW; - /* just to be safe */ - tlbflush(); -} - -/* - * Map in physical page 'pa' to virtual address 'va', and install - * as a 2nd level page table at index 'index' for the given - * pmap. - */ -static -void -map_page_table(pmap_t pmap, int index, vm_offset_t va, vm_offset_t pa) -{ - int *ptr = (int *) &pmap->pm_pdir[index]; - if (*ptr) { - printf("2nd level table present for index %x\n", index); - panic("remapping 2nd level table"); - } - /* map in the 2nd level table */ - map_page(pmap_kernel(), va, pa); - /* init the 2nd level table to all invalid */ - bzero(pa, NBPG); - /* install the 2nd level table */ - *ptr = pa | PG_V | PG_KW; - /* just to be safe */ - tlbflush(); -} - /* * Bootstrap the system enough to run with virtual memory. * Map the kernel's code and data, and allocate the system page table. * - * On the Ns532 this is called after mapping has already been enabled + * On the NS532 this is called after mapping has already been enabled * and just syncs the pmap module with what has already been done. 
* [We can't call it easily with mapping off since the kernel is not * mapped with PA == VA, hence we would have to relocate every address - * from the linked base (virtual) address 0xFE000000 to the actual - * (physical) address starting relative to 0] + * from the linked base (virtual) address to the actual (physical) + * address starting relative to 0] */ -struct pte *pmap_pte(); void -pmap_bootstrap(firstaddr, loadaddr) - vm_offset_t firstaddr; - vm_offset_t loadaddr; +pmap_bootstrap(virtual_start) + vm_offset_t virtual_start; { - int x, *ptr; #if BSDVM_COMPAT vm_offset_t va; - struct pte *pte; + pt_entry_t *pte; #endif - extern vm_offset_t maxmem, physmem; - - ns532pagesperpage = 1; /* PAGE_SIZE / NS532_PAGE_SIZE; */ - - /* - * Initialize protection array. - */ - ns532_protection_init(); - - /* setup avail_start, avail_end, virtual_avail, virtual_end */ - avail_start = firstaddr; - avail_end = mem_size; + extern int physmem; + extern vm_offset_t reserve_dumppages(vm_offset_t); /* XXX: allow for msgbuf */ avail_end -= ns532_round_page(sizeof(struct msgbuf)); - virtual_avail = avail_start + KERNBASE; + virtual_avail = virtual_start; virtual_end = VM_MAX_KERNEL_ADDRESS; /* - * Create Kernel page directory table and page maps. + * Initialize protection array. */ - pmap_kernel()->pm_pdir = (pd_entry_t *) (KPTphys + KERNBASE); - /* recursively map in ptb0 */ - ptr = ((int *) pmap_kernel()->pm_pdir) + PDRPDROFF; - if (*ptr) { - printf("ptb0 0x%x offset 0x%x should be 0 but is 0x%x\n", - pmap_kernel()->pm_pdir, PDRPDROFF, *ptr); - bpt_to_monitor(); - } - /* don't add KERNBASE as this has to be a physical address */ - *ptr = KPTphys | PG_V | PG_KW; - /* fill in the rest of the top-level kernel VA entries */ - for (x = ns532_btod(VM_MIN_KERNEL_ADDRESS); - x < ns532_btod(VM_MAX_KERNEL_ADDRESS); x++) { - ptr = (int *) &pmap_kernel()->pm_pdir[x]; - /* only fill in the entries not yet made in _low_level_init() */ - if (!*ptr) { - /* map in the page table */ - map_page_table(pmap_kernel(), x, - virtual_avail, avail_start); - avail_start += NBPG; - virtual_avail += NBPG; - } - } - /* map in the kernel stack for process 0 */ - /* install avail_start as a 2nd level table for index 0x3f6 */ - map_page_table(pmap_kernel(), 0x3f6, virtual_avail, avail_start); - avail_start += NBPG; - virtual_avail += NBPG; - /* reserve UPAGES pages */ - proc0paddr = (struct user *) virtual_avail; - curpcb = (struct pcb *) proc0paddr; - va = ns532_dtob(0x3f6) | ns532_ptob(0x3fe); /* USRSTACK ? 
*/ - for (x = 0; x < UPAGES; ++x) { - map_page(pmap_kernel(), va, avail_start); - map_page(pmap_kernel(), virtual_avail, avail_start); - bzero(va, NBPG); - va += NBPG; - avail_start += NBPG; - virtual_avail += NBPG; - } + ns532_protection_init(); + + pmap_kernel()->pm_pdir = + (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_ptb + KERNBASE); simple_lock_init(&pmap_kernel()->pm_lock); pmap_kernel()->pm_count = 1; -#ifdef DEBUG - printf("avail_start = 0x%x\n", avail_start); - printf("avail_end = 0x%x\n", avail_end); - printf("virtual_avail = 0x%x\n", virtual_avail); - printf("virtual_end = 0x%x\n", virtual_end); -#endif - #if BSDVM_COMPAT /* * Allocate all the submaps we need */ #define SYSMAP(c, p, v, n) \ - v = (c)va; va += ((n)*NS532_PAGE_SIZE); p = pte; pte += (n); + v = (c)va; va += ((n)*NBPG); p = pte; pte += (n); va = virtual_avail; pte = pmap_pte(pmap_kernel(), va); SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 ) SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 ) - SYSMAP(caddr_t ,xxx_mmap ,vmmap ,1 ) + SYSMAP(caddr_t ,XXX_mmap ,vmmap ,1 ) SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 ) virtual_avail = va; #endif - virtual_avail = reserve_dumppages(va); -#ifdef DEBUG - printf("virtual_avail = 0x%x\n", virtual_avail); -#endif - tlbflush(); - /* XXX why do we do this??? - MM */ - *(int *)PTD = 0; - tlbflush(); + + /* + * Reserve pmap space for mapping physical pages during dump. + */ + virtual_avail = reserve_dumppages(virtual_avail); + + pmap_update(); } /* @@ -456,60 +293,30 @@ pmap_bootstrap(firstaddr, loadaddr) */ void pmap_init(phys_start, phys_end) - vm_offset_t phys_start, phys_end; + vm_offset_t phys_start; + vm_offset_t phys_end; { - int result; - vm_offset_t addr, addr2; - vm_size_t npg, s; - int rv; - -#ifdef DEBUG - if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) - printf("pmap_init(0x%x, 0x%x)\n", phys_start, phys_end); -#endif + vm_offset_t addr, addr2; + vm_size_t s; + int rv; if (PAGE_SIZE != NBPG) panic("pmap_init: CLSIZE != 1"); - /* - * Now that kernel map has been allocated, we can mark as - * unavailable regions which we have mapped in locore. - */ -#if 0 - /* the following reserves the (virtual) i/o space */ - addr = 0xffc00000; - result = vm_map_find(kernel_map, NULL, (vm_offset_t) 0, - &addr, NBPG, FALSE); - if (result != KERN_SUCCESS) { - printf("vm_map_find for virtual i/o space failed %d\n", result); - } - - /* reserve the used page tables following the kernel */ - /* bumped this to 10 pages just to be paranoid */ - addr = (vm_offset_t) KERNBASE + KPTphys; - vm_object_reference(kernel_object); - result = vm_map_find(kernel_map, kernel_object, addr, - &addr, 10*NBPG, FALSE); - if (result != KERN_SUCCESS) { - printf("vm_map_find for kernel page maps failed %d\n", result); - } -#endif - /* - * Allocate memory for random pmap data structures. Includes the - * pv_head_table and pmap_attributes. 
- */ - npg = atop(phys_end - phys_start); - s = (vm_size_t) (sizeof(struct pv_entry) * npg + 2*npg); + npages = atop(phys_end - phys_start); + s = (vm_size_t) (sizeof(struct pv_entry) * npages + + sizeof(*pmap_attributes) * npages); s = round_page(s); addr = (vm_offset_t) kmem_alloc(kernel_map, s); - pv_table = (pv_entry_t) addr; - addr += sizeof(struct pv_entry) * npg; + pv_table = (struct pv_entry *) addr; + addr += sizeof(struct pv_entry) * npages; pmap_attributes = (short *) addr; + TAILQ_INIT(&pv_page_freelist); #ifdef DEBUG if (pmapdebug & PDB_INIT) printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n", - s, npg, pv_table, pmap_attributes); + s, npages, pv_table, pmap_attributes); #endif /* @@ -520,6 +327,226 @@ pmap_init(phys_start, phys_end) pmap_initialized = TRUE; } +struct pv_entry * +pmap_alloc_pv() +{ + struct pv_page *pvp; + struct pv_entry *pv; + int i; + + if (pv_nfree == 0) { + pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG); + if (pvp == 0) + panic("pmap_alloc_pv: kmem_alloc() failed"); + pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; + for (i = NPVPPG - 2; i; i--, pv++) + pv->pv_next = pv + 1; + pv->pv_next = 0; + pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1; + TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + pv = &pvp->pvp_pv[0]; + } else { + --pv_nfree; + pvp = pv_page_freelist.tqh_first; + if (--pvp->pvp_pgi.pgi_nfree == 0) { + TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + } + pv = pvp->pvp_pgi.pgi_freelist; +#ifdef DIAGNOSTIC + if (pv == 0) + panic("pmap_alloc_pv: pgi_nfree inconsistent"); +#endif + pvp->pvp_pgi.pgi_freelist = pv->pv_next; + } + return pv; +} + +void +pmap_free_pv(pv) + struct pv_entry *pv; +{ + register struct pv_page *pvp; + register int i; + + pvp = (struct pv_page *) trunc_page(pv); + switch (++pvp->pvp_pgi.pgi_nfree) { + case 1: + TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + default: + pv->pv_next = pvp->pvp_pgi.pgi_freelist; + pvp->pvp_pgi.pgi_freelist = pv; + ++pv_nfree; + break; + case NPVPPG: + pv_nfree -= NPVPPG - 1; + TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); + break; + } +} + +void +pmap_collect_pv() +{ + struct pv_page_list pv_page_collectlist; + struct pv_page *pvp, *npvp; + struct pv_entry *ph, *ppv, *pv, *npv; + int s; + + TAILQ_INIT(&pv_page_collectlist); + + for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) { + if (pv_nfree < NPVPPG) + break; + npvp = pvp->pvp_pgi.pgi_list.tqe_next; + if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) { + TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp, pvp_pgi.pgi_list); + pv_nfree -= pvp->pvp_pgi.pgi_nfree; + pvp->pvp_pgi.pgi_nfree = -1; + } + } + + if (pv_page_collectlist.tqh_first == 0) + return; + + for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) { + if (ph->pv_pmap == 0) + continue; + s = splimp(); + for (ppv = ph; (pv = ppv->pv_next) != 0; ) { + pvp = (struct pv_page *) trunc_page(pv); + if (pvp->pvp_pgi.pgi_nfree == -1) { + pvp = pv_page_freelist.tqh_first; + if (--pvp->pvp_pgi.pgi_nfree == 0) { + TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); + } + npv = pvp->pvp_pgi.pgi_freelist; +#ifdef DIAGNOSTIC + if (npv == 0) + panic("pmap_collect_pv: pgi_nfree inconsistent"); +#endif + pvp->pvp_pgi.pgi_freelist = npv->pv_next; + *npv = *pv; + ppv->pv_next = npv; + ppv = npv; + } else + ppv = pv; + } + splx(s); + } + + for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) { + npvp = 
pvp->pvp_pgi.pgi_list.tqe_next; + kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); + } +} + +__inline u_int +pmap_page_index(pa) + vm_offset_t pa; +{ + if (pa >= vm_first_phys && pa < vm_last_phys) + return ns532_btop(pa - vm_first_phys); + return -1; +} + +__inline void +pmap_enter_pv(pmap, va, pind) + register pmap_t pmap; + vm_offset_t va; + u_int pind; +{ + register struct pv_entry *pv, *npv; + int s; + + if (!pmap_initialized) + return; + +#ifdef DEBUG + if (pmapdebug & PDB_ENTER) + printf("pmap_enter_pv: pv %x: %x/%x/%x\n", + pv, pv->pv_va, pv->pv_pmap, pv->pv_next); +#endif + + pv = &pv_table[pind]; + s = splimp(); + + if (pv->pv_pmap == NULL) { + /* + * No entries yet, use header as the first entry + */ +#ifdef DEBUG + enter_stats.firstpv++; +#endif + pv->pv_va = va; + pv->pv_pmap = pmap; + pv->pv_next = NULL; + } else { + /* + * There is at least one other VA mapping this page. + * Place this entry after the header. + */ +#ifdef DEBUG + for (npv = pv; npv; npv = npv->pv_next) + if (pmap == npv->pv_pmap && va == npv->pv_va) + panic("pmap_enter_pv: already in pv_tab"); +#endif + npv = pmap_alloc_pv(); + npv->pv_va = va; + npv->pv_pmap = pmap; + npv->pv_next = pv->pv_next; + pv->pv_next = npv; +#ifdef DEBUG + if (!npv->pv_next) + enter_stats.secondpv++; +#endif + } + splx(s); +} + +__inline void +pmap_remove_pv(pmap, va, pind) + register pmap_t pmap; + vm_offset_t va; + u_int pind; +{ + register struct pv_entry *pv, *npv; + int s; + + /* + * Remove from the PV table (raise IPL since we + * may be called at interrupt time). + */ + pv = &pv_table[pind]; + s = splimp(); + + /* + * If it is the first entry on the list, it is actually + * in the header and we must copy the following entry up + * to the header. Otherwise we must search the list for + * the entry. In either case we free the now unused entry. + */ + if (pmap == pv->pv_pmap && va == pv->pv_va) { + npv = pv->pv_next; + if (npv) { + *pv = *npv; + pmap_free_pv(npv); + } else + pv->pv_pmap = NULL; + } else { + for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { + if (pmap == npv->pv_pmap && va == npv->pv_va) + break; + } + if (npv) { + pv->pv_next = npv->pv_next; + pmap_free_pv(npv); + } + } + splx(s); +} + /* * Used to map a range of physical addresses into kernel * virtual address space. @@ -528,22 +555,22 @@ pmap_init(phys_start, phys_end) * specified memory. */ vm_offset_t -pmap_map(virt, start, end, prot) - vm_offset_t virt; - vm_offset_t start; - vm_offset_t end; - int prot; +pmap_map(va, spa, epa, prot) + vm_offset_t va, spa, epa; + int prot; { + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot); + printf("pmap_map(%x, %x, %x, %x)\n", va, spa, epa, prot); #endif - while (start < end) { - pmap_enter(pmap_kernel(), virt, start, prot, FALSE); - virt += PAGE_SIZE; - start += PAGE_SIZE; + + while (spa < epa) { + pmap_enter(pmap_kernel(), va, spa, prot, FALSE); + va += NBPG; + spa += NBPG; } - return(virt); + return va; } /* @@ -561,10 +588,9 @@ pmap_map(virt, start, end, prot) * [ just allocate a ptd and mark it uninitialize -- should we track * with a table which process has which ptd? -wfj ] */ - pmap_t pmap_create(size) - vm_size_t size; + vm_size_t size; { register pmap_t pmap; @@ -572,21 +598,17 @@ pmap_create(size) if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) printf("pmap_create(%x)\n", size); #endif + /* * Software use map does not need a pmap */ if (size) - return(NULL); + return NULL; - /* XXX: is it ok to wait here? 
*/ pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); -#ifdef notifwewait - if (pmap == NULL) - panic("pmap_create: cannot allocate a pmap"); -#endif bzero(pmap, sizeof(*pmap)); pmap_pinit(pmap); - return (pmap); + return pmap; } /* @@ -597,9 +619,10 @@ void pmap_pinit(pmap) register struct pmap *pmap; { + #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) - pg("pmap_pinit(%x)\n", pmap); + printf("pmap_pinit(%x)\n", pmap); #endif /* @@ -609,13 +632,11 @@ pmap_pinit(pmap) pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG); /* wire in kernel global address entries */ - bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST, - (KPTDI_LAST-KPTDI_FIRST+1)*4); + bcopy(&PTD[KPTDI], &pmap->pm_pdir[KPTDI], NKPDE * sizeof(pd_entry_t)); /* install self-referential address mapping entry */ - *(int *)(pmap->pm_pdir+PTDPTDI) = - (int)pmap_extract(pmap_kernel(), (vm_offset_t) pmap->pm_pdir) - | PG_V | PG_KW; + pmap->pm_pdir[PTDPTDI] = + pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir) | PG_V | PG_KW; pmap->pm_count = 1; simple_lock_init(&pmap->pm_lock); @@ -632,12 +653,13 @@ pmap_destroy(pmap) { int count; + if (pmap == NULL) + return; + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_destroy(%x)\n", pmap); #endif - if (pmap == NULL) - return; simple_lock(&pmap->pm_lock); count = --pmap->pm_count; @@ -657,16 +679,18 @@ void pmap_release(pmap) register struct pmap *pmap; { + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - pg("pmap_release(%x)\n", pmap); + printf("pmap_release(%x)\n", pmap); #endif -#ifdef notdef /* DIAGNOSTIC */ - /* count would be 0 from pmap_destroy... */ - simple_lock(&pmap->pm_lock); + +#ifdef DIAGNOSTICx + /* sometimes 1, sometimes 0; could rearrange pmap_destroy */ if (pmap->pm_count != 1) panic("pmap_release count"); #endif + kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG); } @@ -675,19 +699,44 @@ pmap_release(pmap) */ void pmap_reference(pmap) - pmap_t pmap; + pmap_t pmap; { + + if (pmap == NULL) + return; + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_reference(%x)\n", pmap); + printf("pmap_reference(%x)", pmap); #endif - if (pmap != NULL) { - simple_lock(&pmap->pm_lock); - pmap->pm_count++; - simple_unlock(&pmap->pm_lock); + + simple_lock(&pmap->pm_lock); + pmap->pm_count++; + simple_unlock(&pmap->pm_lock); +} + +void +pmap_activate(pmap, pcb) + pmap_t pmap; + struct pcb *pcb; +{ + + if (pmap /*&& pmap->pm_pdchanged */) { + pcb->pcb_ptb = + pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir); + if (pmap == &curproc->p_vmspace->vm_pmap) + load_ptb(pcb->pcb_ptb); + pmap->pm_pdchanged = FALSE; } } +void +pmap_deactivate(pmap, pcb) + pmap_t pmap; + struct pcb *pcb; +{ +} + /* * Remove the given range of addresses from the specified map. * @@ -697,85 +746,151 @@ pmap_reference(pmap) void pmap_remove(pmap, sva, eva) struct pmap *pmap; - register vm_offset_t sva; - register vm_offset_t eva; + register vm_offset_t sva, eva; { - register pt_entry_t *ptp,*ptq; - vm_offset_t va; + register pt_entry_t *pte; vm_offset_t pa; - pt_entry_t *pte; - pv_entry_t pv, npv; - int ix; - int s, bits; + u_int pind; + int flush = 0; + sva &= PG_FRAME; + eva &= PG_FRAME; + + /* + * We need to acquire a pointer to a page table page before entering + * the following loop. + */ + while (sva < eva) { + pte = pmap_pte(pmap, sva); + if (pte) + break; + sva = (sva & PD_MASK) + NBPD; + } + + while (sva < eva) { + /* only check once in a while */ + if ((sva & PT_MASK) == 0) { + if (!pmap_pde_v(pmap_pde(pmap, sva))) { + /* We can race ahead here, to the next pde. 
*/ + sva += NBPD; + pte += ns532_btop(NBPD); + continue; + } + } + + if (!pmap_pte_v(pte)) { +#ifdef __GNUC__ + /* + * Scan ahead in a tight loop for the next used PTE in + * this page. We don't scan the whole region here + * because we don't want to zero-fill unused page table + * pages. + */ + int n, m; + + n = min(eva - sva, NBPD - (sva & PT_MASK)) >> PGSHIFT; + { + register int r0 __asm("r0") = n; + register pt_entry_t *r1 __asm("r1") = pte; + register int r4 __asm("r4") = 0; + __asm __volatile( + "skpsd w" + : "=r" (r1), "=r" (r0) + : "0" (r1), "1" (r0), "r" (r4)); + pte = r1; + m = r0; + } + sva += (n - m) << PGSHIFT; + if (!m) + continue; +#else + goto next; +#endif + } + flush = 1; + + /* + * Update statistics + */ + if (pmap_pte_w(pte)) + pmap->pm_stats.wired_count--; + pmap->pm_stats.resident_count--; + + pa = pmap_pte_pa(pte); + + /* + * Invalidate the PTEs. + * XXX: should cluster them up and invalidate as many + * as possible at once. + */ #ifdef DEBUG - pt_entry_t opte; - - if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) - pg("pmap_remove(%x, %x, %x)\n", pmap, sva, eva); + if (pmapdebug & PDB_REMOVE) + printf("remove: inv pte at %x(%x) ", pte, *pte); #endif - if (pmap == NULL) +#ifdef needednotdone +reduce wiring count on page table pages as references drop +#endif + + if ((pind = pmap_page_index(pa)) != -1) { + pmap_attributes[pind] |= *pte & (PG_M | PG_U); + pmap_remove_pv(pmap, sva, pind); + } + + *pte = 0; + + next: + sva += NBPG; + pte++; + } + + if (flush) + pmap_update(); +} + +/* + * Routine: pmap_remove_all + * Function: + * Removes this physical page from + * all physical maps in which it resides. + * Reflects back modify bits to the pager. + */ +void +pmap_remove_all(pa) + vm_offset_t pa; +{ + struct pv_entry *ph, *pv, *npv; + register pmap_t pmap; + register pt_entry_t *pte; + u_int pind; + int s; + +#ifdef DEBUG + if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) + printf("pmap_remove_all(%x)", pa); + /*pmap_pvdump(pa);*/ +#endif + + if ((pind = pmap_page_index(pa)) == -1) return; - /* are we current address space or kernel? */ - if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum - || pmap == pmap_kernel()) - ptp=PTmap; + pv = ph = &pv_table[pind]; + s = splimp(); + + if (ph->pv_pmap == NULL) { + splx(s); + return; + } + + while (pv) { + pmap = pv->pv_pmap; + pte = pmap_pte(pmap, pv->pv_va); - /* otherwise, we are alternate address space */ - else { - if (pmap->pm_pdir[PTDPTDI].pd_pfnum - != APTDpde.pd_pfnum) { - APTDpde = pmap->pm_pdir[PTDPTDI]; - tlbflush(); - } - ptp=APTmap; - } #ifdef DEBUG - remove_stats.calls++; + if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa) + panic("pmap_remove_all: bad mapping"); #endif - - /* this is essential since we must check the PDE(sva) for precense */ - while (sva <= eva && !pmap_pde_v(pmap_pde(pmap, sva))) - sva = (sva & PD_MASK) + (1<= eva) - return; - ptq++; - } - - - if(!(sva & 0x3ff)) /* Only check once in a while */ - { - if (!pmap_pde_v(pmap_pde(pmap, ns532_ptob(sva)))) - { - /* We can race ahead here, straight to next pde.. 
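
Both the new pmap_remove() and pmap_protect() skip runs of empty PTEs with an ns32532 string instruction: judging by the surrounding arithmetic, skpsd advances the pointer in r1 over doublewords equal to r4 (zero here) while counting down r0, so (n - m) is the number of invalid entries stepped over. A portable equivalent of that inner step, with pt_entry_t as a stand-in type, might look like this:

	#include <stddef.h>
	#include <stdio.h>

	typedef unsigned int pt_entry_t;	/* stand-in for the real PTE type */

	/*
	 * Advance *ptep over leading zero entries, at most n of them, and
	 * return how many were skipped.  This is what the skpsd sequence
	 * computes: r1 is the pointer, r0 the remaining count, r4 the
	 * value being matched (zero).
	 */
	static size_t
	skip_zero_ptes(pt_entry_t **ptep, size_t n)
	{
		pt_entry_t *pte = *ptep;
		size_t m = n;

		while (m != 0 && *pte == 0) {
			pte++;
			m--;
		}
		*ptep = pte;
		return n - m;		/* caller adds (n - m) << PGSHIFT to sva */
	}

	int
	main(void)
	{
		pt_entry_t pt[8] = { 0, 0, 0, 5, 0, 7, 0, 0 };
		pt_entry_t *pte = pt;
		size_t skipped = skip_zero_ptes(&pte, 8);

		printf("skipped %zu empty ptes, next pte = %u\n", skipped, *pte);
		return 0;
	}

As the comment in the hunk notes, the scan is limited to the current page-table page so that unused page-table pages are never touched, and therefore never zero-filled, just to be searched.
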
*/ - sva = (sva & 0xffc00) + (1<<10) -1 ; - continue; - } - } - if(!pmap_pte_pa(ptp+sva)) - continue; - - pte = ptp + sva; - pa = pmap_pte_pa(pte); - va = ns532_ptob(sva); -#ifdef DEBUG - opte = *pte; - remove_stats.removes++; -#endif /* * Update statistics */ @@ -790,146 +905,30 @@ pmap_remove(pmap, sva, eva) */ #ifdef DEBUG if (pmapdebug & PDB_REMOVE) - printf("remove: inv %x ptes at pte %x pa %x va %x\n", - ns532pagesperpage, pte, pa, va); + printf("remove: inv pte at %x(%x) ", pte, *pte); #endif - bits = ix = 0; - do { - bits |= *(int *)pte & (PG_U|PG_M); - *(int *)pte++ = 0; - /*TBIS(va + ix * NS532_PAGE_SIZE);*/ - } while (++ix != ns532pagesperpage); - if (curproc && pmap == &curproc->p_vmspace->vm_pmap) - pmap_activate(pmap, (struct pcb *)curproc->p_addr); -#if 0 -/* commented out in 386 version as well */ - /* are we current address space or kernel? */ - if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum - || pmap == pmap_kernel()) { - _load_ptb0(curpcb->pcb_ptb); - } -#endif - tlbflush(); #ifdef needednotdone reduce wiring count on page table pages as references drop #endif - /* - * Remove from the PV table (raise IPL since we - * may be called at interrupt time). - */ - if (pa < vm_first_phys || pa >= vm_last_phys) - continue; - pv = pa_to_pvh(pa); - s = splimp(); - /* - * If it is the first entry on the list, it is actually - * in the header and we must copy the following entry up - * to the header. Otherwise we must search the list for - * the entry. In either case we free the now unused entry. - */ - if (pmap == pv->pv_pmap && va == pv->pv_va) { - npv = pv->pv_next; - if (npv) { - *pv = *npv; - free((caddr_t)npv, M_VMPVENT); - } else - pv->pv_pmap = NULL; -#ifdef DEBUG - remove_stats.pvfirst++; -#endif - } else { - for (npv = pv->pv_next; npv; npv = npv->pv_next) { -#ifdef DEBUG - remove_stats.pvsearch++; -#endif - if (pmap == npv->pv_pmap && va == npv->pv_va) - break; - pv = npv; - } -#ifdef DEBUG - if (npv == NULL) { - printf("vm_first_phys %x pa %x vm_last_phys %x\n", - vm_first_phys, pa, vm_last_phys); - panic("pmap_remove: PA not in pv_tab"); - } -#endif - pv->pv_next = npv->pv_next; - free((caddr_t)npv, M_VMPVENT); - pv = pa_to_pvh(pa); - } - -#ifdef notdef -[tally number of pagetable pages, if sharing of ptpages adjust here] -#endif /* * Update saved attributes for managed page */ - pmap_attributes[pa_index(pa)] |= bits; - splx(s); - } -#ifdef notdef -[cache and tlb flushing, if needed] -#endif -} + pmap_attributes[pind] |= *pte & (PG_M | PG_U); -/* - * Routine: pmap_remove_all - * Function: - * Removes this physical page from - * all physical maps in which it resides. - * Reflects back modify bits to the pager. 
- */ -void -pmap_remove_all(pa) - vm_offset_t pa; -{ - register pv_entry_t pv; - int s; + *pte = 0; -#ifdef DEBUG - if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) - printf("pmap_remove_all(%x)", pa); - /*pmap_pvdump(pa);*/ -#endif - /* - * Not one of ours - */ - if (pa < vm_first_phys || pa >= vm_last_phys) - return; - - pv = pa_to_pvh(pa); - s = splimp(); - /* - * Do it the easy way for now - */ - while (pv->pv_pmap != NULL) { -#ifdef DEBUG - if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) || - pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa) - panic("pmap_remove_all: bad mapping"); -#endif - pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE); + npv = pv->pv_next; + if (pv == ph) + ph->pv_pmap = NULL; + else + pmap_free_pv(pv); + pv = npv; } splx(s); -} -/* - * Routine: pmap_copy_on_write - * Function: - * Remove write privileges from all - * physical maps for this physical page. - */ -void -pmap_copy_on_write(pa) - vm_offset_t pa; -{ -#ifdef DEBUG - if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) - printf("pmap_copy_on_write(%x)", pa); -#endif - pmap_changebit(pa, /* was PG_RO, TRUE */ PG_RW, FALSE); + pmap_update(); } /* @@ -938,80 +937,98 @@ pmap_copy_on_write(pa) */ void pmap_protect(pmap, sva, eva, prot) - register pmap_t pmap; - vm_offset_t sva, eva; - vm_prot_t prot; + register pmap_t pmap; + vm_offset_t sva, eva; + vm_prot_t prot; { register pt_entry_t *pte; - register vm_offset_t va; - register int ix; - int ns532prot; - boolean_t firstpage = TRUE; - register pt_entry_t *ptp; + register int ns532prot; + int flush = 0; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot); #endif - if (pmap == NULL) - return; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } + if (prot & VM_PROT_WRITE) return; - /* are we current address space or kernel? */ - if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum - || pmap == pmap_kernel()) - ptp=PTmap; + sva &= PG_FRAME; + eva &= PG_FRAME; - /* otherwise, we are alternate address space */ - else { - if (pmap->pm_pdir[PTDPTDI].pd_pfnum - != APTDpde.pd_pfnum) { - APTDpde = pmap->pm_pdir[PTDPTDI]; - tlbflush(); - } - ptp=APTmap; - } - for (va = sva; va < eva; va += PAGE_SIZE) { - /* - * Page table page is not allocated. - * Skip it, we don't want to force allocation - * of unnecessary PTE pages just to set the protection. - */ - if (!pmap_pde_v(pmap_pde(pmap, va))) { - /* XXX: avoid address wrap around */ - if (va >= ns532_trunc_pdr((vm_offset_t)-1)) - break; - va = ns532_round_pdr(va + PAGE_SIZE) - PAGE_SIZE; - continue; - } - - pte = ptp + ns532_btop(va); - - /* - * Page not valid. Again, skip it. - * Should we do this? Or set protection anyway? - */ - if (!pmap_pte_v(pte)) - continue; - - ix = 0; - ns532prot = pte_prot(pmap, prot); - if(va < UPT_MAX_ADDRESS) - ns532prot |= 2 /*PG_u*/; - do { - /* clear VAC here if PG_RO? */ - pmap_pte_set_prot(pte++, ns532prot); - /*TBIS(va + ix * NS532_PAGE_SIZE);*/ - } while (++ix != ns532pagesperpage); + /* + * We need to acquire a pointer to a page table page before entering + * the following loop. + */ + while (sva < eva) { + pte = pmap_pte(pmap, sva); + if (pte) + break; + sva = (sva & PD_MASK) + NBPD; } - if (curproc && pmap == &curproc->p_vmspace->vm_pmap) - pmap_activate(pmap, (struct pcb *)curproc->p_addr); + + while (sva < eva) { + /* only check once in a while */ + if ((sva & PT_MASK) == 0) { + if (!pmap_pde_v(pmap_pde(pmap, sva))) { + /* We can race ahead here, to the next pde. 
*/ + sva += NBPD; + pte += ns532_btop(NBPD); + continue; + } + } + + if (!pmap_pte_v(pte)) { +#ifdef __GNUC__ + /* + * Scan ahead in a tight loop for the next used PTE in + * this page. We don't scan the whole region here + * because we don't want to zero-fill unused page table + * pages. + */ + int n, m; + + n = min(eva - sva, NBPD - (sva & PT_MASK)) >> PGSHIFT; + { + register int r0 __asm("r0") = n; + register pt_entry_t *r1 __asm("r1") = pte; + register int r4 __asm("r4") = 0; + __asm __volatile( + "skpsd w" + : "=r" (r1), "=r" (r0) + : "0" (r1), "1" (r0), "r" (r4)); + pte = r1; + m = r0; + } + sva += (n - m) << PGSHIFT; + if (!m) + continue; +#else + goto next; +#endif + } + + flush = 1; + + ns532prot = protection_codes[prot]; + if (sva < VM_MAXUSER_ADDRESS) /* see also pmap_enter() */ + ns532prot |= PG_u; + else if (sva < VM_MAX_ADDRESS) + ns532prot |= PG_u | PG_RW; + pmap_pte_set_prot(pte, ns532prot); + + next: + sva += NBPG; + pte++; + } + + if (flush) + pmap_update(); } /* @@ -1035,21 +1052,22 @@ pmap_enter(pmap, va, pa, prot, wired) boolean_t wired; { register pt_entry_t *pte; - register int npte, ix; - vm_offset_t opa; - boolean_t cacheable = TRUE; - boolean_t checkpv = TRUE; + register pt_entry_t npte; + u_int pind; + int flush = 0; + boolean_t cacheable; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) - printf("pmap_enter(%x, %x, %x, %x, %x)\n", + printf("pmap_enter(%x, %x, %x, %x, %x)", pmap, va, pa, prot, wired); #endif + if (pmap == NULL) return; - if(va >= VM_MAX_KERNEL_ADDRESS) - panic("pmap_enter: toobig"); + if (va >= VM_MAX_KERNEL_ADDRESS) + panic("pmap_enter: too big"); /* also, should not muck with PTD va! */ #ifdef DEBUG @@ -1068,13 +1086,35 @@ pmap_enter(pmap, va, pa, prot, wired) #ifdef DEBUG if (pmapdebug & PDB_ENTER) - printf("enter: pte %x, *pte %x ", pte, *(int *)pte); + printf("enter: pte %x, *pte %x ", pte, *pte); #endif - if (pmap_pte_v(pte)) { register vm_offset_t opa; + /* + * Check for wiring change and adjust statistics. + */ + if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) { + /* + * We don't worry about wiring PT pages as they remain + * resident as long as there are valid mappings in them. + * Hence, if a user page is wired, the PT page will be also. + */ +#ifdef DEBUG + if (pmapdebug & PDB_ENTER) + printf("enter: wiring change -> %x ", wired); +#endif + if (wired) + pmap->pm_stats.wired_count++; + else + pmap->pm_stats.wired_count--; +#ifdef DEBUG + enter_stats.wchange++; +#endif + } + + flush = 1; opa = pmap_pte_pa(pte); /* @@ -1084,25 +1124,6 @@ pmap_enter(pmap, va, pa, prot, wired) #ifdef DEBUG enter_stats.pwchange++; #endif - /* - * Wiring change, just update stats. - * We don't worry about wiring PT pages as they remain - * resident as long as there are valid mappings in them. - * Hence, if a user page is wired, the PT page will be also. 
- */ - if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) { -#ifdef DEBUG - if (pmapdebug & PDB_ENTER) - pg("enter: wiring change -> %x ", wired); -#endif - if (wired) - pmap->pm_stats.wired_count++; - else - pmap->pm_stats.wired_count--; -#ifdef DEBUG - enter_stats.wchange++; -#endif - } goto validate; } @@ -1114,130 +1135,82 @@ pmap_enter(pmap, va, pa, prot, wired) if (pmapdebug & PDB_ENTER) printf("enter: removing old mapping %x pa %x ", va, opa); #endif - pmap_remove(pmap, va, va + NBPG); + if ((pind = pmap_page_index(opa)) != -1) { + pmap_attributes[pind] |= *pte & (PG_M | PG_U); + pmap_remove_pv(pmap, va, pind); + } #ifdef DEBUG enter_stats.mchange++; #endif + } else { + /* + * Increment counters + */ + pmap->pm_stats.resident_count++; + if (wired) + pmap->pm_stats.wired_count++; } /* * Enter on the PV list if part of our managed memory - * Note that we raise IPL while manipulating pv_table - * since pmap_enter can be called at interrupt time. */ -/* if (pmap_valid_page(pa)) in the i386 version ... */ - if (pa >= vm_first_phys && pa < vm_last_phys) { - register pv_entry_t pv, npv; - int s; - + if ((pind = pmap_page_index(pa)) != -1) { #ifdef DEBUG enter_stats.managed++; #endif - pv = pa_to_pvh(pa); - s = splimp(); -#ifdef DEBUG - if (pmapdebug & PDB_ENTER) - printf("enter: pv at %x: %x/%x/%x\n", - pv, pv->pv_va, pv->pv_pmap, pv->pv_next); -#endif - /* - * No entries yet, use header as the first entry - */ - if (pv->pv_pmap == NULL) { -#ifdef DEBUG - enter_stats.firstpv++; -#endif - pv->pv_va = va; - pv->pv_pmap = pmap; - pv->pv_next = NULL; - pv->pv_flags = 0; - } - /* - * There is at least one other VA mapping this page. - * Place this entry after the header. - */ - else { - /*printf("second time: ");*/ -#ifdef DEBUG - for (npv = pv; npv; npv = npv->pv_next) - if (pmap == npv->pv_pmap && va == npv->pv_va) - panic("pmap_enter: already in pv_tab"); -#endif - npv = (pv_entry_t) - malloc(sizeof *npv, M_VMPVENT, M_NOWAIT); - if (npv == NULL) - panic("pmap_enter: malloc returned NULL"); - npv->pv_va = va; - npv->pv_pmap = pmap; - npv->pv_next = pv->pv_next; - pv->pv_next = npv; -#ifdef DEBUG - if (!npv->pv_next) - enter_stats.secondpv++; -#endif - } - splx(s); - } - /* - * Assumption: if it is not part of our managed memory - * then it must be device memory which may be volitile. - */ - if (pmap_initialized) { - checkpv = cacheable = FALSE; + pmap_enter_pv(pmap, va, pind); + cacheable = TRUE; + } else if (pmap_initialized) { #ifdef DEBUG enter_stats.unmanaged++; #endif + /* + * Assumption: if it is not part of our managed memory + * then it must be device memory which may be volatile. + */ + cacheable = FALSE; } - /* - * Increment counters - */ - pmap->pm_stats.resident_count++; - if (wired) - pmap->pm_stats.wired_count++; - validate: /* * Now validate mapping with desired protection/wiring. * Assume uniform modified and referenced status for all - * Ns532 pages in a MACH page. + * ns532 pages in a MACH page. */ - npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V; - npte |= (*(int *)pte & (PG_M|PG_U)); + npte = (pa & PG_FRAME) | protection_codes[prot] | PG_V; if (wired) npte |= PG_W; + if (va < VM_MAXUSER_ADDRESS) /* i.e. below USRSTACK */ npte |= PG_u; - else if (va < UPT_MAX_ADDRESS) - /* pagetables need to be user RW, for some reason, and the + else if (va < VM_MAX_ADDRESS) + /* + * Page tables need to be user RW, for some reason, and the * user area must be writable too. 
Anything above * VM_MAXUSER_ADDRESS is protected from user access by * the user data and code segment descriptors, so this is OK. - * - * andrew@werple.apana.org.au */ npte |= PG_u | PG_RW; #ifdef DEBUG if (pmapdebug & PDB_ENTER) - printf("enter: new pte value %x\n", npte); + printf("enter: new pte value %x ", npte); #endif - ix = 0; - do { - *(int *)pte++ = npte; - /*TBIS(va);*/ - npte += NS532_PAGE_SIZE; - va += NS532_PAGE_SIZE; - } while (++ix != ns532pagesperpage); - pte--; -#ifdef DEBUGx -cache, tlb flushes -#endif -#if 0 - pads(pmap); - _load_ptb0(((struct pcb *)curproc->p_addr)->pcb_ptb); -#endif - tlbflush(); + + *pte = npte; + if (flush) + pmap_update(); + + /* + * The only time we need to flush the cache is if we + * execute from a physical address and then change the data. + * This is the best place to do this. + * pmap_protect() and pmap_remove() are mostly used to switch + * between R/W and R/O pages. + * NOTE: we only support cache flush for read only text. + */ + if (prot == (VM_PROT_READ | VM_PROT_EXECUTE)) + __asm __volatile("cinv ia,r0"); } /* @@ -1247,20 +1220,21 @@ cache, tlb flushes */ void pmap_page_protect(phys, prot) - vm_offset_t phys; - vm_prot_t prot; + vm_offset_t phys; + vm_prot_t prot; { - switch (prot) { - case VM_PROT_READ: - case VM_PROT_READ|VM_PROT_EXECUTE: - pmap_copy_on_write(phys); - break; - case VM_PROT_ALL: - break; - default: - pmap_remove_all(phys); - break; - } + + switch (prot) { + case VM_PROT_READ: + case VM_PROT_READ|VM_PROT_EXECUTE: + pmap_copy_on_write(phys); + break; + case VM_PROT_ALL: + break; + default: + pmap_remove_all(phys); + break; + } } /* @@ -1272,55 +1246,39 @@ pmap_page_protect(phys, prot) */ void pmap_change_wiring(pmap, va, wired) - register pmap_t pmap; - vm_offset_t va; - boolean_t wired; + register pmap_t pmap; + vm_offset_t va; + boolean_t wired; { register pt_entry_t *pte; - register int ix; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired); + printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired); #endif - if (pmap == NULL) - return; pte = pmap_pte(pmap, va); -#ifdef DEBUG - /* - * Page table page is not allocated. - * Should this ever happen? Ignore it for now, - * we don't want to force allocation of unnecessary PTE pages. - */ - if (!pmap_pde_v(pmap_pde(pmap, va))) { - if (pmapdebug & PDB_PARANOIA) - pg("pmap_change_wiring: invalid PDE for %x\n", va); + if (!pte) return; - } + +#ifdef DEBUG /* * Page not valid. Should this ever happen? * Just continue and change wiring anyway. */ if (!pmap_pte_v(pte)) { if (pmapdebug & PDB_PARANOIA) - pg("pmap_change_wiring: invalid PTE for %x\n", va); + printf("pmap_change_wiring: invalid PTE for %x ", va); } #endif + if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) { if (wired) pmap->pm_stats.wired_count++; else pmap->pm_stats.wired_count--; + pmap_pte_set_w(pte, wired); } - /* - * Wiring is not a hardware characteristic so there is no need - * to invalidate TLB. - */ - ix = 0; - do { - pmap_pte_set_w(pte++, wired); - } while (++ix != ns532pagesperpage); } /* @@ -1328,35 +1286,36 @@ pmap_change_wiring(pmap, va, wired) * Function: * Extract the page table entry associated * with the given map/virtual_address pair. 
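
pmap_enter() above composes the new PTE from the physical frame, the entry in protection_codes[] (filled in by ns532_protection_init() further down in this patch) and the valid, wired and user bits. A compilable sketch of that composition follows; the bit values and VM_PROT_* constants are illustrative, the real ones live in the port's pte.h and in vm_prot.h.

	#include <stdio.h>

	/* Illustrative values only; the real ones come from the port's
	 * pte.h and from vm_prot.h. */
	#define VM_PROT_READ	0x1
	#define VM_PROT_WRITE	0x2
	#define VM_PROT_EXECUTE	0x4

	#define PG_FRAME	0xfffff000u	/* physical frame bits */
	#define PG_V		0x00000001u	/* valid */
	#define PG_RW		0x00000002u	/* writable */
	#define PG_u		0x00000004u	/* user accessible */
	#define PG_W		0x00000200u	/* wired (software bit) */
	#define PG_RO		0x00000000u	/* read-only: just no PG_RW */

	static unsigned int protection_codes[8];

	/* What ns532_protection_init() boils down to: any combination
	 * containing VM_PROT_WRITE gets PG_RW, the rest get PG_RO. */
	static void
	protection_init(void)
	{
		int prot;

		for (prot = 0; prot < 8; prot++)
			protection_codes[prot] =
			    (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
	}

	int
	main(void)
	{
		unsigned int pa = 0x123000;	/* physical page being mapped */
		unsigned int npte;

		protection_init();
		/* a wired, user, read/write mapping, as pmap_enter() builds it */
		npte = (pa & PG_FRAME) |
		    protection_codes[VM_PROT_READ | VM_PROT_WRITE] |
		    PG_V | PG_W | PG_u;
		printf("npte = 0x%08x\n", npte);
		return 0;
	}
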
- * [ what about induced faults -wfj] */ - -struct pte *pmap_pte(pmap, va) - register pmap_t pmap; +pt_entry_t * +pmap_pte(pmap, va) + register pmap_t pmap; vm_offset_t va; { -#ifdef DEBUGx + pt_entry_t *ptp; + +#ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_pte(%x, %x) ->\n", pmap, va); #endif - if (pmap && pmap_pde_v(pmap_pde(pmap, va))) { - /* are we current address space or kernel? */ - if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum - || pmap == pmap_kernel()) - return ((struct pte *) vtopte(va)); + if (!pmap || !pmap_pde_v(pmap_pde(pmap, va))) + return NULL; - /* otherwise, we are alternate address space */ - else { - if (pmap->pm_pdir[PTDPTDI].pd_pfnum - != APTDpde.pd_pfnum) { - APTDpde = pmap->pm_pdir[PTDPTDI]; - tlbflush(); - } - return((struct pte *) avtopte(va)); + if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde & PG_FRAME) || + pmap == pmap_kernel()) + /* current address space or kernel */ + ptp = PTmap; + else { + /* alternate address space */ + if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) != (APTDpde & PG_FRAME)) { + APTDpde = pmap->pm_pdir[PTDPTDI]; + pmap_update(); } + ptp = APTmap; } - return(0); + + return ptp + ns532_btop(va); } /* @@ -1365,29 +1324,31 @@ struct pte *pmap_pte(pmap, va) * Extract the physical page address associated * with the given map/virtual_address pair. */ - vm_offset_t pmap_extract(pmap, va) - register pmap_t pmap; + register pmap_t pmap; vm_offset_t va; { + register pt_entry_t *pte; register vm_offset_t pa; #ifdef DEBUGx if (pmapdebug & PDB_FOLLOW) - pg("pmap_extract(%x, %x) -> ", pmap, va); + printf("pmap_extract(%x, %x) -> ", pmap, va); #endif - pa = 0; - if (pmap && pmap_pde_v(pmap_pde(pmap, va))) { - pa = *(int *) pmap_pte(pmap, va); - } - if (pa) - pa = (pa & PG_FRAME) | (va & ~PG_FRAME); -#ifdef DEBUGx + + pte = pmap_pte(pmap, va); + if (!pte) + return NULL; + if (!pmap_pte_v(pte)) + return NULL; + + pa = pmap_pte_pa(pte); +#ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("%x\n", pa); #endif - return(pa); + return pa | (va & ~PG_FRAME); } /* @@ -1397,39 +1358,20 @@ pmap_extract(pmap, va) * * This routine is only advisory and need not do anything. */ -void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) - pmap_t dst_pmap; - pmap_t src_pmap; - vm_offset_t dst_addr; - vm_size_t len; - vm_offset_t src_addr; +void +pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) + pmap_t dst_pmap, src_pmap; + vm_offset_t dst_addr, src_addr; + vm_size_t len; { -/* printf ("pmap_copy: dst=0x%x src=0x%x d_addr=0x%x len=0x%x s_addr=0x%x\n", - dst_pmap, src_pmap, dst_addr, len, src_addr); */ + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_copy(%x, %x, %x, %x, %x)\n", + printf("pmap_copy(%x, %x, %x, %x, %x)", dst_pmap, src_pmap, dst_addr, len, src_addr); #endif } -/* - * Require that all active physical maps contain no - * incorrect entries NOW. [This update includes - * forcing updates of any address map caching.] - * - * Generally used to insure that a thread about - * to run will see a semantically correct world. - */ -void pmap_update() -{ -#ifdef DEBUG - if (pmapdebug & PDB_FOLLOW) - printf("pmap_update()"); -#endif - tlbflush(); -} - /* * Routine: pmap_collect * Function: @@ -1440,96 +1382,93 @@ void pmap_update() * others may be collected. * Usage: * Called by the pageout daemon when pages are scarce. 
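
The rewritten pmap_pte() above leans on the self-referential page-directory entry installed in pmap_pinit(): because one PDE points back at the page directory itself, every PTE of the current address space appears in a linear window (PTmap), and the PTE for va is just PTmap plus btop(va) entries. A second slot (APTmap) gives the same window onto another pmap once its recursive PDE has been loaded into APTDpde. The arithmetic, with made-up constants (the real PTDPTDI, shifts and window addresses are in the port's pmap headers):

	#include <stdio.h>

	/*
	 * Made-up constants: 4KB pages, 1024-entry tables, 4-byte PTEs and
	 * an arbitrary recursive slot.
	 */
	#define PGSHIFT		12
	#define PDSHIFT		22
	#define PTDPTDI		0x3F6u			/* the self-referential PDE */
	#define PTMAP		(PTDPTDI << PDSHIFT)	/* where all PTEs appear */

	/* vtopte(va): virtual address of the PTE that maps va */
	static unsigned int
	vtopte(unsigned int va)
	{
		return PTMAP + (va >> PGSHIFT) * 4;
	}

	int
	main(void)
	{
		unsigned int va = 0x08049123;

		printf("PDE index for 0x%08x: %u\n", va, va >> PDSHIFT);
		printf("its PTE is visible at 0x%08x\n", vtopte(va));
		return 0;
	}
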
- * [ needs to be written -wfj ] + * [ needs to be written -wfj ] XXXX */ void pmap_collect(pmap) - pmap_t pmap; + pmap_t pmap; { register vm_offset_t pa; - register pv_entry_t pv; - register int *pte; + register struct pv_entry *pv; + register pt_entry_t *pte; vm_offset_t kpa; int s; #ifdef DEBUG - int *pde; - int opmapdebug; + printf("pmap_collect(%x) ", pmap); #endif + if (pmap != pmap_kernel()) return; + } -/* [ macro again?, should I force kstack into user map here? -wfj ] */ +#if 0 void -pmap_activate(pmap, pcbp) - register pmap_t pmap; - struct pcb *pcbp; +pmap_dump_pvlist(phys, m) + vm_offset_t phys; + char *m; { -#ifdef DEBUG - if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB)) - pg("pmap_activate(%x, %x)\n", pmap, pcbp); -#endif - PMAP_ACTIVATE(pmap, pcbp); -#ifdef DEBUG - { - int x; - printf("pde "); - for(x=0x3f6; x < 0x3fA; x++) - printf("%x ", pmap->pm_pdir[x]); - pads(pmap); - pg(" pcb_ptb %x\n", pcbp->pcb_ptb); + register struct pv_entry *pv; + + if (!pmap_initialized) + return; + printf("%s %08x:", m, phys); + pv = &pv_table[pmap_page_index(phys)]; + if (pv->pv_pmap == NULL) { + printf(" no mappings\n"); + return; } -#endif + for (; pv; pv = pv->pv_next) + printf(" pmap %08x va %08x", pv->pv_pmap, pv->pv_va); + printf("\n"); } +#else +#define pmap_dump_pvlist(a,b) +#endif /* - * pmap_zero_page zeros the specified (machine independent) - * page by mapping the page into virtual memory and using - * bzero to clear its contents, one machine dependent page - * at a time. + * pmap_zero_page zeros the specified by mapping it into + * virtual memory and using bzero to clear its contents. */ void pmap_zero_page(phys) - register vm_offset_t phys; + register vm_offset_t phys; { - register int ix; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_zero_page(%x)\n", phys); + printf("pmap_zero_page(%x)", phys); #endif - phys >>= PG_SHIFT; - ix = 0; - do { - clearseg(phys++); - } while (++ix != ns532pagesperpage); + + pmap_dump_pvlist(phys, "pmap_zero_page: phys"); + *CMAP2 = (phys & PG_FRAME) | PG_V | PG_KW /*| PG_N*/; + pmap_update(); + bzero(CADDR2, NBPG); } /* - * pmap_copy_page copies the specified (machine independent) - * page by mapping the page into virtual memory and using - * bcopy to copy the page, one machine dependent page at a - * time. + * pmap_copy_page copies the specified page by mapping + * it into virtual memory and using bcopy to copy its + * contents. */ void pmap_copy_page(src, dst) - register vm_offset_t src, dst; + register vm_offset_t src, dst; { - register int ix; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_copy_page(%x, %x)", src, dst); #endif - src >>= PG_SHIFT; - dst >>= PG_SHIFT; - ix = 0; - do { - physcopyseg(src++, dst++); - } while (++ix != ns532pagesperpage); -} + pmap_dump_pvlist(src, "pmap_copy_page: src"); + pmap_dump_pvlist(dst, "pmap_copy_page: dst"); + *CMAP1 = (src & PG_FRAME) | PG_V | PG_KR; + *CMAP2 = (dst & PG_FRAME) | PG_V | PG_KW /*| PG_N*/; + pmap_update(); + bcopy(CADDR1, CADDR2, NBPG); +} /* * Routine: pmap_pageable @@ -1547,15 +1486,17 @@ pmap_copy_page(src, dst) */ void pmap_pageable(pmap, sva, eva, pageable) - pmap_t pmap; - vm_offset_t sva, eva; - boolean_t pageable; + pmap_t pmap; + vm_offset_t sva, eva; + boolean_t pageable; { + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_pageable(%x, %x, %x, %x)\n", + printf("pmap_pageable(%x, %x, %x, %x)", pmap, sva, eva, pageable); #endif + /* * If we are making a PT page pageable then all valid * mappings must be gone from that page. 
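
pmap_zero_page() and pmap_copy_page() above no longer walk physical segments; they point one of the reserved PTEs CMAP1/CMAP2 at the target frame, flush the TLB with pmap_update(), and then work through the fixed virtual addresses CADDR1/CADDR2. A toy user-space analogy of that window pattern (an array stands in for physical memory and a pointer for the window; nothing here touches real PTEs):

	#include <stdio.h>
	#include <string.h>

	#define NBPG	4096

	static unsigned char	physmem[4][NBPG];	/* toy "physical memory" */
	static unsigned char	*CADDR2;		/* the fixed window address */

	/* Aim the window at a frame; in the kernel this is the store to
	 * *CMAP2 followed by pmap_update() to drop the stale TLB entry. */
	static void
	map_window(int frame)
	{
		CADDR2 = physmem[frame];
	}

	static void
	zero_page(int frame)
	{
		map_window(frame);
		memset(CADDR2, 0, NBPG);	/* bzero(CADDR2, NBPG) */
	}

	int
	main(void)
	{
		memset(physmem[2], 0xaa, NBPG);
		zero_page(2);
		printf("frame 2, first byte: %02x\n", physmem[2][0]);
		return 0;
	}
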
Hence it should @@ -1564,276 +1505,169 @@ pmap_pageable(pmap, sva, eva, pageable) * - we are called with only one page at a time * - PT pages have only one pv_table entry */ - if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) { - register pv_entry_t pv; + if (pmap == pmap_kernel() && pageable && sva + NBPG == eva) { register vm_offset_t pa; + register pt_entry_t *pte; #ifdef DEBUG + u_int pind; + register struct pv_entry *pv; + if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE) - printf("pmap_pageable(%x, %x, %x, %x)\n", + printf("pmap_pageable(%x, %x, %x, %x)", pmap, sva, eva, pageable); #endif - /*if (!pmap_pde_v(pmap_pde(pmap, sva))) - return;*/ - if(pmap_pte(pmap, sva) == 0) + + pte = pmap_pte(pmap, sva); + if (!pte) return; - pa = pmap_pte_pa(pmap_pte(pmap, sva)); - if (pa < vm_first_phys || pa >= vm_last_phys) + if (!pmap_pte_v(pte)) return; - pv = pa_to_pvh(pa); - /*if (!ispt(pv->pv_va)) - return;*/ + + pa = pmap_pte_pa(pte); + #ifdef DEBUG + if ((pind = pmap_page_index(pa)) == -1) + return; + + pv = &pv_table[pind]; if (pv->pv_va != sva || pv->pv_next) { - pg("pmap_pageable: bad PT page va %x next %x\n", + printf("pmap_pageable: bad PT page va %x next %x\n", pv->pv_va, pv->pv_next); return; } #endif + /* * Mark it unmodified to avoid pageout */ pmap_clear_modify(pa); + #ifdef needsomethinglikethis if (pmapdebug & PDB_PTPAGE) - pg("pmap_pageable: PT page %x(%x) unmodified\n", - sva, *(int *)pmap_pte(pmap, sva)); + printf("pmap_pageable: PT page %x(%x) unmodified\n", + sva, *pmap_pte(pmap, sva)); if (pmapdebug & PDB_WIRING) pmap_check_wiring("pageable", sva); #endif } } -/* - * Clear the modify bits on the specified physical page. - */ - -void -pmap_clear_modify(pa) - vm_offset_t pa; -{ -#ifdef DEBUG - if (pmapdebug & PDB_FOLLOW) - printf("pmap_clear_modify(%x)", pa); -#endif - pmap_changebit(pa, PG_M, FALSE); -} - -/* - * pmap_clear_reference: - * - * Clear the reference bit on the specified physical page. - */ - -void pmap_clear_reference(pa) - vm_offset_t pa; -{ -#ifdef DEBUG - if (pmapdebug & PDB_FOLLOW) - printf("pmap_clear_reference(%x)", pa); -#endif - pmap_changebit(pa, PG_U, FALSE); -} - -/* - * pmap_is_referenced: - * - * Return whether or not the specified physical page is referenced - * by any physical maps. - */ - -boolean_t -pmap_is_referenced(pa) - vm_offset_t pa; -{ -#ifdef DEBUG - if (pmapdebug & PDB_FOLLOW) { - boolean_t rv = pmap_testbit(pa, PG_U); - printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]); - return(rv); - } -#endif - return(pmap_testbit(pa, PG_U)); -} - -/* - * pmap_is_modified: - * - * Return whether or not the specified physical page is modified - * by any physical maps. 
- */ - -boolean_t -pmap_is_modified(pa) - vm_offset_t pa; -{ -#ifdef DEBUG - if (pmapdebug & PDB_FOLLOW) { - boolean_t rv = pmap_testbit(pa, PG_M); - printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]); - return(rv); - } -#endif - return(pmap_testbit(pa, PG_M)); -} - -vm_offset_t -pmap_phys_address(ppn) - int ppn; -{ - return(ns532_ptob(ppn)); -} - /* * Miscellaneous support routines follow */ ns532_protection_init() { - register int *kp, prot; - kp = protection_codes; - for (prot = 0; prot < 8; prot++) { - switch (prot) { - case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: - *kp++ = 0; - break; - case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: - case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: - case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: - *kp++ = PG_RO; - break; - case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: - case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: - case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: - case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: - *kp++ = PG_RW; - break; - } - } + protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] = 0; + protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] = + protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_NONE] = + protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RO; + protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_NONE] = + protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_EXECUTE] = + protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_NONE] = + protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RW; } boolean_t -pmap_testbit(pa, bit) +pmap_testbit(pa, setbits) register vm_offset_t pa; - int bit; + int setbits; { - register pv_entry_t pv; - register int *pte, ix; + register struct pv_entry *pv; + register pt_entry_t *pte; + u_int pind; int s; - if (pa < vm_first_phys || pa >= vm_last_phys) - return(FALSE); + if ((pind = pmap_page_index(pa)) == -1) + return FALSE; - pv = pa_to_pvh(pa); + pv = &pv_table[pind]; s = splimp(); + /* * Check saved info first */ - if (pmap_attributes[pa_index(pa)] & bit) { + if (pmap_attributes[pind] & setbits) { splx(s); - return(TRUE); + return TRUE; } + /* * Not found, check current mappings returning * immediately if found. */ if (pv->pv_pmap != NULL) { for (; pv; pv = pv->pv_next) { - pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va); - ix = 0; - do { - if (*pte++ & bit) { - splx(s); - return(TRUE); - } - } while (++ix != ns532pagesperpage); + pte = pmap_pte(pv->pv_pmap, pv->pv_va); + if (*pte & setbits) { + splx(s); + return TRUE; + } } } splx(s); - return(FALSE); + return FALSE; } -pmap_changebit(pa, bit, setem) +/* + * Modify pte bits for all ptes corresponding to the given physical address. + * We use `maskbits' rather than `clearbits' because we're always passing + * constants and the latter would require an extra inversion at run-time. + */ +void +pmap_changebit(pa, setbits, maskbits) register vm_offset_t pa; - int bit; - boolean_t setem; + int setbits, maskbits; { - register pv_entry_t pv; - register int *pte, npte, ix; + register struct pv_entry *pv; + register pt_entry_t *pte; vm_offset_t va; + u_int pind; int s; - boolean_t firstpage = TRUE; #ifdef DEBUG if (pmapdebug & PDB_BITS) - printf("pmap_changebit(%x, %x, %s)", - pa, bit, setem ? 
"set" : "clear"); + printf("pmap_changebit(%x, %x, %x)", + pa, setbits, ~maskbits); #endif - if (pa < vm_first_phys || pa >= vm_last_phys) + + if ((pind = pmap_page_index(pa)) == -1) return; - pv = pa_to_pvh(pa); + pv = &pv_table[pind]; s = splimp(); + /* * Clear saved attributes (modify, reference) */ - if (!setem) - pmap_attributes[pa_index(pa)] &= ~bit; + if (~maskbits) + pmap_attributes[pind] &= maskbits; /* * Loop over all current mappings setting/clearing as appropos * If setting RO do we need to clear the VAC? */ - if (pv->pv_pmap != NULL) { -#ifdef DEBUG - int toflush = 0; -#endif for (; pv; pv = pv->pv_next) { -#ifdef DEBUG - toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1; -#endif va = pv->pv_va; - /* - * XXX don't write protect pager mappings - */ - if (bit == PG_RO) { - extern vm_offset_t pager_sva, pager_eva; + /* + * XXX don't write protect pager mappings + */ + if ((PG_RO && setbits == PG_RO) || + (PG_RW && maskbits == ~PG_RW)) { + extern vm_offset_t pager_sva, pager_eva; - if (va >= pager_sva && va < pager_eva) - continue; - } + if (va >= pager_sva && va < pager_eva) + continue; + } - pte = (int *) pmap_pte(pv->pv_pmap, va); - ix = 0; - do { - if (setem) - npte = *pte | bit; - else - npte = *pte & ~bit; - if (*pte != npte) { - *pte = npte; - /*TBIS(va);*/ - } - va += NS532_PAGE_SIZE; - pte++; - } while (++ix != ns532pagesperpage); - - if (curproc && pv->pv_pmap == &curproc->p_vmspace->vm_pmap) - pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr); + pte = pmap_pte(pv->pv_pmap, va); + *pte = (*pte & maskbits) | setbits; } -#ifdef somethinglikethis - if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) { - if ((pmapvacflush & PVF_TOTAL) || toflush == 3) - DCIA(); - else if (toflush == 2) - DCIS(); - else - DCIU(); - } -#endif + pmap_update(); } splx(s); } @@ -1842,12 +1676,11 @@ pmap_changebit(pa, bit, setem) pmap_pvdump(pa) vm_offset_t pa; { - register pv_entry_t pv; + register struct pv_entry *pv; printf("pa %x", pa); - for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { - printf(" -> pmap %x, va %x, flags %x", - pv->pv_pmap, pv->pv_va, pv->pv_flags); + for (pv = &pv_table[pmap_page_index(pa)]; pv; pv = pv->pv_next) { + printf(" -> pmap %x, va %x", pv->pv_pmap, pv->pv_va); pads(pv->pv_pmap); } printf(" "); @@ -1867,15 +1700,15 @@ pmap_check_wiring(str, va) return; if (!vm_map_lookup_entry(pt_map, va, &entry)) { - pg("wired_check: entry for %x not found\n", va); + printf("wired_check: entry for %x not found\n", va); return; } count = 0; - for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++) + for (pte = (int *)va; pte < (int *)(va + NBPG); pte++) if (*pte) count++; if (entry->wired_count != count) - pg("*%s*: %x: w%d/a%d\n", + printf("*%s*: %x: w%d/a%d\n", str, va, entry->wired_count, count); } #endif @@ -1885,61 +1718,23 @@ pads(pm) pmap_t pm; { unsigned va, i, j; - struct pte *ptep; - int num=0; + register pt_entry_t *pte; -/* if(pm == pmap_kernel()) return; */ + if (pm == pmap_kernel()) + return; for (i = 0; i < 1024; i++) - if(pm->pm_pdir[i].pd_v) + if (pmap_pde_v(&pm->pm_pdir[i])) for (j = 0; j < 1024 ; j++) { - va = (i<<22)+(j<<12); - if (pm == pmap_kernel() && va < 0xfe000000) - continue; - if (pm != pmap_kernel() && va > UPT_MAX_ADDRESS) - continue; - ptep = pmap_pte(pm, va); - if(pmap_pte_v(ptep)) { - if (num % 4 == 0) printf (" "); - printf("%8x:%8x", va, *(int *)ptep); - if (++num %4 == 0) - printf ("\n"); - else - printf (" "); - } - } ; - if (num % 4 != 0) printf ("\n"); -} - -pmap_print (pmap_t pm, unsigned int start, unsigned int stop) -{ - 
unsigned va, i, j; - struct pte *ptep; - int num; - - printf ("pmap_print: pm_pdir = 0x%x\n", pm->pm_pdir); - printf (" map between 0x%x and 0x%x\n", start, stop); - for (i = 0; i < 1024; i++) - if (pm->pm_pdir != 0) { - if(pm->pm_pdir[i].pd_v && (start>>22) <= i - && (stop>>22) >= i) { - printf ("1st Level Entry 0x%x, 2nd PA = 0x%x000\n", - i, pm->pm_pdir[i].pd_pfnum); - num = 0; - for (j = 0; j < 1024 ; j++) { - va = (i<<22)+(j<<12); - ptep = pmap_pte(pm, va); - if(ptep->pg_v && start <= va && stop >= va) { - if (num % 5 == 0) printf (" "); - printf("%8x:%05x", va, ptep->pg_pfnum); - if (++num %5 == 0) - printf ("\n"); - else - printf (" "); - } - } ; - if (num % 5 != 0) printf ("\n"); - }; - }; - if (num % 5 != 0) printf ("\n"); + va = (i << PDSHIFT) | (j << PGSHIFT); + if (pm == pmap_kernel() && + va < VM_MIN_KERNEL_ADDRESS) + continue; + if (pm != pmap_kernel() && + va > VM_MAX_ADDRESS) + continue; + pte = pmap_pte(pm, va); + if (pmap_pte_v(pte)) + printf("%x:%x ", va, *pte); + } } #endif diff --git a/sys/arch/pc532/pc532/process_machdep.c b/sys/arch/pc532/pc532/process_machdep.c index fecbcd2d1932..282e2e3cdcbd 100644 --- a/sys/arch/pc532/pc532/process_machdep.c +++ b/sys/arch/pc532/pc532/process_machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: process_machdep.c,v 1.8 1995/09/26 20:16:32 phil Exp $ */ +/* $NetBSD: process_machdep.c,v 1.9 1996/01/31 21:34:02 phil Exp $ */ /* * Copyright (c) 1993 The Regents of the University of California. @@ -73,17 +73,13 @@ #include #include +extern struct proc *fpu_proc; + static inline struct reg * process_regs(p) struct proc *p; { - void *ptr; - - if ((p->p_flag & P_INMEM) == 0) - return (NULL); - - ptr = (char *)p->p_addr + ((char *)p->p_md.md_regs - (char *)USRSTACK); - return (ptr); + return ((struct reg *) p->p_md.md_regs); } int @@ -127,6 +123,10 @@ process_read_fpregs(p, regs) if ((p->p_flag & P_INMEM) == 0) return (EIO); + if (fpu_proc == p) { + save_fpu_context(&p->p_addr->u_pcb); + fpu_proc = 0; + } bcopy(&p->p_addr->u_pcb.pcb_fsr, regs, sizeof(*regs)); return (0); } @@ -139,7 +139,11 @@ process_write_fpregs(p, regs) if ((p->p_flag & P_INMEM) == 0) return (EIO); + if (fpu_proc == p) + fpu_proc = 0; + bcopy(regs, &p->p_addr->u_pcb.pcb_fsr, sizeof(*regs)); + return (0); } diff --git a/sys/arch/pc532/pc532/sys_machdep.c b/sys/arch/pc532/pc532/sys_machdep.c index 0ae1c03dd9bf..b47e47a21f4e 100644 --- a/sys/arch/pc532/pc532/sys_machdep.c +++ b/sys/arch/pc532/pc532/sys_machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: sys_machdep.c,v 1.5 1995/09/26 20:16:34 phil Exp $ */ +/* $NetBSD: sys_machdep.c,v 1.6 1996/01/31 21:34:03 phil Exp $ */ /*- * Copyright (c) 1990 The Regents of the University of California. @@ -108,3 +108,17 @@ vdoualarm(arg) nvualarm--; } #endif + +int +sys_sysarch(p, v, retval) + struct proc *p; + void *v; + register_t *retval; +{ + struct sysarch_args /* { + syscallarg(int) op; + syscallarg(char *) parms; + } */ *uap = v; + + return ENOSYS; +} diff --git a/sys/arch/pc532/pc532/trap.c b/sys/arch/pc532/pc532/trap.c index d72213d7d481..482cddb6843a 100644 --- a/sys/arch/pc532/pc532/trap.c +++ b/sys/arch/pc532/pc532/trap.c @@ -1,6 +1,8 @@ -/* $NetBSD: trap.c,v 1.13 1995/06/09 06:00:10 phil Exp $ */ +/* $NetBSD: trap.c,v 1.14 1996/01/31 21:34:04 phil Exp $ */ /*- + * Copyright (c) 1996 Matthias Pfaller. All rights reserved. + * Copyright (c) 1995 Charles M. Hannum. All rights reserved. * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. 
* @@ -48,6 +50,7 @@ #include #include #include +#include #ifdef KTRACE #include #endif @@ -58,131 +61,246 @@ #include #include -#include +#include #include +#include +#include +struct proc *fpu_proc; /* Process owning the FPU. */ +/* + * Define the code needed before returning to user mode, for + * trap and syscall. + */ +static inline void +userret(p, pc, oticks) + register struct proc *p; + int pc; + u_quad_t oticks; +{ + int sig, s; -unsigned rcr2(); -extern short cpl; + /* take pending signals */ + while ((sig = CURSIG(p)) != 0) + postsig(sig); + p->p_priority = p->p_usrpri; + if (want_resched) { + /* + * Since we are curproc, a clock interrupt could + * change our priority without changing run queues + * (the running process is not kept on a run queue). + * If this happened after we setrunqueue ourselves but + * before we switch()'ed, we might not be on the queue + * indicated by our priority. + */ + s = splstatclock(); + setrunqueue(p); + p->p_stats->p_ru.ru_nivcsw++; + mi_switch(); + splx(s); + while ((sig = CURSIG(p)) != 0) + postsig(sig); + } + + /* + * If profiling, charge recent system time to the trapped pc. + */ + if (p->p_flag & P_PROFIL) { + extern int psratio; + + addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio); + } + + curpriority = p->p_priority; +} + +char *trap_type[] = { + "non-vectored interrupt", /* 0 T_NVI */ + "non-maskable interrupt", /* 1 T_NMI */ + "abort trap", /* 2 T_ABT */ + "coprocessor trap", /* 3 T_SLAVE */ + "illegal operation in user mode", /* 4 T_ILL */ + "supervisor call", /* 5 T_SVC */ + "divide by zero", /* 6 T_DVZ */ + "flag instruction", /* 7 T_FLG */ + "breakpoint instruction", /* 8 T_BPT */ + "trace trap", /* 9 T_TRC */ + "undefined instruction", /* 10 T_UND */ + "restartable bus error", /* 11 T_RBE */ + "non-restartable bus error", /* 12 T_NBE */ + "integer overflow trap", /* 13 T_OVF */ + "debug trap", /* 14 T_DBG */ + "reserved trap", /* 15 T_RESERVED */ + "unused", /* 16 unused */ + "watchpoint", /* 17 T_WATCHPOINT */ + "asynchronous system trap" /* 18 T_AST */ +}; +int trap_types = sizeof trap_type / sizeof trap_type[0]; + +#ifdef DEBUG +int trapdebug = 0; +#endif /* * trap(frame): * Exception, fault, and trap interface to BSD kernel. This - * common code is called from assembly language IDT gate entry + * common code is called from assembly language trap vector * routines that prepare a suitable stack frame, and restore this * frame after the exception has been processed. Note that the * effect is as if the arguments were passed call by reference. 
*/ /*ARGSUSED*/ +void trap(frame) struct trapframe frame; { - register int i; register struct proc *p = curproc; - struct timeval sticks; - int ucode, type, tear, msr; + int type = frame.tf_trapno; + u_quad_t sticks; + struct pcb *pcb; + extern char fusubail[]; +#ifdef CINVSMALL + extern char cinvstart[], cinvend[]; +#endif cnt.v_trap++; - type = frame.tf_trapno; - tear = frame.tf_tear; - msr = frame.tf_msr; - - if (curpcb->pcb_onfault && frame.tf_trapno != T_ABT) { -copyfault: - frame.tf_pc = (int)curpcb->pcb_onfault; - return; - } - -#ifdef DDB - if (curpcb && curpcb->pcb_onfault) { - if (frame.tf_trapno == T_BPTFLT - || frame.tf_trapno == T_TRCTRAP) - if (kdb_trap (type, 0, &frame)) - return; +#ifdef DEBUG + if (trapdebug) { + printf("trap type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n", + type, frame.tf_pc, frame.tf_tear, frame.tf_msr); + printf("curproc %x\n", curproc); } #endif - - if (curpcb == 0 || curproc == 0) goto we_re_toast; - if ((frame.tf_psr & PSL_USER) == PSL_USER) { + if (USERMODE(frame.tf_psr)) { type |= T_USER; -#ifdef notdef - sticks = p->p_stime; -#endif + sticks = p->p_sticks; p->p_md.md_regs = (int *)&(frame.tf_reg); } - ucode = 0; - switch (type) { default: we_re_toast: -#ifdef KDB - if (kdb_trap(&psl)) - return; -#endif #ifdef DDB - if (kdb_trap (type, 0, &frame)) + if (kdb_trap(type, 0, &frame)) return; #endif - - printf("bad trap: type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n", + if (frame.tf_trapno < trap_types) + printf("fatal %s", trap_type[frame.tf_trapno]); + else + printf("unknown trap %d", frame.tf_trapno); + printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor"); + printf("trap type=%d, pc=0x%x, tear=0x%x, msr=0x%x\n", type, frame.tf_pc, frame.tf_tear, frame.tf_msr); + panic("trap"); /*NOTREACHED*/ - case T_ABT: /* System level pagefault! */ - if (((msr & MSR_STT) == STT_SEQ_INS) - || ((msr & MSR_STT) == STT_NSQ_INS)) - { - printf ("System pagefault: pc=0x%x, tear=0x%x, msr=0x%x\n", - frame.tf_pc, frame.tf_tear, frame.tf_msr); - goto we_re_toast; - } + case T_UND | T_USER: { /* undefined instruction fault */ + int opcode, cfg; + extern int _have_fpu; + opcode = fubyte((void *)frame.tf_pc); +#ifndef NS381 + if (!_have_fpu) { +#ifdef MATH_EMULATE + int rv; + if ((rv = math_emulate(&frame)) == 0) { + if (frame.tf_psr & PSL_T) + goto trace; + return; + } +#endif + } else +#endif + if (opcode == 0x3e || opcode == 0xbe || opcode == 0xfe) { + sprd(cfg, cfg); + if ((cfg & CFG_F) == 0) { + lprd(cfg, cfg | CFG_F); + if (fpu_proc == p) + return; + pcb = &p->p_addr->u_pcb; + if (fpu_proc != 0) + save_fpu_context(&fpu_proc->p_addr->u_pcb); + restore_fpu_context(pcb); + fpu_proc = p; + return; + } + } + } - /* fall into */ - case T_ABT | T_USER: /* User level pagefault! 
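
The T_UND case above is the lazy FPU switch: the FPU enable bit (CFG_F) is found off when a process issues its first floating-point opcode, presumably having been cleared again at context switch, so the resulting undefined-instruction trap is the point at which the previous owner's registers are saved and the new owner's restored, with fpu_proc recording who owns the hardware. A user-space model of that ownership handoff; struct fpstate and the function names are stand-ins for the pcb fields and the locore save/restore routines:

	#include <stdio.h>

	struct fpstate { double regs[8]; };	/* stand-in for the pcb's FPU area */

	struct proc {
		const char	*p_name;
		struct fpstate	p_fps;		/* saved copy in the "pcb" */
	};

	static struct fpstate	fpu;		/* the one hardware register file */
	static struct proc	*fpu_proc;	/* current owner, NULL if none */

	/* What the T_UND handler does once it decides the faulting opcode
	 * was a floating-point instruction. */
	static void
	fpu_trap(struct proc *p)
	{
		if (fpu_proc == p)
			return;			/* already ours: just re-enable */
		if (fpu_proc != NULL)
			fpu_proc->p_fps = fpu;	/* save_fpu_context(old owner) */
		fpu = p->p_fps;			/* restore_fpu_context(new owner) */
		fpu_proc = p;
	}

	int
	main(void)
	{
		struct proc a = { "a", { { 1.0 } } };
		struct proc b = { "b", { { 2.0 } } };

		fpu_trap(&a);
		fpu.regs[0] = 42.0;		/* a computes something */
		fpu_trap(&b);			/* b's first FP use evicts a */
		printf("a will see f0 = %g when it next traps\n", a.p_fps.regs[0]);
		return 0;
	}

Processes that never touch the FPU then never pay for a save or a restore at context switch time.
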
*/ -/* if (type == (T_ABT | T_USER)) - printf ("pagefault: pc=0x%x, tear=0x%x, msr=0x%x\n", - frame.tf_pc, frame.tf_tear, frame.tf_msr); */ - { + case T_ILL | T_USER: /* privileged instruction fault */ + trapsignal(p, SIGILL, type &~ T_USER); + goto out; + + case T_AST | T_USER: /* Allow process switch */ + cnt.v_soft++; + if (p->p_flag & P_OWEUPC) { + p->p_flag &= ~P_OWEUPC; + ADDUPROF(p); + } + goto out; + + case T_OVF | T_USER: + case T_DVZ | T_USER: + trapsignal(p, SIGFPE, type &~ T_USER); + goto out; + + case T_SLAVE | T_USER: { + int fsr; +#ifdef MATH_IEEE + int rv; + if ((rv = math_ieee(&frame)) == 0) { + if (frame.tf_psr & PSL_T) + goto trace; + return; + } +#endif + sfsr(fsr); + trapsignal(p, SIGFPE, 0x80000000 | fsr); + goto out; + } + + case T_ABT: /* allow page faults in kernel mode */ + if ((frame.tf_msr & MSR_STT) == STT_SEQ_INS || + (frame.tf_msr & MSR_STT) == STT_NSQ_INS || + (p == 0)) + goto we_re_toast; + pcb = &p->p_addr->u_pcb; + /* + * fusubail is used by [fs]uswintr() to prevent page faulting + * from inside the profiling interrupt. + */ + if (pcb->pcb_onfault == fusubail) + goto copyfault; +#ifdef CINVSMALL + /* + * If a address translation for a cache invalidate + * request fails, reset the pc and return. + */ + if ((unsigned int)frame.tf_pc >= (unsigned int)cinvstart && + (unsigned int)frame.tf_pc < (unsigned int)cinvend) { + frame.tf_pc = (int)cinvend; + return; + } +#endif + /* FALLTHROUGH */ + + case T_ABT | T_USER: { /* page fault */ register vm_offset_t va; register struct vmspace *vm = p->p_vmspace; register vm_map_t map; int rv; vm_prot_t ftype; extern vm_map_t kernel_map; - unsigned nss,v; + unsigned nss, v; - va = trunc_page((vm_offset_t)tear); - /* - * Avoid even looking at pde_v(va) for high va's. va's - * above VM_MAX_KERNEL_ADDRESS don't correspond to normal - * PDE's (half of them correspond to APDEpde and half to - * an unmapped kernel PDE). va's betweeen 0xFEC00000 and - * VM_MAX_KERNEL_ADDRESS correspond to unmapped kernel PDE's - * (XXX - why are only 3 initialized when 6 are required to - * reach VM_MAX_KERNEL_ADDRESS?). Faulting in an unmapped - * kernel page table would give inconsistent PTD's. - * - * XXX - faulting in unmapped page tables wastes a page if - * va turns out to be invalid. - * - * XXX - should "kernel address space" cover the kernel page - * tables? Might have same problem with PDEpde as with - * APDEpde (or there may be no problem with APDEpde). - */ - if (va > 0xFEBFF000) { - v = KERN_FAILURE; /* becomes SIGBUS */ - goto nogo; - } + va = trunc_page((vm_offset_t)frame.tf_tear); /* * It is only a kernel address space fault iff: - * 1. (type & T_USER) == 0 and - * 2. pcb_onfault not set or + * 1. (type & T_USER) == 0 and + * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. 
@@ -191,13 +309,13 @@ copyfault: map = kernel_map; else map = &vm->vm_map; - if ((msr & MSR_DDT) == DDT_WRITE - || (msr & MSR_STT) == STT_RMW) + if ((frame.tf_msr & MSR_DDT) == DDT_WRITE || + (frame.tf_msr & MSR_STT) == STT_RMW) ftype = VM_PROT_READ | VM_PROT_WRITE; else ftype = VM_PROT_READ; -#ifdef DEBUG +#ifdef DIAGNOSTIC if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %x\n", va); goto we_re_toast; @@ -206,365 +324,206 @@ copyfault: nss = 0; if ((caddr_t)va >= vm->vm_maxsaddr - && (caddr_t)va < (caddr_t)VM_MAXUSER_ADDRESS - && map != kernel_map) { - nss = clrnd(btoc((unsigned)vm->vm_maxsaddr - + MAXSSIZ - (unsigned)va)); + && (caddr_t)va < (caddr_t)VM_MAXUSER_ADDRESS + && map != kernel_map) { + nss = clrnd(btoc(USRSTACK-(unsigned)va)); if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) { -/*pg("trap rlimit %d, maxsaddr %x va %x ", nss, vm->vm_maxsaddr, va);*/ rv = KERN_FAILURE; goto nogo; } } /* check if page table is mapped, if not, fault it first */ -#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v) - if (!pde_v(va)) { + if ((PTD[pdei(va)] & PG_V) == 0) { v = trunc_page(vtopte(va)); rv = vm_fault(map, v, ftype, FALSE); - if (rv != KERN_SUCCESS) goto nogo; + if (rv != KERN_SUCCESS) + goto nogo; /* check if page table fault, increment wiring */ vm_map_pageable(map, v, round_page(v+1), FALSE); - } else v=0; - rv = vm_fault(map, va, ftype, FALSE); + } else + v = 0; + rv = vm_fault(map, va, ftype, FALSE); if (rv == KERN_SUCCESS) { - /* - * XXX: continuation of rude stack hack - */ if (nss > vm->vm_ssize) vm->vm_ssize = nss; va = trunc_page(vtopte(va)); - /* for page table, increment wiring - as long as not a page table fault as well */ + /* for page table, increment wiring as long as + not a page table fault as well */ if (!v && map != kernel_map) - vm_map_pageable(map, va, round_page(va+1), FALSE); + vm_map_pageable(map, va, round_page(va+1), + FALSE); if (type == T_ABT) return; goto out; } -nogo: + + nogo: if (type == T_ABT) { - if (curpcb->pcb_onfault) - goto copyfault; - printf("vm_fault(0x%x, 0x%x, 0x%x, 0) -> 0x%x\n", - map, va, ftype, rv); - printf(" type 0x%x, tear 0x%x msr 0x%x\n", - type, tear, msr); + if (pcb->pcb_onfault != 0) { + copyfault: + frame.tf_pc = (int)curpcb->pcb_onfault; + return; + } + printf("vm_fault(%x, %x, %x, 0) -> %x\n", + map, va, ftype, rv); goto we_re_toast; } - i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV; + trapsignal(p, (rv == KERN_PROTECTION_FAILURE) + ? SIGBUS : SIGSEGV, T_ABT); break; - } - - case T_UND | T_USER: /* undefined instruction */ - case T_ILL | T_USER: /* Illegal instruction! 
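
For a user stack reference, the hunk above sizes the would-be stack as the distance from USRSTACK down to the faulting address, in pages via btoc(), and refuses the growth when that exceeds RLIMIT_STACK. Worked out with made-up numbers (USRSTACK and the limit are illustrative, and the clrnd() cluster rounding is ignored here):

	#include <stdio.h>

	/* Illustrative constants; the real USRSTACK and page size come
	 * from the port's headers. */
	#define PGSHIFT		12
	#define NBPG		(1 << PGSHIFT)
	#define USRSTACK	0xFDBFE000u		/* hypothetical stack top */

	#define btoc(x)		(((unsigned)(x) + NBPG - 1) >> PGSHIFT)

	int
	main(void)
	{
		unsigned va = USRSTACK - 5 * NBPG - 100;  /* faulting address */
		unsigned nss = btoc(USRSTACK - va);	  /* implied stack size */
		unsigned limit = 2 * 1024 * 1024;	  /* say RLIMIT_STACK is 2MB */

		printf("fault at 0x%08x implies a %u page stack\n", va, nss);
		printf("%s\n", nss > btoc(limit)
		    ? "over the rlimit: SIGSEGV"
		    : "within the rlimit: grow and retry");
		return 0;
	}
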
*/ - ucode = type &~ T_USER; - i = SIGILL; - break; - - case T_NVI | T_USER: /* Non-vectored interrupt */ - case T_NMI | T_USER: /* non-maskable interrupt */ - case T_FLG | T_USER: /* flag instruction */ - goto we_re_toast; - - case T_NBE | T_USER: /* non-restartable bus error */ - ucode = type &~ T_USER; - i = SIGBUS; - break; - - case T_RBE | T_USER: /* restartable bus error */ - return; - - case T_SLAVE | T_USER: /* coprocessor trap */ - ucode = type &~ T_USER; -/* ucode = FPE_INTDIV_TRAP; */ - i = SIGFPE; - break; - - case T_DVZ | T_USER: /* divide by zero */ - ucode = type &~ T_USER; -/* ucode = FPE_INTDIV_TRAP; */ - i = SIGFPE; - break; - - case T_OVF | T_USER: /* integer overflow trap */ - ucode = type &~ T_USER; -/* ucode = FPE_INTOVF_TRAP; */ - i = SIGFPE; - break; - + } case T_TRC | T_USER: /* trace trap */ case T_BPT | T_USER: /* breakpoint instruction */ case T_DBG | T_USER: /* debug trap */ + trace: frame.tf_psr &= ~PSL_P; - i = SIGTRAP; + trapsignal(p, SIGTRAP, type &~ T_USER); break; - case T_INTERRUPT | T_USER: /* Allow Process Switch */ -/* if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) { - addupc(frame.tf_eip, &p->p_stats->p_prof, 1); - p->p_flag &= ~SOWEUPC; - } */ - goto out; + case T_NMI: /* non-maskable interrupt */ + case T_NMI | T_USER: +#ifdef DDB + /* NMI can be hooked up to a pushbutton for debugging */ + printf ("NMI ... going to debugger\n"); + if (kdb_trap (type, 0, &frame)) + return; +#endif + goto we_re_toast; + } - } /* End of switch */ - - trapsignal(p, i, ucode); if ((type & T_USER) == 0) return; out: - while (i = CURSIG(p)) - postsig(i); - p->p_priority = p->p_usrpri; - if (want_resched) { - /* - * Since we are curproc, clock will normally just change - * our priority without moving us from one queue to another - * (since the running process is not on a queue.) - * If that happened after we setrunqueue ourselves but - * before we switch()'ed, we might not be on the queue - * indicated by our priority. - */ - (void) splstatclock(); - setrunqueue(p); - p->p_stats->p_ru.ru_nivcsw++; - mi_switch(); - (void) splnone(); - while (i = CURSIG(p)) - postsig(i); - } - if (p->p_stats->p_prof.pr_scale) { - int ticks; - -#ifdef YO_WHAT - struct timeval *tv = &p->p_stime; - - ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + - (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); - if (ticks) { -#ifdef PROFTIMER - extern int profscale; - addupc(frame.tf_eip, &p->p_stats->p_prof, - ticks * profscale); -#else -/* addupc(frame.tf_pc, &p->p_stats->p_prof, ticks); */ -#endif - } -#endif - } - curpriority = p->p_priority; + userret(p, frame.tf_pc, sticks); } - /* * syscall(frame): * System call request from POSIX system call gate interface to kernel. * Like trap(), argument is call by reference. */ /*ARGSUSED*/ +void syscall(frame) - volatile struct syscframe frame; + struct syscframe frame; { - register caddr_t params; - register int i; register struct sysent *callp; register struct proc *p; - struct timeval sticks; int error, opc, nsys; - int args[8], rval[2]; - int code; + size_t argsize; + register_t code, args[8], rval[2]; + u_quad_t sticks; cnt.v_syscall++; - - /* is this a user? 
*/ - if ((frame.sf_psr & PSL_USER) != PSL_USER) - panic("syscall - process not in user mode."); - + if (!USERMODE(frame.sf_psr)) + panic("syscall"); p = curproc; -#ifdef notdef - sticks = p->p_stime; -#endif - code = frame.sf_reg[REG_R0]; - p->p_md.md_regs = (int *) & (frame.sf_reg); - params = (caddr_t)frame.sf_usp + sizeof (int) ; - - callp = p->p_emul->e_sysent; - nsys = p->p_emul->e_nsysent; - - /* Set new return address and save old one. */ + sticks = p->p_sticks; + p->p_md.md_regs = (int *) &frame.sf_reg; opc = frame.sf_pc++; + code = frame.sf_reg[REG_R0]; + + nsys = p->p_emul->e_nsysent; + callp = p->p_emul->e_sysent; + + params = (caddr_t)frame.sf_usp + sizeof(int); switch (code) { case SYS_syscall: + /* + * Code is first argument, followed by actual args. + */ code = fuword(params); params += sizeof(int); break; - case SYS___syscall: + /* + * Like syscall, but code is a quad, so as to maintain + * quad alignment for the rest of the arguments. + */ + if (callp != sysent) + break; code = fuword(params + _QUAD_LOWWORD * sizeof(int)); params += sizeof(quad_t); break; - default: - /* do nothing by default */ break; } - - /* Guard against bad sys call numbers! */ - if (code < 0 || code >= nsys) - callp += p->p_emul->e_nosys; /* indir (illegal) */ - else - callp += code; - - if ((i = callp->sy_argsize) && - (error = copyin(params, (caddr_t)args, (u_int)i))) { - frame.sf_reg[REG_R0] = error; - frame.sf_psr |= PSL_C; + if (code < 0 || code >= nsys) + callp += p->p_emul->e_nosys; /* illegal */ + else + callp += code; + argsize = callp->sy_argsize; + if (argsize) + error = copyin(params, (caddr_t)args, argsize); + else + error = 0; #ifdef SYSCALL_DEBUG - scdebug_call(p, code, callp->sy_narg, i, args); -#endif -#ifdef KTRACE - if (KTRPOINT(p, KTR_SYSCALL)) - ktrsyscall(p->p_tracep, code, i, &args); -#endif - goto done; - } -#ifdef SYSCALL_DEBUG - scdebug_call(p, code, callp->sy_narg, i, args); + scdebug_call(p, code, args); #endif #ifdef KTRACE if (KTRPOINT(p, KTR_SYSCALL)) - ktrsyscall(p->p_tracep, code, i, &args); + ktrsyscall(p->p_tracep, code, argsize, args); #endif + if (error) + goto bad; rval[0] = 0; - rval[1] = 0; + rval[1] = frame.sf_reg[REG_R1]; error = (*callp->sy_call)(p, args, rval); - if (error == ERESTART) - frame.sf_pc = opc; - else if (error != EJUSTRETURN) { - if (error) { - frame.sf_reg[REG_R0] = error; - frame.sf_psr |= PSL_C; - } else { - frame.sf_reg[REG_R0] = rval[0]; - frame.sf_reg[REG_R1] = rval[1]; - frame.sf_psr &= ~PSL_C; - } - } - /* else if (error == EJUSTRETURN) */ - /* nothing to do */ -done: - /* - * Reinitialize proc pointer `p' as it may be different - * if this is a child returning from fork syscall. - */ - p = curproc; - while (i = CURSIG(p)) - postsig(i); - p->p_priority = p->p_usrpri; - if (want_resched) { + switch (error) { + case 0: /* - * Since we are curproc, clock will normally just change - * our priority without moving us from one queue to another - * (since the running process is not on a queue.) - * If that happened after we setrunqeue ourselves but before - * we switch()'ed, we might not be on the queue indicated by - * our priority. + * Reinitialize proc pointer `p' as it may be different + * if this is a child returning from fork syscall. 
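
The switch above handles the two indirect system-call entries: for SYS_syscall the real call number is the first word on the user stack, while SYS___syscall passes it as a quad so that any 64-bit arguments following it keep the alignment the C compiler gave them in the caller. A small sketch of that fetch step; the syscall numbers and the _QUAD_LOWWORD value are the usual NetBSD ones but serve only as illustration here:

	#include <stdio.h>

	typedef long long quad_t;

	#define SYS_syscall	0
	#define SYS___syscall	198
	#define _QUAD_LOWWORD	0	/* 0 on little-endian, 1 on big-endian */

	/*
	 * "ustack" stands for the user stack at sf_usp + sizeof(int), i.e.
	 * just past the return address, and "code" for what the libc stub
	 * loaded into r0.  Returns the real call number and points *argp
	 * at the first genuine argument.
	 */
	static int
	fetch_code(int code, const int *ustack, const int **argp)
	{
		switch (code) {
		case SYS_syscall:	/* call number is the first int argument */
			code = ustack[0];
			*argp = ustack + 1;
			break;
		case SYS___syscall:	/* call number passed as a quad */
			code = ustack[_QUAD_LOWWORD];
			*argp = ustack + sizeof(quad_t) / sizeof(int);
			break;
		default:		/* direct call: arguments start here */
			*argp = ustack;
			break;
		}
		return code;
	}

	int
	main(void)
	{
		int stack[] = { 4 /* SYS_write */, 1, 0x1000, 12 };
		const int *args;
		int code = fetch_code(SYS_syscall, stack, &args);

		printf("call %d, first argument %d\n", code, args[0]);
		return 0;
	}
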
*/ - (void) splstatclock(); - setrunqueue(p); - p->p_stats->p_ru.ru_nivcsw++; - mi_switch(); - (void) splnone(); - while (i = CURSIG(p)) - postsig(i); + p = curproc; + frame.sf_reg[REG_R0] = rval[0]; + frame.sf_reg[REG_R1] = rval[1]; + frame.sf_psr &= ~PSL_C; /* carry bit */ + break; + case ERESTART: + /* + * Just reset the pc to the SVC instruction. + */ + frame.sf_pc = opc; + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + bad: + if (p->p_emul->e_errno) + error = p->p_emul->e_errno[error]; + frame.sf_reg[REG_R0] = error; + frame.sf_psr |= PSL_C; /* carry bit */ + break; } - if (p->p_stats->p_prof.pr_scale) { - int ticks; -#ifdef YO_WHAT - struct timeval *tv = &p->p_stime; - ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + - (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); - if (ticks) { -#ifdef PROFTIMER - extern int profscale; - addupc(frame.sf_pc, &p->p_stats->p_prof, - ticks * profscale); -#else -/* addupc(frame.sf_pc, &p->p_stats->p_prof, ticks); */ -#endif - } -#endif - } - curpriority = p->p_priority; #ifdef SYSCALL_DEBUG - scdebug_ret(p, code, error, rval[0]); + scdebug_ret(p, code, error, rval); #endif + userret(p, frame.sf_pc, sticks); #ifdef KTRACE if (KTRPOINT(p, KTR_SYSRET)) ktrsysret(p->p_tracep, code, error, rval[0]); #endif - } -/* For the child, do the stuff after mi_swtch() in syscall so - low_level_fork does not have to rethread the kernel stack. */ void -ll_fork_sig() +child_return(p, frame) + struct proc *p; + struct syscframe frame; { - register struct proc *p = curproc; - int i; + frame.sf_reg[REG_R0] = 0; + frame.sf_psr &= ~PSL_C; - (void) splnone(); - while (i = CURSIG(p)) - postsig(i); -} - - -/* #define dbg_user */ -/* Other stuff.... */ -int -check_user_write ( u_long addr, u_long size) -{ - int rv; - vm_offset_t va; - -#ifdef dbg_user -printf ("ck_ur_wr: addr=0x%x, size=0x%x", addr, size); -#endif - /* check for all possible places! */ - va = trunc_page((vm_offset_t) addr); - if (va > VM_MAXUSER_ADDRESS) return (1); - - while ((u_long)va < (addr + size)) { - /* check for copy on write access. */ -#ifdef dbg_user -printf (" (0x%x:%d)", va, vtopte(va)->pg_prot); -#endif - if (!(vtopte(va)->pg_v) || vtopte(va)->pg_prot != 3 ) { -#ifdef dbg_user -printf (" fault"); -#endif - rv = vm_fault(&curproc->p_vmspace->vm_map, va, - VM_PROT_READ | VM_PROT_WRITE, FALSE); - if (rv != KERN_SUCCESS) -#ifdef dbg_user -{ printf (" bad\n"); -#endif - return(1); -#ifdef dbg_user -} -#endif - } - va += NBPG; - } -#ifdef dbg_user -printf ("\n"); -#endif - - return (0); + userret(p, frame.sf_pc, 0); +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSRET)) + ktrsysret(p->p_tracep, SYS_fork, 0, 0); +#endif } diff --git a/sys/arch/pc532/pc532/vm_machdep.c b/sys/arch/pc532/pc532/vm_machdep.c index b4a9fc90248a..8aad84d10f61 100644 --- a/sys/arch/pc532/pc532/vm_machdep.c +++ b/sys/arch/pc532/pc532/vm_machdep.c @@ -1,6 +1,9 @@ -/* $NetBSD: vm_machdep.c,v 1.11 1995/08/29 22:37:54 phil Exp $ */ +/* $NetBSD: vm_machdep.c,v 1.12 1996/01/31 21:34:06 phil Exp $ */ /*- + * Copyright (c) 1996 Matthias Pfaller. + * Copyright (c) 1995 Charles M. Hannum. All rights reserved. + * Copyright (c) 1993 Philip A. Nelson. * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * All rights reserved. @@ -54,120 +57,140 @@ #include #include +#include + +extern struct proc *fpu_proc; /* * Finish a fork operation, with process p2 nearly set up. 
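In the error path, a process running under a foreign-ABI emulation has its native errno translated through p->p_emul->e_errno before the value is placed in R0 with the carry bit set. The table is just an index-by-native-errno mapping; the stand-alone sketch below shows the idea only, with invented target numbers and a switch in place of the real array lookup.

#include <errno.h>
#include <stdio.h>

/*
 * Sketch of an errno translation: map a native errno onto a foreign
 * ABI's numbering.  A real e_errno table is an array indexed by the
 * native value; a switch is used here only to keep the example short.
 */
static int
emul_xlate_errno(int error)
{
	switch (error) {
	case ENOENT:	return 2;	/* invented target values */
	case EINTR:	return 4;
	case ENOMEM:	return 12;
	default:	return error;	/* pass through unchanged */
	}
}

int
main(void)
{
	printf("ENOMEM maps to %d\n", emul_xlate_errno(ENOMEM));
	return 0;
}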
- * Copy and update the kernel stack and pcb, making the child - * ready to run, and marking it so that it can return differently - * than the parent. Returns 1 in the child process, 0 in the parent. - * We currently double-map the user area so that the stack is at the same - * address in each process; in the future we will probably relocate - * the frame pointers on the stack after copying. + * Copy the pcb and setup the kernel stack for the child. + * Setup the child's stackframe to return to child_return + * via proc_trampoline from cpu_switch. */ cpu_fork(p1, p2) register struct proc *p1, *p2; { - struct user *up = p2->p_addr; - int foo, offset, addr, i; + register struct pcb *pcb = &p2->p_addr->u_pcb; + register struct syscframe *tf; + register struct switchframe *sf; + extern void proc_trampoline(), child_return(); + + /* Copy curpcb (which is presumably p1's PCB) to p2. */ + *pcb = p1->p_addr->u_pcb; + pcb->pcb_onstack = (struct on_stack *)((u_int)p2->p_addr + USPACE) - 1; + *pcb->pcb_onstack = *p1->p_addr->u_pcb.pcb_onstack; + /* If p1 is holding the FPU, update the FPU context of p2. */ + if (fpu_proc == p1) + save_fpu_context(pcb); + pmap_activate(&p2->p_vmspace->vm_pmap, pcb); /* - * Copy pcb from proc p1 to p2. - * _low_level_init will copy the kernel stack as cheeply as - * possible. + * Copy the syscframe, and arrange for the child to return directly + * through rei(). */ - p2->p_addr->u_pcb = p1->p_addr->u_pcb; - p2->p_addr->u_pcb.pcb_onstack = - (struct on_stack *) p2->p_addr + USPACE - - sizeof (struct on_stack); - - /* - * Wire top of address space of child to it's kstack. - * First, fault in a page of pte's to map it. - */ - addr = trunc_page((u_int)vtopte(USRSTACK)); - vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+USPACE, FALSE); - for (i=0; i < UPAGES; i++) - pmap_enter(&p2->p_vmspace->vm_pmap, USRSTACK+i*NBPG, - pmap_extract(pmap_kernel(), ((int)p2->p_addr)+i*NBPG), - VM_PROT_READ, TRUE); - - pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb); - - /* - * Low_level_fork returns twice! First with a 0 in the - * parent space and Second with a 1 in the child. - */ - - return (low_level_fork(up)); + tf = (struct syscframe *)((u_int)p2->p_addr + USPACE) - 1; + p2->p_md.md_regs = (int *)&(tf->sf_reg); + sf = (struct switchframe *)tf - 1; + sf->sf_pc = (long) proc_trampoline; + sf->sf_fp = (long) &tf->sf_fp; + sf->sf_reg[REG_R3] = (long) child_return; + sf->sf_reg[REG_R4] = (long) p2; + sf->sf_pl = imask[IPL_ZERO]; + pcb->pcb_ksp = (long) sf; + pcb->pcb_kfp = (long) &sf->sf_fp; } +/* + * cpu_set_kpc: + * + * Arrange for in-kernel execution of a process to continue at the + * named pc, as if the code at that address were called as a function + * with argument, the current process's process pointer. + * + * Note that it's assumed that when the named process returns, rei() + * should be invoked, to return to user mode. + */ +void +cpu_set_kpc(p, pc) + struct proc *p; + u_long pc; +{ + struct pcb *pcbp; + struct switchframe *sf; + extern void proc_trampoline(); -#ifdef notyet + pcbp = &p->p_addr->u_pcb; + sf = (struct switchframe *) pcbp->pcb_ksp; + sf->sf_pc = (long) proc_trampoline; + sf->sf_reg[REG_R3] = pc; + sf->sf_reg[REG_R4] = (long) p; +} + +/* + * cpu_swapout is called immediately before a process's 'struct user' + * and kernel stack are unwired (which are in turn done immediately + * before it's P_INMEM flag is cleared). 
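cpu_fork() leaves behind a switchframe whose saved pc is proc_trampoline and whose saved r3/r4 hold a function and its argument (child_return and the child's proc pointer); cpu_set_kpc() later just rewrites those saved registers. Rendered in C rather than the locore.s assembler, the trampoline amounts to the sketch below; the real routine works on the saved registers directly and then falls into rei(), and the syscframe cpu_fork() built just above the switchframe is presumably how child_return() also picks up its frame argument.

/*
 * C rendering of the proc_trampoline idea (sketch): cpu_switch()
 * restores the switchframe built by cpu_fork()/cpu_set_kpc() and
 * "returns" here; we call the queued function, then head back to
 * user mode through the syscframe above us on the kernel stack.
 */
void
proc_trampoline_sketch(void (*func)(struct proc *), struct proc *p)
{
	/* func came from the saved r3, p from the saved r4 */
	(*func)(p);		/* e.g. child_return(p2, ...) */
	/* ...then rei() rebuilds user state and returns to user mode */
}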
If the process is the + * current owner of the floating point unit, the FP state has to be + * saved, so that it goes out with the pcb, which is in the user area. + */ +void +cpu_swapout(p) + struct proc *p; +{ + /* + * Make sure we save the FP state before the user area vanishes. + */ + if (fpu_proc != p) + return; + save_fpu_context(&p->p_addr->u_pcb); + fpu_proc = 0; +} /* * cpu_exit is called as the last action during exit. * - * We change to an inactive address space and a "safe" stack, - * passing thru an argument to the new stack. Now, safely isolated - * from the resources we're shedding, we release the address space - * and any remaining machine-dependent resources, including the - * memory for the user structure and kernel stack. - * - * Next, we assign a dummy context to be written over by swtch, - * calling it to send this process off to oblivion. - * [The nullpcb allows us to minimize cost in swtch() by not having - * a special case]. + * We switch to a temorary stack and address space. Then we release + * release the original address space and machine-dependent resources, + * including the memory for the user structure and kernel stack. + * Once finished, we call cpu_exit, which never returns. + * We block interrupts until cpu_switch has made things safe again. */ -struct proc *swtch_to_inactive(); - void -cpu_exit(p) - register struct proc *p; +cpu_exit(arg) + struct proc *arg; { - static struct pcb nullpcb; /* pcb to overwrite on last swtch */ + register struct proc *p __asm("r3"); + cnt.v_swtch++; - /* free cporcessor (if we have it) */ - if( p == npxproc) npxproc =0; + /* Copy arg into a register. */ + movd(arg, p); - /* move to inactive space and stack, passing arg accross */ - p = swtch_to_inactive(p); + /* If we were using the FPU, forget about it. */ + if (fpu_proc == p) + fpu_proc = 0; - /* drop per-process resources */ + /* Switch to temporary stack and address space. */ + lprd(sp, INTSTACK); + load_ptb(PTDpaddr); + + /* Free resources. */ vmspace_free(p->p_vmspace); - kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES)); + (void) splhigh(); + kmem_free(kernel_map, (vm_offset_t)p->p_addr, USPACE); - p->p_addr = (struct user *) &nullpcb; - splstatclock(); + /* Don't update pcb in cpu_switch. */ + curproc = NULL; cpu_switch(); /* NOTREACHED */ } -#else -void -cpu_exit(p) - register struct proc *p; -{ - - splstatclock(); - cpu_switch(); - /* Not reached. */ - panic ("cpu_exit! swtch returned!"); -} - -void -cpu_wait(p) - struct proc *p; -{ - - /* drop per-process resources */ - vmspace_free(p->p_vmspace); - kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES)); -} -#endif - /* * Dump the machine specific segment at the start of a core dump. */ +struct md_core { + struct reg intreg; + struct fpreg freg; +}; int cpu_coredump(p, vp, cred, chdr) struct proc *p; @@ -175,38 +198,43 @@ cpu_coredump(p, vp, cred, chdr) struct ucred *cred; struct core *chdr; { - int error; - struct { - struct reg regs; - struct fpreg fpregs; - } cpustate; + struct md_core md_core; struct coreseg cseg; + int error; CORE_SETMAGIC(*chdr, COREMAGIC, MID_NS32532, 0); chdr->c_hdrsize = ALIGN(sizeof(*chdr)); chdr->c_seghdrsize = ALIGN(sizeof(cseg)); - chdr->c_cpusize = sizeof(cpustate); - cpustate.regs = *((struct reg *)p->p_md.md_regs); - cpustate.fpregs = *((struct fpreg *)&p->p_addr->u_pcb.pcb_fsr); + chdr->c_cpusize = sizeof(md_core); + + /* Save integer registers. */ + error = process_read_regs(p, &md_core.intreg); + if (error) + return error; + + /* Save floating point registers. 
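fpu_proc implements the lazy FPU handling mentioned in the commit message: the coprocessor keeps the state of the last process that actually used it, and state is saved or restored only when ownership has to change (here in cpu_fork, cpu_swapout and cpu_exit). A sketch of the handoff that must happen before a new owner may touch the FPU, whether driven from cpu_switch or from the coprocessor trap: save_fpu_context appears elsewhere in this patch, while restore_fpu_context and the surrounding logic are assumed counterparts, not quoted from it.

/*
 * Lazy FPU handoff (sketch).  The FPU currently holds fpu_proc's
 * state; hand it over to p before letting p use the coprocessor.
 */
void
fpu_handoff_sketch(struct proc *p)	/* p == curproc */
{
	if (fpu_proc == p)
		return;				/* state is already ours */

	if (fpu_proc != NULL)
		save_fpu_context(&fpu_proc->p_addr->u_pcb);

	restore_fpu_context(&p->p_addr->u_pcb);	/* assumed counterpart */
	fpu_proc = p;
}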
*/ + error = process_read_fpregs(p, &md_core.freg); + if (error) + return error; CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_NS32532, CORE_CPU); cseg.c_addr = 0; cseg.c_size = chdr->c_cpusize; error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize, - (off_t)chdr->c_hdrsize, UIO_SYSSPACE, - IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p); + (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, + (int *)0, p); if (error) return error; - error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate, sizeof(cpustate), + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core), (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE, - IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p); + IO_NODELOCKED|IO_UNIT, cred, (int *)0, p); + if (error) + return error; - if (!error) - chdr->c_nseg++; - - return error; + chdr->c_nseg++; + return 0; } @@ -236,7 +264,7 @@ pagemove(from, to, size) register caddr_t from, to; int size; { - register struct pte *fpte, *tpte; + int *fpte, *tpte; if (size % CLBYTES) panic("pagemove"); @@ -249,7 +277,7 @@ pagemove(from, to, size) to += NBPG; size -= NBPG; } - tlbflush(); + pmap_update(); } /* @@ -259,6 +287,7 @@ kvtop(addr) register caddr_t addr; { vm_offset_t va; + va = pmap_extract(pmap_kernel(), (vm_offset_t)addr); if (va == 0) panic("kvtop: zero page frame"); @@ -285,62 +314,49 @@ extern vm_map_t phys_map; * All requests are (re)mapped into kernel VA space via the useriomap * (a name with only slightly more meaning than "kernelmap") */ -vmapbuf(bp) - register struct buf *bp; +vmapbuf(bp, len) + struct buf *bp; + vm_size_t len; { - register int npf; - register caddr_t addr; - register long flags = bp->b_flags; - struct proc *p; - int off; - vm_offset_t kva; - register vm_offset_t pa; + vm_offset_t faddr, taddr, off; + pt_entry_t *fpte, *tpte; + pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t)); - if ((flags & B_PHYS) == 0) + if ((bp->b_flags & B_PHYS) == 0) panic("vmapbuf"); - addr = bp->b_saveaddr = bp->b_un.b_addr; - off = (int)addr & PGOFSET; - p = bp->b_proc; - npf = btoc(round_page(bp->b_bcount + off)); - kva = kmem_alloc_wait(phys_map, ctob(npf)); - bp->b_un.b_addr = (caddr_t) (kva + off); - while (npf--) { - pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr); - if (pa == 0) - panic("vmapbuf: null page frame"); - pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa), - VM_PROT_READ|VM_PROT_WRITE, TRUE); - addr += PAGE_SIZE; - kva += PAGE_SIZE; - } + faddr = trunc_page(bp->b_saveaddr = bp->b_data); + off = (vm_offset_t)bp->b_data - faddr; + len = round_page(off + len); + taddr = kmem_alloc_wait(phys_map, len); + bp->b_data = (caddr_t)(taddr + off); + /* + * The region is locked, so we expect that pmap_pte() will return + * non-NULL. + */ + fpte = pmap_pte(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), faddr); + tpte = pmap_pte(vm_map_pmap(phys_map), taddr); + do { + *tpte++ = *fpte++; + len -= PAGE_SIZE; + } while (len); } /* * Free the io map PTEs associated with this IO operation. * We also invalidate the TLB entries and restore the original b_addr. 
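vmapbuf() now aliases the user buffer into phys_map by copying PTEs, but the byte offset of b_data within its first page has to be carried over so the kernel alias points at exactly the same bytes. A small stand-alone illustration of the trunc/round/offset arithmetic, assuming 4096-byte pages; the kernel address is a made-up value standing in for kmem_alloc_wait()'s result.

#include <stdio.h>

#define PGSZ		4096UL
#define TRUNC_PG(a)	((a) & ~(PGSZ - 1))
#define ROUND_PG(a)	(((a) + PGSZ - 1) & ~(PGSZ - 1))

int
main(void)
{
	unsigned long uaddr = 0x10012345UL;	/* user buffer (unaligned) */
	unsigned long len   = 0x2000UL;		/* transfer length */

	unsigned long faddr = TRUNC_PG(uaddr);	/* first user page */
	unsigned long off   = uaddr - faddr;	/* offset within that page */
	unsigned long mlen  = ROUND_PG(off + len);	/* bytes of PTEs to copy */

	/* taddr would come from kmem_alloc_wait(phys_map, mlen) */
	unsigned long taddr = 0xf5000000UL;	/* hypothetical kernel va */

	/* prints: map 3 pages, b_data = 0xf5000345 */
	printf("map %lu pages, b_data = %#lx\n", mlen / PGSZ, taddr + off);
	return 0;
}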
*/ -vunmapbuf(bp) - register struct buf *bp; +vunmapbuf(bp, len) + struct buf *bp; + vm_size_t len; { - register int npf; - register caddr_t addr = bp->b_un.b_addr; - vm_offset_t kva; + vm_offset_t addr, off; if ((bp->b_flags & B_PHYS) == 0) panic("vunmapbuf"); - npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET))); - kva = (vm_offset_t)((int)addr & ~PGOFSET); - kmem_free_wakeup(phys_map, kva, ctob(npf)); - bp->b_un.b_addr = bp->b_saveaddr; - bp->b_saveaddr = NULL; -} - -/* - * (Force reset the processor by invalidating the entire address space!) - * Well, lets just hang! - */ -cpu_reset() -{ - splhigh(); - while (1); + addr = trunc_page(bp->b_data); + off = (vm_offset_t)bp->b_data - addr; + len = round_page(off + len); + kmem_free_wakeup(phys_map, addr, len); + bp->b_data = bp->b_saveaddr; + bp->b_saveaddr = 0; }
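vunmapbuf() undoes vmapbuf(): it frees the phys_map range and puts the saved user address back into b_data. The pair is normally invoked from the raw-I/O (physio) path; a hedged sketch of that bracketing, with everything other than the two calls simplified away:

/*
 * How vmapbuf()/vunmapbuf() bracket a raw transfer (sketch).
 * Real physio() does the buffer setup, error handling and
 * biowait() bookkeeping that is omitted here.
 */
void
raw_transfer_sketch(struct buf *bp, vm_size_t len)
{
	bp->b_flags |= B_PHYS;		/* data lives in user space */

	vmapbuf(bp, len);		/* alias the user pages in phys_map */
	/* ... queue bp to the driver and wait for completion ... */
	vunmapbuf(bp, len);		/* drop the alias, restore b_data */

	bp->b_flags &= ~B_PHYS;
}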