diff --git a/sys/arch/arm32/arm32/db_interface.c b/sys/arch/arm32/arm32/db_interface.c deleted file mode 100644 index 096148488f80..000000000000 --- a/sys/arch/arm32/arm32/db_interface.c +++ /dev/null @@ -1,472 +0,0 @@ -/* $NetBSD: db_interface.c,v 1.34 2001/01/22 13:56:57 jdolecek Exp $ */ - -/* - * Copyright (c) 1996 Scott K. Stevens - * - * Mach Operating System - * Copyright (c) 1991,1990 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie the - * rights to redistribute these changes. - * - * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU) - */ - -/* - * Interface to new debugger. - */ -#include "opt_ddb.h" - -#include -#include -#include -#include /* just for boothowto */ -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int nil; - -int db_access_und_sp __P((const struct db_variable *, db_expr_t *, int)); -int db_access_abt_sp __P((const struct db_variable *, db_expr_t *, int)); -int db_access_irq_sp __P((const struct db_variable *, db_expr_t *, int)); -u_int db_fetch_reg __P((int, db_regs_t *)); - -const struct db_variable db_regs[] = { - { "spsr", (long *)&DDB_REGS->tf_spsr, FCN_NULL, }, - { "r0", (long *)&DDB_REGS->tf_r0, FCN_NULL, }, - { "r1", (long *)&DDB_REGS->tf_r1, FCN_NULL, }, - { "r2", (long *)&DDB_REGS->tf_r2, FCN_NULL, }, - { "r3", (long *)&DDB_REGS->tf_r3, FCN_NULL, }, - { "r4", (long *)&DDB_REGS->tf_r4, FCN_NULL, }, - { "r5", (long *)&DDB_REGS->tf_r5, FCN_NULL, }, - { "r6", (long *)&DDB_REGS->tf_r6, FCN_NULL, }, - { "r7", (long *)&DDB_REGS->tf_r7, FCN_NULL, }, - { "r8", (long *)&DDB_REGS->tf_r8, FCN_NULL, }, - { "r9", (long *)&DDB_REGS->tf_r9, FCN_NULL, }, - { "r10", (long *)&DDB_REGS->tf_r10, FCN_NULL, }, - { "r11", (long *)&DDB_REGS->tf_r11, FCN_NULL, }, - { "r12", (long *)&DDB_REGS->tf_r12, FCN_NULL, }, - { "usr_sp", (long *)&DDB_REGS->tf_usr_sp, FCN_NULL, }, - { "usr_lr", (long *)&DDB_REGS->tf_usr_lr, FCN_NULL, }, - { "svc_sp", (long *)&DDB_REGS->tf_svc_sp, FCN_NULL, }, - { "svc_lr", (long *)&DDB_REGS->tf_svc_lr, FCN_NULL, }, - { "pc", (long *)&DDB_REGS->tf_pc, FCN_NULL, }, - { "und_sp", (long *)&nil, db_access_und_sp, }, - { "abt_sp", (long *)&nil, db_access_abt_sp, }, - { "irq_sp", (long *)&nil, db_access_irq_sp, }, -}; - -const struct db_variable * const db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); - -extern label_t *db_recover; - -int db_active = 0; - -int db_access_und_sp(vp, valp, rw) - const struct db_variable *vp; - db_expr_t *valp; - int rw; -{ - if (rw == DB_VAR_GET) - *valp = get_stackptr(PSR_UND32_MODE); - return(0); -} - -int db_access_abt_sp(vp, valp, rw) - const struct db_variable 
*vp; - db_expr_t *valp; - int rw; -{ - if (rw == DB_VAR_GET) - *valp = get_stackptr(PSR_ABT32_MODE); - return(0); -} - -int db_access_irq_sp(vp, valp, rw) - const struct db_variable *vp; - db_expr_t *valp; - int rw; -{ - if (rw == DB_VAR_GET) - *valp = get_stackptr(PSR_IRQ32_MODE); - return(0); -} - -/* - * kdb_trap - field a TRACE or BPT trap - */ -int -kdb_trap(type, regs) - int type; - db_regs_t *regs; -{ - int s; - - switch (type) { - case T_BREAKPOINT: /* breakpoint */ - case -1: /* keyboard interrupt */ - break; - default: - db_printf("kernel: trap"); - if (db_recover != 0) { - db_error("Faulted in DDB; continuing...\n"); - /*NOTREACHED*/ - } - } - - /* Should switch to kdb`s own stack here. */ - - ddb_regs = *regs; - - s = splhigh(); - db_active++; - cnpollc(TRUE); - db_trap(type, 0/*code*/); - cnpollc(FALSE); - db_active--; - splx(s); - - *regs = ddb_regs; - - return (1); -} - - -/* - * Received keyboard interrupt sequence. - */ -void -kdb_kbd_trap(regs) - db_regs_t *regs; -{ - if (db_active == 0 && (boothowto & RB_KDB)) { - printf("\n\nkernel: keyboard interrupt\n"); - kdb_trap(-1, regs); - } -} - - -static int -db_validate_address(addr) - vm_offset_t addr; -{ - pt_entry_t *ptep; - pd_entry_t *pdep; - struct proc *p = curproc; - - /* - * If we have a valid pmap for curproc, use it's page directory - * otherwise use the kernel pmap's page directory. - */ - if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap) - pdep = kernel_pmap->pm_pdir; - else - pdep = p->p_vmspace->vm_map.pmap->pm_pdir; - - /* Make sure the address we are reading is valid */ - switch ((pdep[(addr >> 20) + 0] & L1_MASK)) { - case L1_SECTION: - break; - case L1_PAGE: - /* Check the L2 page table for validity */ - ptep = vtopte(addr); - if ((*ptep & L2_MASK) != L2_INVAL) - break; - /* FALLTHROUGH */ - default: - return 1; - } - - return 0; -} - -/* - * Read bytes from kernel address space for debugger. - */ -void -db_read_bytes(addr, size, data) - vm_offset_t addr; - int size; - char *data; -{ - char *src; - - src = (char *)addr; - while (--size >= 0) { - if (db_validate_address((u_int)src)) { - db_printf("address %p is invalid\n", src); - return; - } - *data++ = *src++; - } -} - -static void -db_write_text(dst, ch) - unsigned char *dst; - int ch; -{ - pt_entry_t *ptep, pteo; - vm_offset_t va; - - va = (unsigned long)dst & (~PGOFSET); - ptep = vtopte(va); - - if (db_validate_address((u_int)dst)) { - db_printf(" address %p not a valid page\n", dst); - return; - } - - pteo = *ptep; - *ptep = pteo | PT_AP(AP_KRW); - cpu_tlb_flushD_SE(va); - - *dst = (unsigned char)ch; - - /* make sure the caches and memory are in sync */ - cpu_cache_syncI_rng((u_int)dst, 4); - - *ptep = pteo; - cpu_tlb_flushD_SE(va); -} - -/* - * Write bytes to kernel address space for debugger. - */ -void -db_write_bytes(addr, size, data) - vm_offset_t addr; - int size; - char *data; -{ - extern char etext[]; - char *dst; - int loop; - - dst = (char *)addr; - loop = size; - while (--loop >= 0) { - if ((dst >= (char *)KERNEL_TEXT_BASE) && (dst < etext)) - db_write_text(dst, *data); - else { - if (db_validate_address((u_int)dst)) { - db_printf("address %p is invalid\n", dst); - return; - } - *dst = *data; - } - dst++, data++; - } - /* make sure the caches and memory are in sync */ - cpu_cache_syncI_rng(addr, size); - - /* In case the current page tables have been modified ... 
*/ - cpu_tlb_flushID(); -} - -void -cpu_Debugger() -{ - asm(".word 0xe7ffffff"); -} - -void db_show_vmstat_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -void db_show_intrchain_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -void db_show_panic_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -void db_show_frame_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -#ifdef OFW -void db_of_boot_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -void db_of_enter_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -void db_of_exit_cmd __P((db_expr_t addr, int have_addr, db_expr_t count, char *modif)); -#endif - -const struct db_command db_machine_command_table[] = { - { "frame", db_show_frame_cmd, 0, NULL }, - { "intrchain", db_show_intrchain_cmd, 0, NULL }, -#ifdef OFW - { "ofboot", db_of_boot_cmd, 0, NULL }, - { "ofenter", db_of_enter_cmd, 0, NULL }, - { "ofexit", db_of_exit_cmd, 0, NULL }, -#endif - { "panic", db_show_panic_cmd, 0, NULL }, - { "vmstat", db_show_vmstat_cmd, 0, NULL }, - { NULL, NULL, 0, NULL } -}; - -int -db_trapper(addr, inst, frame, fault_code) - u_int addr; - u_int inst; - trapframe_t *frame; - int fault_code; -{ - if (fault_code == 0) { - frame->tf_pc -= INSN_SIZE; - if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK)) - kdb_trap(T_BREAKPOINT, frame); - else - kdb_trap(-1, frame); - } else - return (1); - return (0); -} - -extern u_int esym; -extern u_int end; - -void -db_machine_init() -{ - struct exec *kernexec = (struct exec *)KERNEL_TEXT_BASE; - int len; - - /* - * The boot loader currently loads the kernel with the a.out - * header still attached. - */ - - if (kernexec->a_syms == 0) { - printf("[No symbol table]\n"); - } else { -#if !defined(SHARK) && !defined(OFWGENCFG) - esym = (int)&end + kernexec->a_syms + sizeof(int); -#else - /* cover the symbols themselves */ - esym = (int)&end + kernexec->a_syms; -#endif - /* - * and the string table. (int containing size of string - * table is included in string table size). - */ - len = *((u_int *)esym); - esym += (len + (sizeof(u_int) - 1)) & ~(sizeof(u_int) - 1); - } - - install_coproc_handler(0, db_trapper); -} - -u_int -db_fetch_reg(reg, db_regs) - int reg; - db_regs_t *db_regs; -{ - - switch (reg) { - case 0: - return (db_regs->tf_r0); - case 1: - return (db_regs->tf_r1); - case 2: - return (db_regs->tf_r2); - case 3: - return (db_regs->tf_r3); - case 4: - return (db_regs->tf_r4); - case 5: - return (db_regs->tf_r5); - case 6: - return (db_regs->tf_r6); - case 7: - return (db_regs->tf_r7); - case 8: - return (db_regs->tf_r8); - case 9: - return (db_regs->tf_r9); - case 10: - return (db_regs->tf_r10); - case 11: - return (db_regs->tf_r11); - case 12: - return (db_regs->tf_r12); - case 13: - return (db_regs->tf_svc_sp); - case 14: - return (db_regs->tf_svc_lr); - case 15: - return (db_regs->tf_pc); - default: - panic("db_fetch_reg: botch"); - } -} - -u_int -branch_taken(insn, pc, db_regs) - u_int insn; - u_int pc; - db_regs_t *db_regs; -{ - u_int addr, nregs; - - switch ((insn >> 24) & 0xf) { - case 0xa: /* b ... */ - case 0xb: /* bl ... 
*/ - addr = ((insn << 2) & 0x03ffffff); - if (addr & 0x02000000) - addr |= 0xfc000000; - return (pc + 8 + addr); - case 0x7: /* ldr pc, [pc, reg, lsl #2] */ - addr = db_fetch_reg(insn & 0xf, db_regs); - addr = pc + 8 + (addr << 2); - db_read_bytes(addr, 4, (char *)&addr); - return (addr); - case 0x1: /* mov pc, reg */ - addr = db_fetch_reg(insn & 0xf, db_regs); - return (addr); - case 0x8: /* ldmxx reg, {..., pc} */ - case 0x9: - addr = db_fetch_reg((insn >> 16) & 0xf, db_regs); - nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); - nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); - nregs = (nregs + (nregs >> 4)) & 0x0f0f; - nregs = (nregs + (nregs >> 8)) & 0x001f; - switch ((insn >> 23) & 0x3) { - case 0x0: /* ldmda */ - addr = addr - 0; - break; - case 0x1: /* ldmia */ - addr = addr + 0 + ((nregs - 1) << 2); - break; - case 0x2: /* ldmdb */ - addr = addr - 4; - break; - case 0x3: /* ldmib */ - addr = addr + 4 + ((nregs - 1) << 2); - break; - } - db_read_bytes(addr, 4, (char *)&addr); - return (addr); - default: - panic("branch_taken: botch"); - } -} diff --git a/sys/arch/arm32/arm32/db_machdep.c b/sys/arch/arm32/arm32/db_machdep.c deleted file mode 100644 index 19c3cc50ce42..000000000000 --- a/sys/arch/arm32/arm32/db_machdep.c +++ /dev/null @@ -1,211 +0,0 @@ -/* $NetBSD: db_machdep.c,v 1.16 2000/11/21 16:34:53 chs Exp $ */ - -/* - * Copyright (c) 1996 Mark Brinicombe - * - * Mach Operating System - * Copyright (c) 1991,1990 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie the - * rights to redistribute these changes. 
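An annotation on branch_taken() in db_interface.c above: for the ldm cases it sizes the register list with a parallel bit count over the instruction's low 16 bits before working out which word the new PC will be loaded from. A small hosted C sketch (not kernel code) that checks that counting step against a naive loop:

    #include <assert.h>
    #include <stdio.h>

    /* The parallel bit count branch_taken() uses to size an ldm register list. */
    static unsigned
    ldm_nregs(unsigned insn)
    {
        unsigned n;

        n = (insn & 0x5555) + ((insn >> 1) & 0x5555);   /* sum adjacent bit pairs */
        n = (n & 0x3333) + ((n >> 2) & 0x3333);         /* sum nibbles */
        n = (n + (n >> 4)) & 0x0f0f;                    /* sum bytes */
        return (n + (n >> 8)) & 0x001f;                 /* 0..16 registers */
    }

    int
    main(void)
    {
        unsigned mask, bit, naive;

        for (mask = 0; mask <= 0xffff; mask++) {
            for (naive = 0, bit = 0; bit < 16; bit++)
                if (mask & (1u << bit))
                    naive++;
            assert(ldm_nregs(mask) == naive);
        }
        printf("register count matches a naive popcount for every 16-bit mask\n");
        return 0;
    }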
- */ - -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include - -#ifdef OFW -#include -#endif - -void -db_show_vmstat_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - - db_printf("Current UVM status:\n"); - db_printf(" pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d\n", - uvmexp.pagesize, uvmexp.pagesize, uvmexp.pagemask, - uvmexp.pageshift); - db_printf(" %d VM pages: %d active, %d inactive, %d wired, %d free\n", - uvmexp.npages, uvmexp.active, uvmexp.inactive, uvmexp.wired, - uvmexp.free); - db_printf(" freemin=%d, free-target=%d, inactive-target=%d, " - "wired-max=%d\n", uvmexp.freemin, uvmexp.freetarg, uvmexp.inactarg, - uvmexp.wiredmax); - db_printf(" faults=%d, traps=%d, intrs=%d, ctxswitch=%d\n", - uvmexp.faults, uvmexp.traps, uvmexp.intrs, uvmexp.swtch); - db_printf(" softint=%d, syscalls=%d, swapins=%d, swapouts=%d\n", - uvmexp.softs, uvmexp.syscalls, uvmexp.swapins, uvmexp.swapouts); - - db_printf(" fault counts:\n"); - db_printf(" noram=%d, noanon=%d, pgwait=%d, pgrele=%d\n", - uvmexp.fltnoram, uvmexp.fltnoanon, uvmexp.fltpgwait, - uvmexp.fltpgrele); - db_printf(" ok relocks(total)=%d(%d), anget(retrys)=%d(%d), " - "amapcopy=%d\n", uvmexp.fltrelckok, uvmexp.fltrelck, - uvmexp.fltanget, uvmexp.fltanretry, uvmexp.fltamcopy); - db_printf(" neighbor anon/obj pg=%d/%d, gets(lock/unlock)=%d/%d\n", - uvmexp.fltnamap, uvmexp.fltnomap, uvmexp.fltlget, uvmexp.fltget); - db_printf(" cases: anon=%d, anoncow=%d, obj=%d, prcopy=%d, przero=%d\n", - uvmexp.flt_anon, uvmexp.flt_acow, uvmexp.flt_obj, uvmexp.flt_prcopy, - uvmexp.flt_przero); - - db_printf(" daemon and swap counts:\n"); - db_printf(" woke=%d, revs=%d, scans=%d, swout=%d\n", uvmexp.pdwoke, - uvmexp.pdrevs, uvmexp.pdscans, uvmexp.pdswout); - db_printf(" busy=%d, freed=%d, reactivate=%d, deactivate=%d\n", - uvmexp.pdbusy, uvmexp.pdfreed, uvmexp.pdreact, uvmexp.pddeact); - db_printf(" pageouts=%d, pending=%d, nswget=%d\n", uvmexp.pdpageouts, - uvmexp.pdpending, uvmexp.nswget); - db_printf(" nswapdev=%d, nanon=%d, nfreeanon=%d\n", uvmexp.nswapdev, - uvmexp.nanon, uvmexp.nfreeanon); - - db_printf(" kernel pointers:\n"); - db_printf(" objs(kmem/mb)=%p/%p\n", uvmexp.kmem_object, - uvmexp.mb_object); -} - -void -db_show_intrchain_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - int loop; - irqhandler_t *ptr; - char *name; - db_expr_t offset; - - for (loop = 0; loop < NIRQS; ++loop) { - ptr = irqhandlers[loop]; - if (ptr) { - db_printf("IRQ %d\n", loop); - - while (ptr) { - db_printf(" %-13s %d ", ptr->ih_name, ptr->ih_level); - db_find_sym_and_offset((u_int)ptr->ih_func, &name, &offset); - if (name == NULL) - name = "?"; - - db_printf("%s(", name); - db_printsym((u_int)ptr->ih_func, DB_STGY_PROC, - db_printf); - db_printf(") %08x\n", (u_int)ptr->ih_arg); - ptr = ptr->ih_next; - } - } - } -} - - -void -db_show_panic_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - int s; - - s = splhigh(); - - db_printf("Panic string: %s\n", panicstr); - - (void)splx(s); -} - - -void -db_show_frame_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - struct trapframe *frame; - - if (!have_addr) { - db_printf("frame address must be specified\n"); - return; - } - - frame = (struct trapframe *)addr; - - db_printf("frame address = %08x ", (u_int)frame); - db_printf("spsr=%08x\n", 
frame->tf_spsr); - db_printf("r0 =%08x r1 =%08x r2 =%08x r3 =%08x\n", - frame->tf_r0, frame->tf_r1, frame->tf_r2, frame->tf_r3); - db_printf("r4 =%08x r5 =%08x r6 =%08x r7 =%08x\n", - frame->tf_r4, frame->tf_r5, frame->tf_r6, frame->tf_r7); - db_printf("r8 =%08x r9 =%08x r10=%08x r11=%08x\n", - frame->tf_r8, frame->tf_r9, frame->tf_r10, frame->tf_r11); - db_printf("r12=%08x r13=%08x r14=%08x r15=%08x\n", - frame->tf_r12, frame->tf_usr_sp, frame->tf_usr_lr, frame->tf_pc); - db_printf("slr=%08x\n", frame->tf_svc_lr); -} - -#ifdef OFW -void -db_of_boot_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - OF_boot(""); -} - - -void -db_of_enter_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - OF_enter(); -} - - -void -db_of_exit_cmd(addr, have_addr, count, modif) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; -{ - OF_exit(); -} -#endif /* OFW */ diff --git a/sys/arch/arm32/arm32/db_trace.c b/sys/arch/arm32/arm32/db_trace.c deleted file mode 100644 index c7358228ea86..000000000000 --- a/sys/arch/arm32/arm32/db_trace.c +++ /dev/null @@ -1,149 +0,0 @@ -/* $NetBSD: db_trace.c,v 1.14 2001/02/25 21:31:14 bjh21 Exp $ */ - -/* - * Copyright (c) 1996 Scott K. Stevens - * - * Mach Operating System - * Copyright (c) 1991,1990 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie the - * rights to redistribute these changes. - */ - -#include -#include -#include -#include - -#include -#include -#include - -#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS) - -void -db_stack_trace_print(addr, have_addr, count, modif, pr) - db_expr_t addr; - int have_addr; - db_expr_t count; - char *modif; - void (*pr) __P((const char *, ...)); -{ - struct frame *frame, *lastframe; - char c, *cp = modif; - boolean_t kernel_only = TRUE; - boolean_t trace_thread = FALSE; - - while ((c = *cp++) != 0) { - if (c == 'u') - kernel_only = FALSE; - if (c == 't') - trace_thread = TRUE; - } - - /* - * The frame pointer points to the top word of the stack frame so we - * need to adjust it by sizeof(struct frame) - sizeof(u_int)) - * to get the address of the start of the frame structure. 
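To make that adjustment concrete: the trace code below reads frames through fr_fp/fr_sp/fr_lr/fr_pc, a four-word layout, so the frame pointer sits sizeof(struct frame) - sizeof(u_int) = 12 bytes above the structure's start. A hosted sketch of just that arithmetic (the struct here is an illustration of the layout, not the kernel's own header):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Four-word APCS stack frame, lowest address first. */
    struct frame {
        uint32_t fr_fp;     /* caller's frame pointer */
        uint32_t fr_sp;     /* caller's stack pointer */
        uint32_t fr_lr;     /* saved link register */
        uint32_t fr_pc;     /* saved program counter (the top word) */
    };

    int
    main(void)
    {
        uint32_t r11 = 0xf0001010;  /* example fp value: address of fr_pc */
        uint32_t base;

        /* fp points at the top word, so the structure starts 12 bytes lower. */
        base = r11 - (sizeof(struct frame) - sizeof(uint32_t));
        assert(base == 0xf0001004);
        printf("frame base = 0x%08x\n", base);
        return 0;
    }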
- */ - - if (!have_addr) - frame = (struct frame *)(DDB_REGS->tf_r11 - - (sizeof(struct frame) - sizeof(u_int))); - else { - if (trace_thread) { - struct proc *p; - struct user *u; - (*pr)("trace: pid %d ", (int)addr); - p = pfind(addr); - if (p == NULL) { - (*pr)("not found\n"); - return; - } - if (!(p->p_flag & P_INMEM)) { - (*pr)("swapped out\n"); - return; - } - u = p->p_addr; - frame = (struct frame *) (u->u_pcb.pcb_r11 - - (sizeof(struct frame) - sizeof(u_int))); - (*pr)("at %p\n", frame); - } else - frame = (struct frame *)(addr - (sizeof(struct frame) - - sizeof(u_int))); - } - lastframe = NULL; - - while (count--) { - db_expr_t offset; - char *name; - db_addr_t pc; - -/* (*pr)("fp=%08x: fp=%08x sp=%08x lr=%08x pc=%08x\n", - (u_int)frame, frame->fr_fp, frame->fr_sp, frame->fr_lr, - frame->fr_pc);*/ - - pc = frame->fr_pc; - /* Adjust the PC so the same address is printed no matter what CPU */ - if (cputype == CPU_ID_SA110 || cputype == CPU_ID_ARM810) - pc += 4; - if (!INKERNEL(pc)) - break; - - db_find_sym_and_offset(pc, &name, &offset); - if (name == NULL) - name = "?"; - - (*pr)("%s(", name); - db_printsym(pc, DB_STGY_PROC, pr); - (*pr)(")"); - (*pr)("\n"); - - /* - * Switch to next frame up - */ - lastframe = frame; - frame = (struct frame *)(frame->fr_fp - (sizeof(struct frame) - - sizeof(u_int))); - - if (frame == NULL) - break; - - if (INKERNEL((int)frame)) { - /* staying in kernel */ - if (frame <= lastframe) { - (*pr)("Bad frame pointer: %p\n", frame); - break; - } - } else if (INKERNEL((int)lastframe)) { - /* switch from user to kernel */ - if (kernel_only) - break; /* kernel stack only */ - } else { - /* in user */ - if (frame <= lastframe) { - (*pr)("Bad user frame pointer: %p\n", - frame); - break; - } - } - } -} diff --git a/sys/arch/arm32/arm32/disksubr.c b/sys/arch/arm32/arm32/disksubr.c deleted file mode 100644 index cc72cd2b1298..000000000000 --- a/sys/arch/arm32/arm32/disksubr.c +++ /dev/null @@ -1,438 +0,0 @@ -/* $NetBSD: disksubr.c,v 1.14 2000/11/20 08:24:11 chs Exp $ */ - -/* - * Copyright (c) 1998 Christopher G. Demetriou. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Christopher G. Demetriou - * for the NetBSD Project. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Copyright (c) 1982, 1986, 1988 Regents of the University of California. - * Copyright (c) 1995 Mark Brinicombe - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91 - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Attempt to read a disk label from a device - * using the indicated stategy routine. - * The label must be partly set up before this: - * secpercyl, secsize and anything required for a block i/o read - * operation in the driver's strategy/start routines - * must be filled in before calling us. - * - * If dos partition table requested, attempt to load it and - * find disklabel inside a DOS partition. Also, if bad block - * table needed, attempt to extract it as well. Return buffer - * for use in signalling errors if requested. - * - * Returns null on success and an error string on failure. 
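The search inside readdisklabel() below walks the label sector in sizeof(long) steps and accepts the first candidate whose two magic words and checksum hold up. A stripped-down userland sketch of that scan (the struct and checksum routine are simplified stand-ins for the kernel's struct disklabel and dkcksum()):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DISKMAGIC 0x82564557u           /* same magic the kernel label uses */

    /* Heavily trimmed label: only the fields the scan below looks at. */
    struct mini_label {
        uint32_t d_magic;
        uint16_t d_npartitions;
        uint16_t d_checksum;
        uint32_t d_magic2;
    };

    /* XOR the label 16 bits at a time; a stand-in for the kernel's dkcksum(). */
    static uint16_t
    mini_cksum(const struct mini_label *lp)
    {
        uint16_t buf[sizeof(*lp) / 2], sum = 0;
        size_t i;

        memcpy(buf, lp, sizeof(*lp));
        for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i++)
            sum ^= buf[i];
        return sum;
    }

    /* Step through a sector in sizeof(long) increments, as readdisklabel() does. */
    static long
    find_label(const unsigned char *sec, size_t secsize)
    {
        struct mini_label lab;
        size_t off;

        for (off = 0; off + sizeof(lab) <= secsize; off += sizeof(long)) {
            memcpy(&lab, sec + off, sizeof(lab));
            if (lab.d_magic == DISKMAGIC && lab.d_magic2 == DISKMAGIC &&
                mini_cksum(&lab) == 0)
                return (long)off;
        }
        return -1;
    }

    int
    main(void)
    {
        unsigned char sector[512] = { 0 };
        struct mini_label lab = { 0 };

        lab.d_magic = lab.d_magic2 = DISKMAGIC;
        lab.d_npartitions = 8;
        lab.d_checksum = mini_cksum(&lab);  /* makes the whole XOR come out 0 */
        memcpy(sector + 64, &lab, sizeof(lab));

        assert(find_label(sector, sizeof(sector)) == 64);
        printf("label found at byte offset 64\n");
        return 0;
    }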
- */ - -char * -readdisklabel(dev, strat, lp, osdep) - dev_t dev; - void (*strat)(); - struct disklabel *lp; - struct cpu_disklabel *osdep; -{ - struct buf *bp; - struct disklabel *dlp; - char *msg = NULL; - int cyl, netbsdpartoff, i; - -/* printf("Reading disclabel for %04x\n", dev);*/ - - /* minimal requirements for archtypal disk label */ - - if (lp->d_secsize == 0) - lp->d_secsize = DEV_BSIZE; - - if (lp->d_secperunit == 0) - lp->d_secperunit = 0x1fffffff; - - lp->d_npartitions = MAXPARTITIONS; - for (i = 0; i < MAXPARTITIONS; i++) { - if (i == RAW_PART) continue; - lp->d_partitions[i].p_offset = 0; - lp->d_partitions[i].p_fstype = FS_UNUSED; - lp->d_partitions[i].p_size = 0; - } - - if (lp->d_partitions[RAW_PART].p_size == 0) { - lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED; - lp->d_partitions[RAW_PART].p_offset = 0; - lp->d_partitions[RAW_PART].p_size = 0x1fffffff; - } - - /* obtain buffer to probe drive with */ - - bp = geteblk((int)lp->d_secsize); - - /* request no partition relocation by driver on I/O operations */ - - bp->b_dev = dev; - - /* do netbsd partitions in the process of getting disklabel? */ - - netbsdpartoff = 0; - cyl = LABELSECTOR / lp->d_secpercyl; - - if (osdep) { - if (filecore_label_read(dev, strat,lp, osdep, &msg, &cyl, - &netbsdpartoff) || - mbr_label_read(dev, strat, lp, osdep, &msg, &cyl, - &netbsdpartoff)) { - if (msg != NULL) - goto done; - } else { - /* - * We didn't find anything we like; NetBSD native. - * netbsdpartoff and cyl should be unchanged. - */ - KASSERT(netbsdpartoff == 0); - KASSERT(cyl == (LABELSECTOR / lp->d_secpercyl)); - } - } - - /* next, dig out disk label */ - -/* printf("Reading disklabel addr=%08x\n", netbsdpartoff * DEV_BSIZE);*/ - - bp->b_blkno = netbsdpartoff + LABELSECTOR; - bp->b_cylinder = bp->b_blkno / lp->d_secpercyl; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - (*strat)(bp); - - /* if successful, locate disk label within block and validate */ - - if (biowait(bp)) { - msg = "disk label I/O error"; - goto done; - } - for (dlp = (struct disklabel *)bp->b_data; - dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp)); - dlp = (struct disklabel *)((char *)dlp + sizeof(long))) { - if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) { - if (msg == NULL) - msg = "no disk label"; - } else if (dlp->d_npartitions > MAXPARTITIONS || - dkcksum(dlp) != 0) - msg = "disk label corrupted"; - else { - *lp = *dlp; - msg = NULL; - break; - } - } - - if (msg) - goto done; - - /* obtain bad sector table if requested and present */ - if (osdep && (lp->d_flags & D_BADSECT)) { - struct dkbad *bdp = &osdep->bad; - struct dkbad *db; - - i = 0; - do { - /* read a bad sector table */ - bp->b_flags &= ~(B_DONE); - bp->b_flags |= B_READ; - bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i; - if (lp->d_secsize > DEV_BSIZE) - bp->b_blkno *= lp->d_secsize / DEV_BSIZE; - else - bp->b_blkno /= DEV_BSIZE / lp->d_secsize; - bp->b_bcount = lp->d_secsize; - bp->b_cylinder = lp->d_ncylinders - 1; - (*strat)(bp); - - /* if successful, validate, otherwise try another */ - if (biowait(bp)) { - msg = "bad sector table I/O error"; - } else { - db = (struct dkbad *)(bp->b_data); -#define DKBAD_MAGIC 0x4321 - if (db->bt_mbz == 0 - && db->bt_flag == DKBAD_MAGIC) { - msg = NULL; - *bdp = *db; - break; - } else - msg = "bad sector table corrupted"; - } - } while ((bp->b_flags & B_ERROR) && (i += 2) < 10 && - i < lp->d_nsectors); - } - -done: - brelse(bp); - return (msg); -} - - -/* - * Check new disk label for sensibility - * before 
setting it. - */ - -int -setdisklabel(olp, nlp, openmask, osdep) - struct disklabel *olp; - struct disklabel *nlp; - u_long openmask; - struct cpu_disklabel *osdep; -{ - int i; - struct partition *opp, *npp; - - /* sanity clause */ - - if (nlp->d_secpercyl == 0 || nlp->d_secsize == 0 - || (nlp->d_secsize % DEV_BSIZE) != 0) - return(EINVAL); - - /* special case to allow disklabel to be invalidated */ - - if (nlp->d_magic == 0xffffffff) { - *olp = *nlp; - return (0); - } - - if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC - || dkcksum(nlp) != 0) - return (EINVAL); - - /* XXX missing check if other acorn/dos partitions will be overwritten */ - - while (openmask != 0) { - i = ffs(openmask) - 1; - openmask &= ~(1 << i); - if (nlp->d_npartitions <= i) - return (EBUSY); - opp = &olp->d_partitions[i]; - npp = &nlp->d_partitions[i]; - if (npp->p_offset != opp->p_offset || npp->p_size < opp->p_size) - return (EBUSY); - /* - * Copy internally-set partition information - * if new label doesn't include it. XXX - */ - if (npp->p_fstype == FS_UNUSED && opp->p_fstype != FS_UNUSED) { - npp->p_fstype = opp->p_fstype; - npp->p_fsize = opp->p_fsize; - npp->p_frag = opp->p_frag; - npp->p_cpg = opp->p_cpg; - } - } - - nlp->d_checksum = 0; - nlp->d_checksum = dkcksum(nlp); - *olp = *nlp; - return (0); -} - - -/* - * Write disk label back to device after modification. - */ - -int -writedisklabel(dev, strat, lp, osdep) - dev_t dev; - void (*strat)(); - struct disklabel *lp; - struct cpu_disklabel *osdep; -{ - struct buf *bp; - struct disklabel *dlp; - int cyl, netbsdpartoff; - int error = 0, rv; - - /* get a buffer and initialize it */ - - bp = geteblk((int)lp->d_secsize); - bp->b_dev = dev; - - /* do netbsd partitions in the process of getting disklabel? */ - - netbsdpartoff = 0; - cyl = LABELSECTOR / lp->d_secpercyl; - - if (osdep) { - if ((rv = filecore_label_locate(dev, strat,lp, osdep, &cyl, - &netbsdpartoff)) != 0|| - (rv = mbr_label_locate(dev, strat, lp, osdep, &cyl, - &netbsdpartoff)) != 0) { - if (rv < 0) { - error = -rv; - goto done; - } - } else { - /* - * We didn't find anything we like; NetBSD native. - * netbsdpartoff and cyl should be unchanged. - */ - KASSERT(netbsdpartoff == 0); - KASSERT(cyl == (LABELSECTOR / lp->d_secpercyl)); - } - } - -/* writelabel: */ - -/* printf("writedisklabel: Reading disklabel addr=%08x\n", - netbsdpartoff * DEV_BSIZE);*/ - - /* next, dig out disk label */ - - bp->b_blkno = netbsdpartoff + LABELSECTOR; - bp->b_cylinder = cyl; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - (*strat)(bp); - - /* if successful, locate disk label within block and validate */ - - if ((error = biowait(bp))) - goto done; - for (dlp = (struct disklabel *)bp->b_data; - dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp)); - dlp = (struct disklabel *)((char *)dlp + sizeof(long))) { - if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC && - dkcksum(dlp) == 0) { - *dlp = *lp; - bp->b_flags &= ~(B_READ|B_DONE); - bp->b_flags |= B_WRITE; - (*strat)(bp); - error = biowait(bp); - goto done; - } - } - - error = ESRCH; - -done: - brelse(bp); - return (error); -} - - -/* - * Determine the size of the transfer, and make sure it is - * within the boundaries of the partition. Adjust transfer - * if needed, and signal errors or early completion. 
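For the truncation logic in bounds_check_with_label() below, a small worked example (hosted C; DEV_BSIZE and DEV_BSHIFT are the usual 512 and 9):

    #include <assert.h>
    #include <stdio.h>

    #define DEV_BSIZE   512
    #define DEV_BSHIFT  9
    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    int
    main(void)
    {
        long p_size = 1000;             /* partition size in sectors */
        long b_blkno = 990;             /* request starts 10 sectors from the end */
        long b_bcount = 16 * DEV_BSIZE; /* 16-sector (8 KB) transfer */
        long sz = howmany(b_bcount, DEV_BSIZE);

        if (b_blkno + sz > p_size) {
            sz = p_size - b_blkno;      /* only 10 sectors still fit */
            assert(sz == 10);
            b_bcount = sz << DEV_BSHIFT;
        }
        printf("transfer truncated to %ld bytes\n", b_bcount);  /* 5120 */
        return 0;
    }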
- */ -int -bounds_check_with_label(bp, lp, wlabel) - struct buf *bp; - struct disklabel *lp; - int wlabel; -{ - struct partition *p = lp->d_partitions + DISKPART(bp->b_dev); - int labelsector = lp->d_partitions[0].p_offset + LABELSECTOR; - int sz; - - sz = howmany(bp->b_bcount, lp->d_secsize); - - if (bp->b_blkno + sz > p->p_size) { - sz = p->p_size - bp->b_blkno; - if (sz == 0) { - /* If exactly at end of disk, return EOF. */ - bp->b_resid = bp->b_bcount; - goto done; - } - if (sz < 0) { - /* If past end of disk, return EINVAL. */ - bp->b_error = EINVAL; - goto bad; - } - /* Otherwise, truncate request. */ - bp->b_bcount = sz << DEV_BSHIFT; - } - - /* Overwriting disk label? */ - if (bp->b_blkno + p->p_offset <= labelsector && -#if LABELSECTOR != 0 - bp->b_blkno + p->p_offset + sz > labelsector && -#endif - (bp->b_flags & B_READ) == 0 && !wlabel) { - bp->b_error = EROFS; - goto bad; - } - - /* calculate cylinder for disksort to order transfers with */ - bp->b_cylinder = (bp->b_blkno + p->p_offset) / - (lp->d_secsize / DEV_BSIZE) / lp->d_secpercyl; - return (1); - -bad: - bp->b_flags |= B_ERROR; -done: - return (0); -} - -/* End of disksubr.c */ diff --git a/sys/arch/arm32/arm32/disksubr_acorn.c b/sys/arch/arm32/arm32/disksubr_acorn.c deleted file mode 100644 index 529971219676..000000000000 --- a/sys/arch/arm32/arm32/disksubr_acorn.c +++ /dev/null @@ -1,356 +0,0 @@ -/* $NetBSD: disksubr_acorn.c,v 1.4 2000/11/20 08:24:11 chs Exp $ */ - -/* - * Copyright (c) 1998 Christopher G. Demetriou. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Christopher G. Demetriou - * for the NetBSD Project. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Copyright (c) 1982, 1986, 1988 Regents of the University of California. - * Copyright (c) 1995 Mark Brinicombe - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91 - */ - -#include -#include -#include -#include - -/* - * static int filecore_checksum(u_char *bootblock) - * - * Calculates the filecore boot block checksum. This is used to validate - * a filecore boot block on the disk. If a boot block is validated then - * it is used to locate the partition table. If the boot block is not - * validated, it is assumed that the whole disk is NetBSD. - * - * The basic algorithm is: - * - * for (each byte in block, excluding checksum) { - * sum += byte; - * if (sum > 255) - * sum -= 255; - * } - * - * That's equivalent to summing all of the bytes in the block - * (excluding the checksum byte, of course), then calculating the - * checksum as "cksum = sum - ((sum - 1) / 255) * 255)". That - * expression may or may not yield a faster checksum function, - * but it's easier to reason about. - * - * Note that if you have a block filled with bytes of a single - * value "X" (regardless of that value!) and calculate the cksum - * of the block (excluding the checksum byte), you will _always_ - * end up with a checksum of X. (Do the math; that can be derived - * from the checksum calculation function!) That means that - * blocks which contain bytes which all have the same value will - * always checksum properly. That's a _very_ unlikely occurence - * (probably impossible, actually) for a valid filecore boot block, - * so we treat such blocks as invalid. - */ -static int -filecore_checksum(bootblock) - u_char *bootblock; -{ - u_char byte0, accum_diff; - u_int sum; - int i; - - sum = 0; - accum_diff = 0; - byte0 = bootblock[0]; - - /* - * Sum the contents of the block, keeping track of whether - * or not all bytes are the same. If 'accum_diff' ends up - * being zero, all of the bytes are, in fact, the same. 
- */ - for (i = 0; i < 511; ++i) { - sum += bootblock[i]; - accum_diff |= bootblock[i] ^ byte0; - } - - /* - * Check to see if the checksum byte is the same as the - * rest of the bytes, too. (Note that if all of the bytes - * are the same except the checksum, a checksum compare - * won't succeed, but that's not our problem.) - */ - accum_diff |= bootblock[i] ^ byte0; - - /* All bytes in block are the same; call it invalid. */ - if (accum_diff == 0) - return (-1); - - return (sum - ((sum - 1) / 255) * 255); -} - - -int -filecore_label_read(dev, strat, lp, osdep, msgp, cylp, netbsd_label_offp) - dev_t dev; - void (*strat) __P((struct buf *)); - struct disklabel *lp; - struct cpu_disklabel *osdep; - char **msgp; - int *cylp, *netbsd_label_offp; -{ - struct filecore_bootblock *bb; - int heads; - int sectors; - int rv = 1; - int cyl, netbsdpartoff; - struct buf *bp; - -#ifdef __GNUC__ - netbsdpartoff = 0; /* XXX -Wuninitialized */ -#endif - - /* get a buffer and initialize it */ - bp = geteblk((int)lp->d_secsize); - bp->b_dev = dev; - - /* read the Acorn filecore boot block */ - - bp->b_blkno = FILECORE_BOOT_SECTOR; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - bp->b_cylinder = bp->b_blkno / lp->d_secpercyl; - (*strat)(bp); - - /* - * if successful, validate boot block and - * locate partition table - */ - - if (biowait(bp)) { - *msgp = "filecore boot block I/O error"; - goto out; - } - - bb = (struct filecore_bootblock *)bp->b_data; - - /* Validate boot block */ - - if (bb->checksum != filecore_checksum((u_char *)bb)) { - /* - * Invalid boot block so lets assume the - * entire disc is NetBSD - */ - rv = 0; - goto out; - } - - /* Get some information from the boot block */ - - cyl = bb->partition_cyl_low + (bb->partition_cyl_high << 8); - - heads = bb->heads; - sectors = bb->secspertrack; - - /* Do we have a NETBSD partition table ? 
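A brief aside on filecore_checksum() above: its comment claims that the byte-at-a-time fold (subtracting 255 whenever the running sum exceeds 255) equals the closed form sum - ((sum - 1) / 255) * 255 over the plain total, and that a block of identical bytes always checksums to that byte value. Both are easy to confirm with a hosted sketch:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Byte-at-a-time fold, exactly as the comment describes it. */
    static int
    fold(const unsigned char *b, int n)
    {
        int sum = 0, i;

        for (i = 0; i < n; i++) {
            sum += b[i];
            if (sum > 255)
                sum -= 255;
        }
        return sum;
    }

    /* Closed form actually used by filecore_checksum(). */
    static int
    closed_form(const unsigned char *b, int n)
    {
        int sum = 0, i;

        for (i = 0; i < n; i++)
            sum += b[i];
        return sum - ((sum - 1) / 255) * 255;
    }

    int
    main(void)
    {
        unsigned char blk[511];
        int t, i;

        srand(1);
        for (t = 0; t < 1000; t++) {
            for (i = 0; i < 511; i++)
                blk[i] = rand() & 0xff;
            assert(fold(blk, 511) == closed_form(blk, 511));
        }

        /* The degenerate case the comment warns about: an all-X block sums to X. */
        for (i = 0; i < 511; i++)
            blk[i] = 0x42;
        assert(closed_form(blk, 511) == 0x42);

        printf("fold and closed form agree\n");
        return 0;
    }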
*/ - - if (bb->partition_type == PARTITION_FORMAT_RISCBSD) { -/* printf("heads = %d nsectors = %d\n", heads, sectors);*/ - netbsdpartoff = cyl * heads * sectors; - } else if (bb->partition_type == PARTITION_FORMAT_RISCIX) { - struct riscix_partition_table *rpt; - int loop; - - /* - * We have a RISCiX partition table :-( groan - * - * Read the RISCiX partition table and see if - * there is a NetBSD partition - */ - - bp->b_blkno = cyl * heads * sectors; -/* printf("Found RiscIX partition table @ %08x\n", - bp->b_blkno);*/ - bp->b_cylinder = bp->b_blkno / lp->d_secpercyl; - bp->b_bcount = lp->d_secsize; - bp->b_flags &= ~(B_DONE); - bp->b_flags |= B_READ; - (*strat)(bp); - - /* - * if successful, locate disk label within block - * and validate - */ - - if (biowait(bp)) { - *msgp = "disk label I/O error"; - goto out; - } - - rpt = (struct riscix_partition_table *)bp->b_data; -/* for (loop = 0; loop < NRISCIX_PARTITIONS; ++loop) - printf("p%d: %16s %08x %08x %08x\n", loop, - rpt->partitions[loop].rp_name, - rpt->partitions[loop].rp_start, - rpt->partitions[loop].rp_length, - rpt->partitions[loop].rp_type); -*/ - for (loop = 0; loop < NRISCIX_PARTITIONS; ++loop) { - if (strcmp(rpt->partitions[loop].rp_name, - "RiscBSD") == 0 || - strcmp(rpt->partitions[loop].rp_name, - "NetBSD") == 0 || - strcmp(rpt->partitions[loop].rp_name, - "Empty:") == 0) { - netbsdpartoff = - rpt->partitions[loop].rp_start; - break; - } - } - if (loop == NRISCIX_PARTITIONS) { - *msgp = "NetBSD partition identifier string not found."; - goto out; - } - } else { - *msgp = "Invalid partition format"; - goto out; - } - - *cylp = cyl; - *netbsd_label_offp = netbsdpartoff; - *msgp = NULL; -out: - brelse(bp); - return (rv); -} - - -int -filecore_label_locate(dev, strat, lp, osdep, cylp, netbsd_label_offp) - dev_t dev; - void (*strat) __P((struct buf *)); - struct disklabel *lp; - struct cpu_disklabel *osdep; - int *cylp, *netbsd_label_offp; -{ - struct filecore_bootblock *bb; - int heads; - int sectors; - int rv; - int cyl, netbsdpartoff; - struct buf *bp; - - /* get a buffer and initialize it */ - bp = geteblk((int)lp->d_secsize); - bp->b_dev = dev; - - /* read the filecore boot block */ - -/* printf("writedisklabel: Reading boot block\n");*/ - - bp->b_blkno = FILECORE_BOOT_SECTOR; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - bp->b_cylinder = bp->b_blkno / lp->d_secpercyl; - (*strat)(bp); - - /* - * if successful, validate boot block and locate - * partition table - */ - - if ((rv = biowait(bp)) != 0) { - rv = -rv; - goto out; - } - - bb = (struct filecore_bootblock *)bp->b_data; - rv = 1; - - /* Validate boot block */ - - if (bb->checksum != filecore_checksum((u_char *)bb)) { - /* - * Invalid boot block so lets assume the - * entire disc is NetBSD - */ - -/* printf("writedisklabel: Invalid filecore boot block (incorrect checksum)\n");*/ - rv = 0; - goto out; - } - - /* Do we have a NetBSD partition ? 
*/ - - if (bb->partition_type != PARTITION_FORMAT_RISCBSD) { - printf("writedisklabel: Invalid partition format\n"); - rv = -1; - goto out; - } - - cyl = bb->partition_cyl_low + (bb->partition_cyl_high << 8); - - heads = bb->heads; - sectors = bb->secspertrack; - - /*printf("heads = %d nsectors = %d\n", heads, sectors);*/ - - netbsdpartoff = cyl * heads * sectors; - - *cylp = cyl; - *netbsd_label_offp = netbsdpartoff; -out: - brelse(bp); - return (rv); -} diff --git a/sys/arch/arm32/arm32/disksubr_mbr.c b/sys/arch/arm32/arm32/disksubr_mbr.c deleted file mode 100644 index e395289c194d..000000000000 --- a/sys/arch/arm32/arm32/disksubr_mbr.c +++ /dev/null @@ -1,276 +0,0 @@ -/* $NetBSD: disksubr_mbr.c,v 1.5 2000/11/20 08:24:12 chs Exp $ */ - -/* - * Copyright (c) 1998 Christopher G. Demetriou. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Christopher G. Demetriou - * for the NetBSD Project. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Copyright (c) 1982, 1986, 1988 Regents of the University of California. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91 - */ - -/* - * From i386 disklabel.c rev 1.29, with cleanups and modifications to - * make it easier to use on the arm32 and to use as MI code (not quite - * clean enough, yet). - */ - -#include -#include -#include -#include - -#include "opt_mbr.h" - -#define MBRSIGOFS 0x1fe -static char mbrsig[2] = {0x55, 0xaa}; - -int fat_types[] = { - MBR_PTYPE_FAT12, MBR_PTYPE_FAT16S, - MBR_PTYPE_FAT16B, MBR_PTYPE_FAT32, - MBR_PTYPE_FAT32L, MBR_PTYPE_FAT16L, - -1 -}; - -int -mbr_label_read(dev, strat, lp, osdep, msgp, cylp, netbsd_label_offp) - dev_t dev; - void (*strat) __P((struct buf *)); - struct disklabel *lp; - struct cpu_disklabel *osdep; - char **msgp; - int *cylp, *netbsd_label_offp; -{ - struct mbr_partition *mbrp; - struct partition *pp; - int cyl, mbrpartoff, i, *ip; - struct buf *bp; - int rv = 1; - - /* get a buffer and initialize it */ - bp = geteblk((int)lp->d_secsize); - bp->b_dev = dev; - - /* In case nothing sets them */ - mbrpartoff = 0; - cyl = LABELSECTOR / lp->d_secpercyl; - - mbrp = osdep->mbrparts; - - /* read master boot record */ - bp->b_blkno = MBR_BBSECTOR; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl; - (*strat)(bp); - - /* if successful, wander through dos partition table */ - if (biowait(bp)) { - *msgp = "dos partition I/O error"; - goto out; - } else { - struct mbr_partition *ourmbrp = NULL; - - /* XXX "there has to be a better check than this." */ - if (bcmp(bp->b_data + MBRSIGOFS, mbrsig, sizeof(mbrsig))) { - rv = 0; - goto out; - } - - /* XXX how do we check veracity/bounds of this? */ - bcopy(bp->b_data + MBR_PARTOFF, mbrp, - NMBRPART * sizeof(*mbrp)); - - /* look for NetBSD partition */ - ourmbrp = NULL; - for (i = 0; !ourmbrp && i < NMBRPART; i++) { - if (mbrp[i].mbrp_typ == MBR_PTYPE_NETBSD) - ourmbrp = &mbrp[i]; - } -#ifdef COMPAT_386BSD_MBRPART - /* didn't find it -- look for 386BSD partition */ - for (i = 0; !ourmbrp && i < NMBRPART; i++) { - if (mbrp[i].mbrp_typ == MBR_PTYPE_386BSD) { - printf("WARNING: old BSD partition ID!\n"); - ourmbrp = &mbrp[i]; - break; - } - } -#endif - for (i = 0; i < NMBRPART; i++, mbrp++) { - - strncpy(lp->d_packname, "fictitious-MBR", - sizeof lp->d_packname); - - /* Install in partition e, f, g, or h. */ - pp = &lp->d_partitions['e' - 'a' + i]; - pp->p_offset = mbrp->mbrp_start; - pp->p_size = mbrp->mbrp_size; - for (ip = fat_types; *ip != -1; ip++) { - if (mbrp->mbrp_typ == *ip) - pp->p_fstype = FS_MSDOS; - } - if (mbrp->mbrp_typ == MBR_PTYPE_LNXEXT2) - pp->p_fstype = FS_EX2FS; - - /* is this ours? 
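As an annotation on the MBR scan above: it trusts the 0x55 0xaa signature at offset 0x1fe and then copies the four 16-byte partition entries starting at offset 446. A self-contained userland sketch of that layout (the struct is a local simplification rather than the kernel's struct mbr_partition, and reading start/size this way assumes a little-endian host, which arm32 is):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MBR_PARTOFF 446        /* first of the four 16-byte entries */
    #define MBRSIGOFS   0x1fe      /* 0x55 0xaa signature, as checked above */
    #define NMBRPART    4

    /* Local, simplified mirror of a classic MBR partition entry. */
    struct mbr_part {
        uint8_t  flag;                 /* bootable flag */
        uint8_t  shd, ssect, scyl;     /* CHS start, packed */
        uint8_t  typ;                  /* partition type byte */
        uint8_t  ehd, esect, ecyl;     /* CHS end, packed */
        uint32_t start;                /* LBA start, little-endian on disk */
        uint32_t size;                 /* size in sectors */
    };

    static int
    list_mbr(const uint8_t *sec)
    {
        struct mbr_part mp[NMBRPART];
        int i;

        if (sec[MBRSIGOFS] != 0x55 || sec[MBRSIGOFS + 1] != 0xaa)
            return -1;                 /* no valid signature */
        memcpy(mp, sec + MBR_PARTOFF, sizeof(mp));
        for (i = 0; i < NMBRPART; i++)
            if (mp[i].typ != 0)
                printf("entry %d: type 0x%02x start %u size %u\n",
                    i, mp[i].typ, mp[i].start, mp[i].size);
        return 0;
    }

    int
    main(void)
    {
        uint8_t sec[512] = { 0 };
        struct mbr_part p = { 0 };

        p.typ = 0xa9;                  /* NetBSD partition type */
        p.start = 63;
        p.size = 1000000;
        memcpy(sec + MBR_PARTOFF, &p, sizeof(p));
        sec[MBRSIGOFS] = 0x55;
        sec[MBRSIGOFS + 1] = 0xaa;
        assert(list_mbr(sec) == 0);
        return 0;
    }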
*/ - if (mbrp == ourmbrp) { - /* need sector address for SCSI/IDE, - cylinder for ESDI/ST506/RLL */ - mbrpartoff = mbrp->mbrp_start; - cyl = MBR_PCYL(mbrp->mbrp_scyl, mbrp->mbrp_ssect); - -#ifdef __i386__ /* XXX? */ - /* update disklabel with details */ - lp->d_partitions[2].p_size = - mbrp->mbrp_size; - lp->d_partitions[2].p_offset = - mbrp->mbrp_start; - lp->d_ntracks = mbrp->mbrp_ehd + 1; - lp->d_nsectors = MBR_PSECT(mbrp->mbrp_esect); - lp->d_secpercyl = - lp->d_ntracks * lp->d_nsectors; -#endif - } - } - lp->d_npartitions = 'e' - 'a' + i; - } - - *cylp = cyl; - *netbsd_label_offp = mbrpartoff; - *msgp = NULL; -out: - brelse(bp); - return (rv); -} - -int -mbr_label_locate(dev, strat, lp, osdep, cylp, netbsd_label_offp) - dev_t dev; - void (*strat) __P((struct buf *)); - struct disklabel *lp; - struct cpu_disklabel *osdep; - int *cylp, *netbsd_label_offp; -{ - struct mbr_partition *mbrp; - int cyl, mbrpartoff, i; - struct mbr_partition *ourmbrp = NULL; - struct buf *bp; - int rv = 1; - - /* get a buffer and initialize it */ - bp = geteblk((int)lp->d_secsize); - bp->b_dev = dev; - - /* do MBR partitions in the process of getting disklabel? */ - mbrpartoff = 0; - cyl = LABELSECTOR / lp->d_secpercyl; - - mbrp = osdep->mbrparts; - - /* read master boot record */ - bp->b_blkno = MBR_BBSECTOR; - bp->b_bcount = lp->d_secsize; - bp->b_flags |= B_READ; - bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl; - (*strat)(bp); - - if ((rv = biowait(bp)) != 0) { - rv = -rv; - goto out; - } - - if (bcmp(bp->b_data + MBRSIGOFS, mbrsig, sizeof(mbrsig))) { - rv = 0; - goto out; - } - - /* XXX how do we check veracity/bounds of this? */ - bcopy(bp->b_data + MBR_PARTOFF, mbrp, NMBRPART * sizeof(*mbrp)); - - /* look for NetBSD partition */ - ourmbrp = NULL; - for (i = 0; !ourmbrp && i < NMBRPART; i++) { - if (mbrp[i].mbrp_typ == MBR_PTYPE_NETBSD) - ourmbrp = &mbrp[i]; - } -#ifdef COMPAT_386BSD_MBRPART - /* didn't find it -- look for 386BSD partition */ - for (i = 0; !ourmbrp && i < NMBRPART; i++) { - if (mbrp[i].mbrp_typ == MBR_PTYPE_386BSD) { - printf("WARNING: old BSD partition ID!\n"); - ourmbrp = &mbrp[i]; - } - } -#endif - if (!ourmbrp) { - rv = 0; /* XXX allow easy clobber? */ - goto out; - } - - /* need sector address for SCSI/IDE, cylinder for ESDI/ST506/RLL */ - mbrpartoff = ourmbrp->mbrp_start; - cyl = MBR_PCYL(ourmbrp->mbrp_scyl, ourmbrp->mbrp_ssect); - - *cylp = cyl; - *netbsd_label_offp = mbrpartoff; -out: - brelse(bp); - return (rv); -} diff --git a/sys/arch/arm32/arm32/pmap.c b/sys/arch/arm32/arm32/pmap.c deleted file mode 100644 index 49930d8bae87..000000000000 --- a/sys/arch/arm32/arm32/pmap.c +++ /dev/null @@ -1,2771 +0,0 @@ -/* $NetBSD: pmap.c,v 1.72 2001/02/24 20:04:42 reinoud Exp $ */ - -/*- - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Charles M. Hannum. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. - * 4. Neither the name of The NetBSD Foundation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * - * RiscBSD kernel project - * - * pmap.c - * - * Machine dependant vm stuff - * - * Created : 20/09/94 - */ - -/* - * Performance improvements, UVM changes, overhauls and part-rewrites - * were contributed by Neil A. Carson . - */ - -/* - * The dram block info is currently referenced from the bootconfig. - * This should be placed in a separate structure. 
- */ - -/* - * Special compilation symbols - * PMAP_DEBUG - Build in pmap_debug_level code - */ - -/* Include header files */ - -#include "opt_pmap_debug.h" -#include "opt_ddb.h" - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#ifdef PMAP_DEBUG -#define PDEBUG(_lev_,_stat_) \ - if (pmap_debug_level >= (_lev_)) \ - ((_stat_)) -int pmap_debug_level = -2; -#else /* PMAP_DEBUG */ -#define PDEBUG(_lev_,_stat_) /* Nothing */ -#endif /* PMAP_DEBUG */ - -struct pmap kernel_pmap_store; -pmap_t kernel_pmap; - -pagehook_t page_hook0; -pagehook_t page_hook1; -char *memhook; -pt_entry_t msgbufpte; -extern caddr_t msgbufaddr; - -#ifdef DIAGNOSTIC -boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ -#endif - -TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; - -int pv_nfree = 0; - -vm_size_t npages; - -extern vm_offset_t physical_start; -extern vm_offset_t physical_freestart; -extern vm_offset_t physical_end; -extern vm_offset_t physical_freeend; -extern unsigned int free_pages; -extern int max_processes; - -vm_offset_t virtual_start; -vm_offset_t virtual_end; - -vm_offset_t avail_start; -vm_offset_t avail_end; - -extern pv_addr_t systempage; - -#define ALLOC_PAGE_HOOK(x, s) \ - x.va = virtual_start; \ - x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \ - virtual_start += s; - -/* Variables used by the L1 page table queue code */ -SIMPLEQ_HEAD(l1pt_queue, l1pt); -struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */ -int l1pt_static_queue_count; /* items in the static l1 queue */ -int l1pt_static_create_count; /* static l1 items created */ -struct l1pt_queue l1pt_queue; /* head of our l1 queue */ -int l1pt_queue_count; /* items in the l1 queue */ -int l1pt_create_count; /* stat - L1's create count */ -int l1pt_reuse_count; /* stat - L1's reused count */ - -/* Local function prototypes (not used outside this file) */ -pt_entry_t *pmap_pte __P((pmap_t pmap, vm_offset_t va)); -int pmap_page_index __P((vm_offset_t pa)); -void map_pagetable __P((vm_offset_t pagetable, vm_offset_t va, - vm_offset_t pa, unsigned int flags)); -void pmap_copy_on_write __P((vm_offset_t pa)); -void pmap_pinit __P((pmap_t)); -void pmap_release __P((pmap_t)); - -/* Other function prototypes */ -extern void bzero_page __P((vm_offset_t)); -extern void bcopy_page __P((vm_offset_t, vm_offset_t)); - -struct l1pt *pmap_alloc_l1pt __P((void)); -static __inline void pmap_map_in_l1 __P((pmap_t pmap, vm_offset_t va, - vm_offset_t l2pa)); - -#ifdef MYCROFT_HACK -int mycroft_hack = 0; -#endif - -/* Function to set the debug level of the pmap code */ - -#ifdef PMAP_DEBUG -void -pmap_debug(level) - int level; -{ - pmap_debug_level = level; - printf("pmap_debug: level=%d\n", pmap_debug_level); -} -#endif /* PMAP_DEBUG */ - -#include "isadma.h" - -#if NISADMA > 0 -/* - * Used to protect memory for ISA DMA bounce buffers. If, when loading - * pages into the system, memory intersects with any of these ranges, - * the intersecting memory will be loaded into a lower-priority free list. - */ -bus_dma_segment_t *pmap_isa_dma_ranges; -int pmap_isa_dma_nranges; - -boolean_t pmap_isa_dma_range_intersect __P((vm_offset_t, vm_size_t, - vm_offset_t *, vm_size_t *)); - -/* - * Check if a memory range intersects with an ISA DMA range, and - * return the page-rounded intersection if it does. The intersection - * will be placed on a lower-priority free list. 
- */ -boolean_t -pmap_isa_dma_range_intersect(pa, size, pap, sizep) - vm_offset_t pa; - vm_size_t size; - vm_offset_t *pap; - vm_size_t *sizep; -{ - bus_dma_segment_t *ds; - int i; - - if (pmap_isa_dma_ranges == NULL) - return (FALSE); - - for (i = 0, ds = pmap_isa_dma_ranges; - i < pmap_isa_dma_nranges; i++, ds++) { - if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) { - /* - * Beginning of region intersects with this range. - */ - *pap = trunc_page(pa); - *sizep = round_page(min(pa + size, - ds->ds_addr + ds->ds_len) - pa); - return (TRUE); - } - if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) { - /* - * End of region intersects with this range. - */ - *pap = trunc_page(ds->ds_addr); - *sizep = round_page(min((pa + size) - ds->ds_addr, - ds->ds_len)); - return (TRUE); - } - } - - /* - * No intersection found. - */ - return (FALSE); -} -#endif /* NISADMA > 0 */ - -/* - * Functions for manipluation pv_entry structures. These are used to keep a - * record of the mappings of virtual addresses and the associated physical - * pages. - */ - -/* - * Allocate a new pv_entry structure from the freelist. If the list is - * empty allocate a new page and fill the freelist. - */ -struct pv_entry * -pmap_alloc_pv() -{ - struct pv_page *pvp; - struct pv_entry *pv; - int i; - - /* - * Do we have any free pv_entry structures left ? - * If not allocate a page of them - */ - - if (pv_nfree == 0) { - /* NOTE: can't lock kernel_map here */ - MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK); - if (pvp == 0) - panic("pmap_alloc_pv: kmem_alloc() failed"); - pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; - for (i = NPVPPG - 2; i; i--, pv++) - pv->pv_next = pv + 1; - pv->pv_next = 0; - pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1; - TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list); - pv = &pvp->pvp_pv[0]; - } else { - --pv_nfree; - pvp = pv_page_freelist.tqh_first; - if (--pvp->pvp_pgi.pgi_nfree == 0) { - TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); - } - pv = pvp->pvp_pgi.pgi_freelist; -#ifdef DIAGNOSTIC - if (pv == 0) - panic("pmap_alloc_pv: pgi_nfree inconsistent"); -#endif /* DIAGNOSTIC */ - pvp->pvp_pgi.pgi_freelist = pv->pv_next; - } - return pv; -} - -/* - * Release a pv_entry structure putting it back on the freelist. 
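pmap_alloc_pv() above and pmap_free_pv() just below form a small slab-style allocator: pv_entry structures are carved out of whole pages, each page keeps its own free count (pgi_nfree) and internal freelist, a page rejoins the global free-page list when its count rises from 0 to 1, and the whole page is returned once every entry in it is free again (hence the deliberate fall-through from case 1 into default below). The following is only an illustrative userland model of the carving step; the NPVPPG value, the field names and malloc() stand in for the kernel's machinery.

/* Illustrative model of a pv_entry page being carved up -- not the kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define NPVPPG	8			/* entries per "page" (value assumed) */

struct pv_entry { struct pv_entry *pv_next; };

struct pv_page {
	int              pgi_nfree;	/* free entries left in this page */
	struct pv_entry *pgi_freelist;	/* chain of those free entries */
	struct pv_entry  pvp_pv[NPVPPG];
};

/* Carve a fresh page: hand out pvp_pv[0], chain the rest on the page freelist. */
static struct pv_entry *
pvpage_fill(struct pv_page *pvp)
{
	int i;

	for (i = 1; i < NPVPPG - 1; i++)
		pvp->pvp_pv[i].pv_next = &pvp->pvp_pv[i + 1];
	pvp->pvp_pv[NPVPPG - 1].pv_next = NULL;
	pvp->pgi_freelist = &pvp->pvp_pv[1];
	pvp->pgi_nfree = NPVPPG - 1;
	return &pvp->pvp_pv[0];
}

int
main(void)
{
	struct pv_page *pvp = malloc(sizeof(*pvp));
	struct pv_entry *pv = pvpage_fill(pvp);

	printf("first entry %p, %d left on the page\n", (void *)pv, pvp->pgi_nfree);
	free(pvp);
	return 0;
}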
- */ - -void -pmap_free_pv(pv) - struct pv_entry *pv; -{ - struct pv_page *pvp; - - pvp = (struct pv_page *) trunc_page((vaddr_t)pv); - switch (++pvp->pvp_pgi.pgi_nfree) { - case 1: - TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list); - default: - pv->pv_next = pvp->pvp_pgi.pgi_freelist; - pvp->pvp_pgi.pgi_freelist = pv; - ++pv_nfree; - break; - case NPVPPG: - pv_nfree -= NPVPPG - 1; - TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); - FREE((vm_offset_t)pvp, M_VMPVENT); - break; - } -} - -#if 0 -void -pmap_collect_pv() -{ - struct pv_page_list pv_page_collectlist; - struct pv_page *pvp, *npvp; - struct pv_entry *ph, *ppv, *pv, *npv; - int s; - - TAILQ_INIT(&pv_page_collectlist); - - for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) { - if (pv_nfree < NPVPPG) - break; - npvp = pvp->pvp_pgi.pgi_list.tqe_next; - if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) { - TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); - TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp, - pvp_pgi.pgi_list); - pv_nfree -= NPVPPG; - pvp->pvp_pgi.pgi_nfree = -1; - } - } - - if (pv_page_collectlist.tqh_first == 0) - return; - - for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) { - if (ph->pv_pmap == 0) - continue; - s = splvm(); - for (ppv = ph; (pv = ppv->pv_next) != 0; ) { - pvp = (struct pv_page *) trunc_page((vaddr_t)pv); - if (pvp->pvp_pgi.pgi_nfree == -1) { - pvp = pv_page_freelist.tqh_first; - if (--pvp->pvp_pgi.pgi_nfree == 0) { - TAILQ_REMOVE(&pv_page_freelist, - pvp, pvp_pgi.pgi_list); - } - npv = pvp->pvp_pgi.pgi_freelist; -#ifdef DIAGNOSTIC - if (npv == 0) - panic("pmap_collect_pv: pgi_nfree inconsistent"); -#endif /* DIAGNOSTIC */ - pvp->pvp_pgi.pgi_freelist = npv->pv_next; - *npv = *pv; - ppv->pv_next = npv; - ppv = npv; - } else - ppv = pv; - } - splx(s); - } - - for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) { - npvp = pvp->pvp_pgi.pgi_list.tqe_next; - FREE((vm_offset_t)pvp, M_VMPVENT); - } -} -#endif - -/* - * Enter a new physical-virtual mapping into the pv table - */ - -/*__inline*/ void -pmap_enter_pv(pmap, va, pv, flags) - pmap_t pmap; - vm_offset_t va; - struct pv_entry *pv; - u_int flags; -{ - struct pv_entry *npv; - u_int s; - -#ifdef DIAGNOSTIC - if (!pmap_initialized) - panic("pmap_enter_pv: !pmap_initialized"); -#endif - - s = splvm(); - - PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n", - pv, pv->pv_va, pv->pv_pmap, pv->pv_next)); - - if (pv->pv_pmap == NULL) { - /* - * No entries yet, use header as the first entry - */ - pv->pv_va = va; - pv->pv_pmap = pmap; - pv->pv_next = NULL; - pv->pv_flags = flags; - } else { - /* - * There is at least one other VA mapping this page. - * Place this entry after the header. 
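The pv table keeps the first mapping of a physical page in a header pv_entry embedded in the per-page array, and chains further mappings behind it using entries from pmap_alloc_pv(); pmap_remove_pv() below copies the second entry up into the header when the header's own mapping is the one being removed. A compact userland model of that embedded-header list, tracking only the virtual address (the kernel also matches on the owning pmap); all names here are illustrative, not the kernel's.

/* Illustrative model of the embedded-header pv list -- not the kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct pv {
	struct pv	*next;
	unsigned long	 va;
	int		 used;		/* header only: any mapping recorded? */
};

static void
pv_enter(struct pv *head, unsigned long va)
{
	struct pv *n;

	if (!head->used) {		/* first mapping lives in the header */
		head->va = va;
		head->used = 1;
		return;
	}
	n = malloc(sizeof(*n));		/* later mappings are chained behind it */
	n->va = va;
	n->next = head->next;
	head->next = n;
}

static void
pv_remove(struct pv *head, unsigned long va)
{
	struct pv *p, *n;

	if (head->used && head->va == va) {	/* header entry: copy the next one up */
		if ((n = head->next) != NULL) {
			head->va = n->va;
			head->next = n->next;
			free(n);
		} else
			head->used = 0;	/* page no longer mapped anywhere */
		return;
	}
	for (p = head; (n = p->next) != NULL; p = n)
		if (n->va == va) {	/* unlink a chained entry */
			p->next = n->next;
			free(n);
			return;
		}
}

int
main(void)
{
	struct pv head = { NULL, 0, 0 };

	pv_enter(&head, 0x1000);
	pv_enter(&head, 0x2000);
	pv_remove(&head, 0x1000);	/* 0x2000 is copied up into the header */
	printf("header: va=0x%lx used=%d next=%p\n", head.va, head.used,
	    (void *)head.next);
	return 0;
}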
- */ -#ifdef PMAP_DEBUG - for (npv = pv; npv; npv = npv->pv_next) - if (pmap == npv->pv_pmap && va == npv->pv_va) - panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p", - pv, pv->pv_va, pv->pv_pmap, pv->pv_next); -#endif - npv = pmap_alloc_pv(); - npv->pv_va = va; - npv->pv_pmap = pmap; - npv->pv_flags = flags; - npv->pv_next = pv->pv_next; - pv->pv_next = npv; - } - - if (flags & PT_W) - ++pmap->pm_stats.wired_count; - - splx(s); -} - - -/* - * Remove a physical-virtual mapping from the pv table - */ - -/*__inline*/ void -pmap_remove_pv(pmap, va, pv) - pmap_t pmap; - vm_offset_t va; - struct pv_entry *pv; -{ - struct pv_entry *npv; - u_int s; - u_int flags = 0; - -#ifdef DIAGNOSTIC - if (!pmap_initialized) - panic("pmap_remove_pv: !pmap_initialized"); -#endif - - s = splvm(); - - /* - * If it is the first entry on the list, it is actually - * in the header and we must copy the following entry up - * to the header. Otherwise we must search the list for - * the entry. In either case we free the now unused entry. - */ - - if (pmap == pv->pv_pmap && va == pv->pv_va) { - npv = pv->pv_next; - if (npv) { - *pv = *npv; - flags = npv->pv_flags; - pmap_free_pv(npv); - } else { - flags = pv->pv_flags; - pv->pv_pmap = NULL; - } - } else { - for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { - if (pmap == npv->pv_pmap && va == npv->pv_va) - break; - } - if (npv) { - pv->pv_next = npv->pv_next; - flags = npv->pv_flags; - pmap_free_pv(npv); - } else - panic("pmap_remove_pv: lost entry"); - } - - if (flags & PT_W) - --pmap->pm_stats.wired_count; - - splx(s); -} - -/* - * Modify a physical-virtual mapping in the pv table - */ - -/*__inline */ u_int -pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask) - pmap_t pmap; - vm_offset_t va; - struct pv_entry *pv; - u_int bic_mask; - u_int eor_mask; -{ - struct pv_entry *npv; - u_int s; - u_int flags, oflags; - - PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n", - pmap, va, pv, bic_mask, eor_mask)); - -#ifdef DIAGNOSTIC - if (!pmap_initialized) - panic("pmap_modify_pv: !pmap_initialized"); -#endif - - s = splvm(); - - PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ", - pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags)); - - /* - * There is at least one VA mapping this page. - */ - - for (npv = pv; npv; npv = npv->pv_next) { - if (pmap == npv->pv_pmap && va == npv->pv_va) { - oflags = npv->pv_flags; - npv->pv_flags = flags = - ((oflags & ~bic_mask) ^ eor_mask); - if ((flags ^ oflags) & PT_W) { - if (flags & PT_W) - ++pmap->pm_stats.wired_count; - else - --pmap->pm_stats.wired_count; - } - PDEBUG(0, printf("done flags=%08x\n", flags)); - splx(s); - return (oflags); - } - } - - PDEBUG(0, printf("done.\n")); - splx(s); - return (0); -} - - -/* - * Map the specified level 2 pagetable into the level 1 page table for - * the given pmap to cover a chunk of virtual address space starting from the - * address specified. - */ -static /*__inline*/ void -pmap_map_in_l1(pmap, va, l2pa) - pmap_t pmap; - vm_offset_t va, l2pa; -{ - vm_offset_t ptva; - - /* Calculate the index into the L1 page table. */ - ptva = (va >> PDSHIFT) & ~3; - - PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa, - pmap->pm_pdir, L1_PTE(l2pa), ptva)); - - /* Map page table into the L1. 
*/ - pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000); - pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400); - pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800); - pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00); - - PDEBUG(0, printf("pt self reference %lx in %lx\n", - L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt)); - - /* Map the page table into the page table area. */ - *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW); - - /* XXX should be a purge */ -/* cpu_tlb_flushD();*/ -} - -#if 0 -static /*__inline*/ void -pmap_unmap_in_l1(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - vm_offset_t ptva; - - /* Calculate the index into the L1 page table. */ - ptva = (va >> PDSHIFT) & ~3; - - /* Unmap page table from the L1. */ - pmap->pm_pdir[ptva + 0] = 0; - pmap->pm_pdir[ptva + 1] = 0; - pmap->pm_pdir[ptva + 2] = 0; - pmap->pm_pdir[ptva + 3] = 0; - - /* Unmap the page table from the page table area. */ - *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0; - - /* XXX should be a purge */ -/* cpu_tlb_flushD();*/ -} -#endif - - -/* - * Used to map a range of physical addresses into kernel - * virtual address space. - * - * For now, VM is already on, we only need to map the - * specified memory. - */ -vm_offset_t -pmap_map(va, spa, epa, prot) - vm_offset_t va, spa, epa; - int prot; -{ - while (spa < epa) { - pmap_enter(pmap_kernel(), va, spa, prot, 0); - va += NBPG; - spa += NBPG; - } - return(va); -} - - -/* - * void pmap_bootstrap(pd_entry_t *kernel_l1pt) - * - * bootstrap the pmap system. This is called from initarm and allows - * the pmap system to initailise any structures it requires. - * - * Currently this sets up the kernel_pmap that is statically allocated - * and also allocated virtual addresses for certain page hooks. - * Currently the only one page hook is allocated that is used - * to zero physical pages of memory. - * It also initialises the start and end address of the kernel data space. - */ -extern vm_offset_t physical_freestart; -extern vm_offset_t physical_freeend; - -struct pv_entry *boot_pvent; -char *boot_attrs; - -void -pmap_bootstrap(kernel_l1pt, kernel_ptpt) - pd_entry_t *kernel_l1pt; - pv_addr_t kernel_ptpt; -{ - int loop; - vm_offset_t start, end; -#if NISADMA > 0 - vm_offset_t istart; - vm_size_t isize; -#endif - vsize_t size; - - kernel_pmap = &kernel_pmap_store; - - kernel_pmap->pm_pdir = kernel_l1pt; - kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa; - kernel_pmap->pm_vptpt = kernel_ptpt.pv_va; - simple_lock_init(&kernel_pmap->pm_lock); - kernel_pmap->pm_count = 1; - - /* - * Initialize PAGE_SIZE-dependent variables. - */ - uvm_setpagesize(); - - npages = 0; - loop = 0; - while (loop < bootconfig.dramblocks) { - start = (vm_offset_t)bootconfig.dram[loop].address; - end = start + (bootconfig.dram[loop].pages * NBPG); - if (start < physical_freestart) - start = physical_freestart; - if (end > physical_freeend) - end = physical_freeend; -#if 0 - printf("%d: %lx -> %lx\n", loop, start, end - 1); -#endif -#if NISADMA > 0 - if (pmap_isa_dma_range_intersect(start, end - start, - &istart, &isize)) { - /* - * Place the pages that intersect with the - * ISA DMA range onto the ISA DMA free list. - */ -#if 0 - printf(" ISADMA 0x%lx -> 0x%lx\n", istart, - istart + isize - 1); -#endif - uvm_page_physload(atop(istart), - atop(istart + isize), atop(istart), - atop(istart + isize), VM_FREELIST_ISADMA); - npages += atop(istart + isize) - atop(istart); - - /* - * Load the pieces that come before - * the intersection into the default - * free list. 
- */ - if (start < istart) { -#if 0 - printf(" BEFORE 0x%lx -> 0x%lx\n", - start, istart - 1); -#endif - uvm_page_physload(atop(start), - atop(istart), atop(start), - atop(istart), VM_FREELIST_DEFAULT); - npages += atop(istart) - atop(start); - } - - /* - * Load the pieces that come after - * the intersection into the default - * free list. - */ - if ((istart + isize) < end) { -#if 0 - printf(" AFTER 0x%lx -> 0x%lx\n", - (istart + isize), end - 1); -#endif - uvm_page_physload(atop(istart + isize), - atop(end), atop(istart + isize), - atop(end), VM_FREELIST_DEFAULT); - npages += atop(end) - atop(istart + isize); - } - } else { - uvm_page_physload(atop(start), atop(end), - atop(start), atop(end), VM_FREELIST_DEFAULT); - npages += atop(end) - atop(start); - } -#else /* NISADMA > 0 */ - uvm_page_physload(atop(start), atop(end), - atop(start), atop(end), VM_FREELIST_DEFAULT); - npages += atop(end) - atop(start); -#endif /* NISADMA > 0 */ - ++loop; - } - -#ifdef MYCROFT_HACK - printf("npages = %ld\n", npages); -#endif - - virtual_start = KERNEL_VM_BASE; - virtual_end = virtual_start + KERNEL_VM_SIZE - 1; - - ALLOC_PAGE_HOOK(page_hook0, NBPG); - ALLOC_PAGE_HOOK(page_hook1, NBPG); - - /* - * The mem special device needs a virtual hook but we don't - * need a pte - */ - memhook = (char *)virtual_start; - virtual_start += NBPG; - - msgbufaddr = (caddr_t)virtual_start; - msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start); - virtual_start += round_page(MSGBUFSIZE); - - size = npages * sizeof(struct pv_entry); - boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size); - bzero(boot_pvent, size); - size = npages * sizeof(char); - boot_attrs = (char *)uvm_pageboot_alloc(size); - bzero(boot_attrs, size); - - cpu_cache_cleanD(); -} - -/* - * void pmap_init(void) - * - * Initialize the pmap module. - * Called by vm_init() in vm/vm_init.c in order to initialise - * any structures that the pmap system needs to map virtual memory. - */ - -extern int physmem; - -void -pmap_init() -{ - int lcv; - -#ifdef MYCROFT_HACK - printf("physmem = %d\n", physmem); -#endif - - /* - * Set the available memory vars - These do not map to real memory - * addresses and cannot as the physical memory is fragmented. - * They are used by ps for %mem calculations. - * One could argue whether this should be the entire memory or just - * the memory that is useable in a user process. - */ - avail_start = 0; - avail_end = physmem * NBPG; - - /* Set up pmap info for physsegs. */ - for (lcv = 0; lcv < vm_nphysseg; lcv++) { - vm_physmem[lcv].pmseg.pvent = boot_pvent; - boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start; - vm_physmem[lcv].pmseg.attrs = boot_attrs; - boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start; - } -#ifdef MYCROFT_HACK - for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { - printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n", - lcv, - vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs, - vm_physmem[lcv].start, vm_physmem[lcv].end); - } -#endif - TAILQ_INIT(&pv_page_freelist); - -#ifdef DIAGNOSTIC - /* Now it is safe to enable pv_entry recording. */ - pmap_initialized = TRUE; -#endif - - /* Initialise our L1 page table queues and counters */ - SIMPLEQ_INIT(&l1pt_static_queue); - l1pt_static_queue_count = 0; - l1pt_static_create_count = 0; - SIMPLEQ_INIT(&l1pt_queue); - l1pt_queue_count = 0; - l1pt_create_count = 0; - l1pt_reuse_count = 0; -} - -/* - * pmap_postinit() - * - * This routine is called after the vm and kmem subsystems have been - * initialised. 
This allows the pmap code to perform any initialisation - * that can only be done one the memory allocation is in place. - */ - -void -pmap_postinit() -{ - int loop; - struct l1pt *pt; - -#ifdef PMAP_STATIC_L1S - for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) { -#else /* PMAP_STATIC_L1S */ - for (loop = 0; loop < max_processes; ++loop) { -#endif /* PMAP_STATIC_L1S */ - /* Allocate a L1 page table */ - pt = pmap_alloc_l1pt(); - if (!pt) - panic("Cannot allocate static L1 page tables\n"); - - /* Clean it */ - bzero((void *)pt->pt_va, PD_SIZE); - pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN); - /* Add the page table to the queue */ - SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue); - ++l1pt_static_queue_count; - ++l1pt_static_create_count; - } -} - - -/* - * Create and return a physical map. - * - * If the size specified for the map is zero, the map is an actual physical - * map, and may be referenced by the hardware. - * - * If the size specified is non-zero, the map will be used in software only, - * and is bounded by that size. - */ - -pmap_t -pmap_create() -{ - pmap_t pmap; - - /* Allocate memory for pmap structure and zero it */ - pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); - bzero(pmap, sizeof(*pmap)); - - /* Now init the machine part of the pmap */ - pmap_pinit(pmap); - return(pmap); -} - -/* - * pmap_alloc_l1pt() - * - * This routine allocates physical and virtual memory for a L1 page table - * and wires it. - * A l1pt structure is returned to describe the allocated page table. - * - * This routine is allowed to fail if the required memory cannot be allocated. - * In this case NULL is returned. - */ - -struct l1pt * -pmap_alloc_l1pt(void) -{ - vm_offset_t va, pa; - struct l1pt *pt; - int error; - vm_page_t m; - pt_entry_t *pte; - - /* Allocate virtual address space for the L1 page table */ - va = uvm_km_valloc(kernel_map, PD_SIZE); - if (va == 0) { -#ifdef DIAGNOSTIC - printf("pmap: Cannot allocate pageable memory for L1\n"); -#endif /* DIAGNOSTIC */ - return(NULL); - } - - /* Allocate memory for the l1pt structure */ - pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK); - - /* - * Allocate pages from the VM system. - */ - TAILQ_INIT(&pt->pt_plist); - error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end, - PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK); - if (error) { -#ifdef DIAGNOSTIC - printf("pmap: Cannot allocate physical memory for L1 (%d)\n", - error); -#endif /* DIAGNOSTIC */ - /* Release the resources we already have claimed */ - free(pt, M_VMPMAP); - uvm_km_free(kernel_map, va, PD_SIZE); - return(NULL); - } - - /* Map our physical pages into our virtual space */ - pt->pt_va = va; - m = pt->pt_plist.tqh_first; - while (m && va < (pt->pt_va + PD_SIZE)) { - pa = VM_PAGE_TO_PHYS(m); - - pmap_enter(pmap_kernel(), va, pa, - VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED); - - /* Revoke cacheability and bufferability */ - /* XXX should be done better than this */ - pte = pmap_pte(pmap_kernel(), va); - *pte = *pte & ~(PT_C | PT_B); - - va += NBPG; - m = m->pageq.tqe_next; - } - -#ifdef DIAGNOSTIC - if (m) - panic("pmap_alloc_l1pt: pglist not empty\n"); -#endif /* DIAGNOSTIC */ - - pt->pt_flags = 0; - return(pt); -} - -/* - * Free a L1 page table previously allocated with pmap_alloc_l1pt(). 
- */ -void -pmap_free_l1pt(pt) - struct l1pt *pt; -{ - /* Separate the physical memory for the virtual space */ - pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE); - - /* Return the physical memory */ - uvm_pglistfree(&pt->pt_plist); - - /* Free the virtual space */ - uvm_km_free(kernel_map, pt->pt_va, PD_SIZE); - - /* Free the l1pt structure */ - free(pt, M_VMPMAP); -} - -/* - * Allocate a page directory. - * This routine will either allocate a new page directory from the pool - * of L1 page tables currently held by the kernel or it will allocate - * a new one via pmap_alloc_l1pt(). - * It will then initialise the l1 page table for use. - */ -int -pmap_allocpagedir(pmap) - struct pmap *pmap; -{ - vm_offset_t pa; - struct l1pt *pt; - pt_entry_t *pte; - - PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap)); - - /* Do we have any spare L1's lying around ? */ - if (l1pt_static_queue_count) { - --l1pt_static_queue_count; - pt = l1pt_static_queue.sqh_first; - SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue); - } else if (l1pt_queue_count) { - --l1pt_queue_count; - pt = l1pt_queue.sqh_first; - SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue); - ++l1pt_reuse_count; - } else { - pt = pmap_alloc_l1pt(); - if (!pt) - return(ENOMEM); - ++l1pt_create_count; - } - - /* Store the pointer to the l1 descriptor in the pmap. */ - pmap->pm_l1pt = pt; - - /* Get the physical address of the start of the l1 */ - pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first); - - /* Store the virtual address of the l1 in the pmap. */ - pmap->pm_pdir = (pd_entry_t *)pt->pt_va; - - /* Clean the L1 if it is dirty */ - if (!(pt->pt_flags & PTFLAG_CLEAN)) - bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE)); - - /* Do we already have the kernel mappings ? */ - if (!(pt->pt_flags & PTFLAG_KPT)) { - /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */ - - bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE), - (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE), - KERNEL_PD_SIZE); - pt->pt_flags |= PTFLAG_KPT; - } - - /* Allocate a page table to map all the page tables for this pmap */ - -#ifdef DIAGNOSTIC - if (pmap->pm_vptpt) { - /* XXX What if we have one already ? */ - panic("pmap_allocpagedir: have pt already\n"); - } -#endif /* DIAGNOSTIC */ - pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG); - (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt); - pmap->pm_pptpt &= PG_FRAME; - /* Revoke cacheability and bufferability */ - /* XXX should be done better than this */ - pte = pmap_pte(kernel_pmap, pmap->pm_vptpt); - *pte = *pte & ~(PT_C | PT_B); - - /* Wire in this page table */ - pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt); - - pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */ - - /* - * Map the kernel page tables for 0xf0000000 + - * into the page table used to map the - * pmap's page tables - */ - bcopy((char *)(PROCESS_PAGE_TBLS_BASE - + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) - + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)), - (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2), - (KERNEL_PD_SIZE >> 2)); - - pmap->pm_count = 1; - simple_lock_init(&pmap->pm_lock); - - return(0); -} - - -/* - * Initialize a preallocated and zeroed pmap structure, - * such as one in a vmspace structure. 
- */ - -static int pmap_pagedir_ident; /* tsleep() ident */ - -void -pmap_pinit(pmap) - struct pmap *pmap; -{ - PDEBUG(0, printf("pmap_pinit(%p)\n", pmap)); - - /* Keep looping until we succeed in allocating a page directory */ - while (pmap_allocpagedir(pmap) != 0) { - /* - * Ok we failed to allocate a suitable block of memory for an - * L1 page table. This means that either: - * 1. 16KB of virtual address space could not be allocated - * 2. 16KB of physically contiguous memory on a 16KB boundary - * could not be allocated. - * - * Since we cannot fail we will sleep for a while and try - * again. Although we will be wakened when another page table - * is freed other memory releasing and swapping may occur - * that will mean we can succeed so we will keep trying - * regularly just in case. - */ - - if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO, - "l1ptwait", 1000) == EWOULDBLOCK) - printf("pmap: Cannot allocate L1 page table, sleeping ...\n"); - } - - /* Map zero page for the pmap. This will also map the L2 for it */ - pmap_enter(pmap, 0x00000000, systempage.pv_pa, - VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); -} - - -void -pmap_freepagedir(pmap) - pmap_t pmap; -{ - /* Free the memory used for the page table mapping */ - uvm_km_free(kernel_map, (vm_offset_t)pmap->pm_vptpt, NBPG); - - /* junk the L1 page table */ - if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) { - /* Add the page table to the queue */ - SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue); - ++l1pt_static_queue_count; - /* Wake up any sleeping processes waiting for a l1 page table */ - wakeup((caddr_t)&pmap_pagedir_ident); - } else if (l1pt_queue_count < 8) { - /* Add the page table to the queue */ - SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue); - ++l1pt_queue_count; - /* Wake up any sleeping processes waiting for a l1 page table */ - wakeup((caddr_t)&pmap_pagedir_ident); - } else - pmap_free_l1pt(pmap->pm_l1pt); -} - - -/* - * Retire the given physical map from service. - * Should only be called if the map contains no valid mappings. - */ - -void -pmap_destroy(pmap) - pmap_t pmap; -{ - int count; - - if (pmap == NULL) - return; - - PDEBUG(0, printf("pmap_destroy(%p)\n", pmap)); - simple_lock(&pmap->pm_lock); - count = --pmap->pm_count; - simple_unlock(&pmap->pm_lock); - if (count == 0) { - pmap_release(pmap); - free((caddr_t)pmap, M_VMPMAP); - } -} - - -/* - * Release any resources held by the given physical map. - * Called when a pmap initialized by pmap_pinit is being released. - * Should only be called if the map contains no valid mappings. - */ - -void -pmap_release(pmap) - pmap_t pmap; -{ - struct vm_page *page; - pt_entry_t *pte; - int loop; - - PDEBUG(0, printf("pmap_release(%p)\n", pmap)); - -#if 0 - if (pmap->pm_count != 1) /* XXX: needs sorting */ - panic("pmap_release count %d", pmap->pm_count); -#endif - - /* Remove the zero page mapping */ - pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG); - - /* - * Free any page tables still mapped - * This is only temporay until pmap_enter can count the number - * of mappings made in a page table. Then pmap_remove() can - * reduce the count and free the pagetable when the count - * reaches zero. 
- */ - for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) { - pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4); - if (*pte != 0) { - PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte)); - page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); - if (page == NULL) - panic("pmap_release: bad address for phys page"); - uvm_pagefree(page); - } - } - /* Free the page dir */ - pmap_freepagedir(pmap); -} - - -/* - * void pmap_reference(pmap_t pmap) - * - * Add a reference to the specified pmap. - */ - -void -pmap_reference(pmap) - pmap_t pmap; -{ - if (pmap == NULL) - return; - - simple_lock(&pmap->pm_lock); - pmap->pm_count++; - simple_unlock(&pmap->pm_lock); -} - -/* - * void pmap_virtual_space(vm_offset_t *start, vm_offset_t *end) - * - * Return the start and end addresses of the kernel's virtual space. - * These values are setup in pmap_bootstrap and are updated as pages - * are allocated. - */ - -void -pmap_virtual_space(start, end) - vm_offset_t *start; - vm_offset_t *end; -{ - *start = virtual_start; - *end = virtual_end; -} - - -/* - * Activate the address space for the specified process. If the process - * is the current process, load the new MMU context. - */ -void -pmap_activate(p) - struct proc *p; -{ - pmap_t pmap = p->p_vmspace->vm_map.pmap; - struct pcb *pcb = &p->p_addr->u_pcb; - - (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir, - (paddr_t *)&pcb->pcb_pagedir); - - PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n", - p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir)); - - if (p == curproc) { - PDEBUG(0, printf("pmap_activate: setting TTB\n")); - setttb((u_int)pcb->pcb_pagedir); - } -#if 0 - pmap->pm_pdchanged = FALSE; -#endif -} - - -/* - * Deactivate the address space of the specified process. - */ -void -pmap_deactivate(p) - struct proc *p; -{ -} - - -/* - * pmap_clean_page() - * - * This is a local function used to work out the best strategy to clean - * a single page referenced by its entry in the PV table. It's used by - * pmap_copy_page, pmap_zero page and maybe some others later on. - * - * Its policy is effectively: - * o If there are no mappings, we don't bother doing anything with the cache. - * o If there is one mapping, we clean just that page. - * o If there are multiple mappings, we clean the entire cache. - * - * So that some functions can be further optimised, it returns 0 if it didn't - * clean the entire cache, or 1 if it did. - * - * XXX One bug in this routine is that if the pv_entry has a single page - * mapped at 0x00000000 a whole cache clean will be performed rather than - * just the 1 page. Since this should not occur in everyday use and if it does - * it will just result in not the most efficient clean for the page. - */ -static int -pmap_clean_page(pv) - struct pv_entry *pv; -{ - int s; - int cache_needs_cleaning = 0; - vm_offset_t page_to_clean = 0; - - /* Go to splvm() so we get exclusive lock for a mo */ - s = splvm(); - if (pv->pv_pmap) { - cache_needs_cleaning = 1; - if (!pv->pv_next) - page_to_clean = pv->pv_va; - } - splx(s); - - /* Do cache ops outside the splvm. */ - if (page_to_clean) - cpu_cache_purgeID_rng(page_to_clean, NBPG); - else if (cache_needs_cleaning) { - cpu_cache_purgeID(); - return (1); - } - return (0); -} - -/* - * pmap_find_pv() - * - * This is a local function that finds a PV entry for a given physical page. - * This is a common op, and this function removes loads of ifdefs in the code. 
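pmap_find_pv() below, like the attribute handling throughout this file, leans on vm_physseg_find(): managed memory is described by an array of segments, each covering a range of page frame numbers and carrying per-page arrays (pmseg.pvent, pmseg.attrs), so a physical address resolves to a (bank, offset) pair that indexes those arrays. A stand-alone sketch of that lookup; the segment boundaries below are made up for the example.

/* Illustrative model of the vm_physseg lookup -- not the kernel code. */
#include <stdio.h>

#define PGSHIFT		12
#define atop(pa)	((unsigned long)(pa) >> PGSHIFT)

struct physseg {
	unsigned long start, end;	/* page frame numbers, [start, end) */
};

static const struct physseg segs[] = {
	{ atop(0x00000000), atop(0x00800000) },		/* assumed 8MB bank 0 */
	{ atop(0x40000000), atop(0x40400000) },		/* assumed 4MB bank 1 */
};

/* Return the bank containing pa and its page offset, or -1 if unmanaged. */
static int
physseg_find(unsigned long pa, int *offp)
{
	unsigned long pfn = atop(pa);
	int bank;

	for (bank = 0; bank < (int)(sizeof(segs) / sizeof(segs[0])); bank++)
		if (pfn >= segs[bank].start && pfn < segs[bank].end) {
			*offp = (int)(pfn - segs[bank].start);
			return bank;
		}
	return -1;
}

int
main(void)
{
	int off, bank = physseg_find(0x40001000, &off);

	printf("bank %d, page offset %d\n", bank, off);	/* bank 1, offset 1 */
	return 0;
}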
- */ -static __inline struct pv_entry * -pmap_find_pv(phys) - vm_offset_t phys; -{ - int bank, off; - struct pv_entry *pv; - -#ifdef DIAGNOSTIC - if (!pmap_initialized) - panic("pmap_find_pv: !pmap_initialized"); -#endif - - if ((bank = vm_physseg_find(atop(phys), &off)) == -1) - panic("pmap_find_pv: not a real page, phys=%lx\n", phys); - pv = &vm_physmem[bank].pmseg.pvent[off]; - return (pv); -} - -/* - * pmap_zero_page() - * - * Zero a given physical page by mapping it at a page hook point. - * In doing the zero page op, the page we zero is mapped cachable, as with - * StrongARM accesses to non-cached pages are non-burst making writing - * _any_ bulk data very slow. - */ -void -pmap_zero_page(phys) - vm_offset_t phys; -{ - struct pv_entry *pv; - - /* Get an entry for this page, and clean it it. */ - pv = pmap_find_pv(phys); - pmap_clean_page(pv); - - /* - * Hook in the page, zero it, and purge the cache for that - * zeroed page. Invalidate the TLB as needed. - */ - *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW); - cpu_tlb_flushD_SE(page_hook0.va); - bzero_page(page_hook0.va); - cpu_cache_purgeD_rng(page_hook0.va, NBPG); -} - -/* - * pmap_copy_page() - * - * Copy one physical page into another, by mapping the pages into - * hook points. The same comment regarding cachability as in - * pmap_zero_page also applies here. - */ -void -pmap_copy_page(src, dest) - vm_offset_t src; - vm_offset_t dest; -{ - struct pv_entry *src_pv, *dest_pv; - - /* Get PV entries for the pages, and clean them if needed. */ - src_pv = pmap_find_pv(src); - dest_pv = pmap_find_pv(dest); - if (!pmap_clean_page(src_pv)) - pmap_clean_page(dest_pv); - - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. - */ - *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW); - *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW); - cpu_tlb_flushD_SE(page_hook0.va); - cpu_tlb_flushD_SE(page_hook1.va); - bcopy_page(page_hook0.va, page_hook1.va); - cpu_cache_purgeD_rng(page_hook0.va, NBPG); - cpu_cache_purgeD_rng(page_hook1.va, NBPG); -} - -/* - * int pmap_next_phys_page(vm_offset_t *addr) - * - * Allocate another physical page returning true or false depending - * on whether a page could be allocated. 
- */ - -vm_offset_t -pmap_next_phys_page(addr) - vm_offset_t addr; - -{ - int loop; - - if (addr < bootconfig.dram[0].address) - return(bootconfig.dram[0].address); - - loop = 0; - - while (bootconfig.dram[loop].address != 0 - && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) - ++loop; - - if (bootconfig.dram[loop].address == 0) - return(0); - - addr += NBPG; - - if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) { - if (bootconfig.dram[loop + 1].address == 0) - return(0); - addr = bootconfig.dram[loop + 1].address; - } - - return(addr); -} - -#if 0 -void -pmap_pte_addref(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pd_entry_t *pde; - vm_offset_t pa; - struct vm_page *m; - - if (pmap == pmap_kernel()) - return; - - pde = pmap_pde(pmap, va & ~(3 << PDSHIFT)); - pa = pmap_pte_pa(pde); - m = PHYS_TO_VM_PAGE(pa); - ++m->wire_count; -#ifdef MYCROFT_HACK - printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n", - pmap, va, pde, pa, m, m->wire_count); -#endif -} - -void -pmap_pte_delref(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pd_entry_t *pde; - vm_offset_t pa; - struct vm_page *m; - - if (pmap == pmap_kernel()) - return; - - pde = pmap_pde(pmap, va & ~(3 << PDSHIFT)); - pa = pmap_pte_pa(pde); - m = PHYS_TO_VM_PAGE(pa); - --m->wire_count; -#ifdef MYCROFT_HACK - printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n", - pmap, va, pde, pa, m, m->wire_count); -#endif - if (m->wire_count == 0) { -#ifdef MYCROFT_HACK - printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n", - pmap, va, pde, pa, m); -#endif - pmap_unmap_in_l1(pmap, va); - uvm_pagefree(m); - --pmap->pm_stats.resident_count; - } -} -#else -#define pmap_pte_addref(pmap, va) -#define pmap_pte_delref(pmap, va) -#endif - -/* - * Since we have a virtually indexed cache, we may need to inhibit caching if - * there is more than one mapping and at least one of them is writable. - * Since we purge the cache on every context switch, we only need to check for - * other mappings within the same pmap, or kernel_pmap. - * This function is also called when a page is unmapped, to possibly reenable - * caching on any remaining mappings. - */ -void -pmap_vac_me_harder(pmap, pv) - pmap_t pmap; - struct pv_entry *pv; -{ - struct pv_entry *npv; - pt_entry_t *pte; - int entries = 0; - int writeable = 0; - - if (pv->pv_pmap == NULL) - return; - - /* - * Count mappings and writable mappings in this pmap. - * Keep a pointer to the first one. - */ - for (npv = pv; npv; npv = npv->pv_next) { - /* Count mappings in the same pmap */ - if (pmap == npv->pv_pmap) { - if (entries++ == 0) - pv = npv; - /* Writeable mappings */ - if (npv->pv_flags & PT_Wr) - ++writeable; - } - } - - /* - * Enable or disable caching as necessary. - * We do a quick check of the first PTE to avoid walking the list if - * we're already in the right state. 
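Separated from the PTE walking that follows, the rule pmap_vac_me_harder() enforces is small: with a virtually indexed cache a physical page may only stay cacheable while it has at most one mapping in the pmap being examined, or while none of those mappings are writable. A distilled decision function; the PT_C/PT_B values here are assumed purely for the sketch.

/* Illustrative policy check -- not the kernel code. */
#include <stdio.h>

#define PT_C	0x08		/* cacheable PTE bit (value assumed) */
#define PT_B	0x04		/* bufferable PTE bit (value assumed) */

/*
 * Cache bits the page's mappings may carry, given how many mappings this
 * pmap has for the page and how many of them are writable.
 */
static unsigned int
vac_cache_bits(int entries, int writable)
{
	if (entries > 1 && writable > 0)
		return 0;		/* aliasing danger: run uncached */
	return PT_C | PT_B;		/* safe: cached and buffered */
}

int
main(void)
{
	printf("1 mapping,  writable:  %#x\n", vac_cache_bits(1, 1));
	printf("2 mappings, writable:  %#x\n", vac_cache_bits(2, 1));
	printf("2 mappings, read-only: %#x\n", vac_cache_bits(2, 0));
	return 0;
}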
- */ - if (entries > 1 && writeable) { - pte = pmap_pte(pmap, pv->pv_va); - if (~*pte & (PT_C | PT_B)) - return; - *pte = *pte & ~(PT_C | PT_B); - for (npv = pv->pv_next; npv; npv = npv->pv_next) { - if (pmap == npv->pv_pmap) { - pte = pmap_pte(pmap, npv->pv_va); - *pte = *pte & ~(PT_C | PT_B); - } - } - } else if (entries > 0) { - pte = pmap_pte(pmap, pv->pv_va); - if (*pte & (PT_C | PT_B)) - return; - *pte = *pte | (PT_C | PT_B); - for (npv = pv->pv_next; npv; npv = npv->pv_next) { - if (pmap == npv->pv_pmap) { - pte = pmap_pte(pmap, npv->pv_va); - *pte = *pte | (PT_C | PT_B); - } - } - } -} - -/* - * pmap_remove() - * - * pmap_remove is responsible for nuking a number of mappings for a range - * of virtual address space in the current pmap. To do this efficiently - * is interesting, because in a number of cases a wide virtual address - * range may be supplied that contains few actual mappings. So, the - * optimisations are: - * 1. Try and skip over hunks of address space for which an L1 entry - * does not exist. - * 2. Build up a list of pages we've hit, up to a maximum, so we can - * maybe do just a partial cache clean. This path of execution is - * complicated by the fact that the cache must be flushed _before_ - * the PTE is nuked, being a VAC :-) - * 3. Maybe later fast-case a single page, but I don't think this is - * going to make _that_ much difference overall. - */ - -#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 - -void -pmap_remove(pmap, sva, eva) - pmap_t pmap; - vm_offset_t sva; - vm_offset_t eva; -{ - int cleanlist_idx = 0; - struct pagelist { - vm_offset_t va; - pt_entry_t *pte; - } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; - pt_entry_t *pte = 0; - vm_offset_t pa; - int pmap_active; - struct pv_entry *pv; - - /* Exit quick if there is no pmap */ - if (!pmap) - return; - - PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva)); - - sva &= PG_FRAME; - eva &= PG_FRAME; - - /* Get a page table pointer */ - while (sva < eva) { - pte = pmap_pte(pmap, sva); - if (pte) - break; - sva = (sva & PD_MASK) + NBPD; - } - - /* Note if the pmap is active thus require cache and tlb cleans */ - if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) - || (pmap == kernel_pmap)) - pmap_active = 1; - else - pmap_active = 0; - - /* Now loop along */ - while (sva < eva) { - /* Check if we can move to the next PDE (l1 chunk) */ - if (!(sva & PT_MASK)) - if (!pmap_pde_v(pmap_pde(pmap, sva))) { - sva += NBPD; - pte += arm_byte_to_page(NBPD); - continue; - } - - /* We've found a valid PTE, so this page of PTEs has to go. */ - if (pmap_pte_v(pte)) { - int bank, off; - - /* Update statistics */ - --pmap->pm_stats.resident_count; - - /* - * Add this page to our cache remove list, if we can. - * If, however the cache remove list is totally full, - * then do a complete cache invalidation taking note - * to backtrack the PTE table beforehand, and ignore - * the lists in future because there's no longer any - * point in bothering with them (we've paid the - * penalty, so will carry on unhindered). Otherwise, - * when we fall out, we just clean the list. - */ - PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte)); - pa = pmap_pte_pa(pte); - - if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { - /* Add to the clean list. */ - cleanlist[cleanlist_idx].pte = pte; - cleanlist[cleanlist_idx].va = sva; - cleanlist_idx++; - } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { - int cnt; - - /* Nuke everything if needed. 
*/ - if (pmap_active) { - cpu_cache_purgeID(); - cpu_tlb_flushID(); - } - - /* - * Roll back the previous PTE list, - * and zero out the current PTE. - */ - for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { - *cleanlist[cnt].pte = 0; - pmap_pte_delref(pmap, cleanlist[cnt].va); - } - *pte = 0; - pmap_pte_delref(pmap, sva); - cleanlist_idx++; - } else { - /* - * We've already nuked the cache and - * TLB, so just carry on regardless, - * and we won't need to do it again - */ - *pte = 0; - pmap_pte_delref(pmap, sva); - } - - /* - * Update flags. In a number of circumstances, - * we could cluster a lot of these and do a - * number of sequential pages in one go. - */ - if ((bank = vm_physseg_find(atop(pa), &off)) != -1) { - pv = &vm_physmem[bank].pmseg.pvent[off]; - pmap_remove_pv(pmap, sva, pv); - pmap_vac_me_harder(pmap, pv); - } - } - sva += NBPG; - pte++; - } - - /* - * Now, if we've fallen through down to here, chances are that there - * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left. - */ - if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { - u_int cnt; - - for (cnt = 0; cnt < cleanlist_idx; cnt++) { - if (pmap_active) { - cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG); - *cleanlist[cnt].pte = 0; - cpu_tlb_flushID_SE(cleanlist[cnt].va); - } else - *cleanlist[cnt].pte = 0; - pmap_pte_delref(pmap, cleanlist[cnt].va); - } - } -} - -/* - * Routine: pmap_remove_all - * Function: - * Removes this physical page from - * all physical maps in which it resides. - * Reflects back modify bits to the pager. - */ - -void -pmap_remove_all(pa) - vm_offset_t pa; -{ - struct pv_entry *ph, *pv, *npv; - pmap_t pmap; - pt_entry_t *pte; - int s; - - PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa)); - - pv = ph = pmap_find_pv(pa); - pmap_clean_page(pv); - - s = splvm(); - - if (ph->pv_pmap == NULL) { - PDEBUG(0, printf("free page\n")); - splx(s); - return; - } - - while (pv) { - pmap = pv->pv_pmap; - pte = pmap_pte(pmap, pv->pv_va); - - PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte, - pv->pv_va, pv->pv_flags)); -#ifdef DEBUG - if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa) - panic("pmap_remove_all: bad mapping"); -#endif /* DEBUG */ - - /* - * Update statistics - */ - --pmap->pm_stats.resident_count; - - /* Wired bit */ - if (pv->pv_flags & PT_W) - --pmap->pm_stats.wired_count; - - /* - * Invalidate the PTEs. - * XXX: should cluster them up and invalidate as many - * as possible at once. - */ - -#ifdef needednotdone -reduce wiring count on page table pages as references drop -#endif - - *pte = 0; - pmap_pte_delref(pmap, pv->pv_va); - - npv = pv->pv_next; - if (pv == ph) - ph->pv_pmap = NULL; - else - pmap_free_pv(pv); - pv = npv; - } - - splx(s); - - PDEBUG(0, printf("done\n")); - cpu_tlb_flushID(); -} - - -/* - * Set the physical protection on the specified range of this map as requested. - */ - -void -pmap_protect(pmap, sva, eva, prot) - pmap_t pmap; - vm_offset_t sva; - vm_offset_t eva; - vm_prot_t prot; -{ - pt_entry_t *pte = NULL; - int armprot; - int flush = 0; - vm_offset_t pa; - int bank, off; - struct pv_entry *pv; - - /* - * Make sure pmap is valid. -dct - */ - if (pmap == NULL) - return; - PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n", - pmap, sva, eva, prot)); - - if (~prot & VM_PROT_READ) { - /* Just remove the mappings. */ - pmap_remove(pmap, sva, eva); - return; - } - if (prot & VM_PROT_WRITE) { - /* - * If this is a read->write transition, just ignore it and let - * uvm_fault() take care of it later. 
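The PMAP_REMOVE_CLEAN_LIST_SIZE handling in pmap_remove() above boils down to: remember up to N removed pages so their cache lines can be purged individually afterwards, and the moment an (N+1)th page turns up, purge the whole cache and TLB once and stop maintaining the list. A stand-alone model of just that batching decision (the real code also zeroes and rolls back the listed PTEs, omitted here; the purge operations are stubbed with printf):

/* Illustrative model of the pmap_remove() clean-list batching -- not the kernel code. */
#include <stdio.h>

#define CLEAN_LIST_SIZE	3
#define NPAGES		5		/* pages removed in this example */

int
main(void)
{
	unsigned long cleanlist[CLEAN_LIST_SIZE];
	int idx = 0, i;

	for (i = 0; i < NPAGES; i++) {
		unsigned long va = 0x1000UL * (unsigned long)(i + 1);

		if (idx < CLEAN_LIST_SIZE) {
			cleanlist[idx++] = va;	/* remember it for later */
		} else if (idx == CLEAN_LIST_SIZE) {
			printf("purge entire cache and TLB\n");
			idx++;			/* give up on the list */
		}
		/* idx > CLEAN_LIST_SIZE: already purged everything, carry on */
	}

	if (idx <= CLEAN_LIST_SIZE)		/* the list never overflowed */
		for (i = 0; i < idx; i++)
			printf("purge one page at 0x%lx\n", cleanlist[i]);
	return 0;
}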
- */ - return; - } - - sva &= PG_FRAME; - eva &= PG_FRAME; - - /* - * We need to acquire a pointer to a page table page before entering - * the following loop. - */ - while (sva < eva) { - pte = pmap_pte(pmap, sva); - if (pte) - break; - sva = (sva & PD_MASK) + NBPD; - } - - while (sva < eva) { - /* only check once in a while */ - if ((sva & PT_MASK) == 0) { - if (!pmap_pde_v(pmap_pde(pmap, sva))) { - /* We can race ahead here, to the next pde. */ - sva += NBPD; - pte += arm_byte_to_page(NBPD); - continue; - } - } - - if (!pmap_pte_v(pte)) - goto next; - - flush = 1; - - armprot = 0; - if (sva < VM_MAXUSER_ADDRESS) - armprot |= PT_AP(AP_U); - else if (sva < VM_MAX_ADDRESS) - armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */ - *pte = (*pte & 0xfffff00f) | armprot; - - pa = pmap_pte_pa(pte); - - /* Get the physical page index */ - - /* Clear write flag */ - if ((bank = vm_physseg_find(atop(pa), &off)) != -1) { - pv = &vm_physmem[bank].pmseg.pvent[off]; - (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0); - pmap_vac_me_harder(pmap, pv); - } - -next: - sva += NBPG; - pte++; - } - - if (flush) - cpu_tlb_flushID(); -} - -/* - * void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, - * int flags) - * - * Insert the given physical page (p) at - * the specified virtual address (v) in the - * target physical map with the protection requested. - * - * If specified, the page will be wired down, meaning - * that the related pte can not be reclaimed. - * - * NB: This is the only routine which MAY NOT lazy-evaluate - * or lose information. That is, this routine must actually - * insert this page into the given map NOW. - */ - -int -pmap_enter(pmap, va, pa, prot, flags) - pmap_t pmap; - vm_offset_t va; - vm_offset_t pa; - vm_prot_t prot; - int flags; -{ - pt_entry_t *pte; - u_int npte; - int bank, off; - struct pv_entry *pv = NULL; - vm_offset_t opa; - int nflags; - boolean_t wired = (flags & PMAP_WIRED) != 0; - - PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n", - va, pa, pmap, prot, wired)); - - /* Valid pmap ? */ - if (pmap == NULL) - return (KERN_SUCCESS); - -#ifdef DIAGNOSTIC - /* Valid address ? */ - if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE)) - panic("pmap_enter: too big"); - if (pmap != pmap_kernel() && va != 0) { - if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS) - panic("pmap_enter: kernel page in user map"); - } else { - if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS) - panic("pmap_enter: user page in kernel map"); - if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS) - panic("pmap_enter: entering PT page"); - } -#endif - - /* - * Get a pointer to the pte for this virtual address. If the - * pte pointer is NULL then we are missing the L2 page table - * so we need to create one. - */ - pte = pmap_pte(pmap, va); - if (!pte) { - vm_offset_t l2pa; - struct vm_page *m; - - /* Allocate a page table */ - for (;;) { - m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); - if (m != NULL) - break; - - /* - * No page available. If we're the kernel - * pmap, we die, since we might not have - * a valid thread context. For user pmaps, - * we assume that we _do_ have a valid thread - * context, so we wait here for the pagedaemon - * to free up some pages. - * - * XXX THE VM CODE IS PROBABLY HOLDING LOCKS - * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP - * XXX SO THIS IS PROBABLY SAFE. In any case, - * XXX other pmap modules claim it is safe to - * XXX sleep here if it's a user pmap. 
- */ - if (pmap == pmap_kernel()) - panic("pmap_enter: no free pages"); - else - uvm_wait("pmap_enter"); - } - - /* Wire this page table into the L1. */ - l2pa = VM_PAGE_TO_PHYS(m); - pmap_zero_page(l2pa); - pmap_map_in_l1(pmap, va, l2pa); - ++pmap->pm_stats.resident_count; - - pte = pmap_pte(pmap, va); -#ifdef DIAGNOSTIC - if (!pte) - panic("pmap_enter: no pte"); -#endif - } - - nflags = 0; - if (prot & VM_PROT_WRITE) - nflags |= PT_Wr; - if (wired) - nflags |= PT_W; - - /* More debugging info */ - PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte, - *pte)); - - /* Is the pte valid ? If so then this page is already mapped */ - if (pmap_pte_v(pte)) { - /* Get the physical address of the current page mapped */ - opa = pmap_pte_pa(pte); - -#ifdef MYCROFT_HACK - printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa); -#endif - - /* Are we mapping the same page ? */ - if (opa == pa) { - /* All we must be doing is changing the protection */ - PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n", - va, pa)); - - /* Has the wiring changed ? */ - if ((bank = vm_physseg_find(atop(pa), &off)) != -1) { - pv = &vm_physmem[bank].pmseg.pvent[off]; - (void) pmap_modify_pv(pmap, va, pv, - PT_Wr | PT_W, nflags); - } - } else { - /* We are replacing the page with a new one. */ - cpu_cache_purgeID_rng(va, NBPG); - - PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n", - va, pa, opa)); - - /* - * If it is part of our managed memory then we - * must remove it from the PV list - */ - if ((bank = vm_physseg_find(atop(opa), &off)) != -1) { - pv = &vm_physmem[bank].pmseg.pvent[off]; - pmap_remove_pv(pmap, va, pv); - } - - goto enter; - } - } else { - opa = 0; - pmap_pte_addref(pmap, va); - - /* pte is not valid so we must be hooking in a new page */ - ++pmap->pm_stats.resident_count; - - enter: - /* - * Enter on the PV list if part of our managed memory - */ - if ((bank = vm_physseg_find(atop(pa), &off)) != -1) { - pv = &vm_physmem[bank].pmseg.pvent[off]; - pmap_enter_pv(pmap, va, pv, nflags); - } - } - -#ifdef MYCROFT_HACK - if (mycroft_hack) - printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv); -#endif - - /* Construct the pte, giving the correct access. */ - npte = (pa & PG_FRAME); - - /* VA 0 is magic. */ - if (pmap != pmap_kernel() && va != 0) - npte |= PT_AP(AP_U); - - if (bank != -1) { -#ifdef DIAGNOSTIC - if ((flags & VM_PROT_ALL) & ~prot) - panic("pmap_enter: access_type exceeds prot"); -#endif - npte |= PT_C | PT_B; - if (flags & VM_PROT_WRITE) { - npte |= L2_SPAGE | PT_AP(AP_W); - vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M; - } else if (flags & VM_PROT_ALL) { - npte |= L2_SPAGE; - vm_physmem[bank].pmseg.attrs[off] |= PT_H; - } else - npte |= L2_INVAL; - } else { - if (prot & VM_PROT_WRITE) - npte |= L2_SPAGE | PT_AP(AP_W); - else if (prot & VM_PROT_ALL) - npte |= L2_SPAGE; - else - npte |= L2_INVAL; - } - -#ifdef MYCROFT_HACK - if (mycroft_hack) - printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte); -#endif - - *pte = npte; - - if (bank != -1) - pmap_vac_me_harder(pmap, pv); - - /* Better flush the TLB ... 
*/ - cpu_tlb_flushID_SE(va); - - PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte)); - - return (KERN_SUCCESS); -} - -void -pmap_kenter_pa(va, pa, prot) - vaddr_t va; - paddr_t pa; - vm_prot_t prot; -{ - pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED); -} - -void -pmap_kenter_pgs(va, pgs, npgs) - vaddr_t va; - struct vm_page **pgs; - int npgs; -{ - int i; - - for (i = 0; i < npgs; i++, va += PAGE_SIZE) { - pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]), - VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED); - } -} - -void -pmap_kremove(va, len) - vaddr_t va; - vsize_t len; -{ - for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { - pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); - } -} - -/* - * pmap_page_protect: - * - * Lower the permission for all mappings to a given page. - */ - -void -pmap_page_protect(pg, prot) - struct vm_page *pg; - vm_prot_t prot; -{ - paddr_t pa = VM_PAGE_TO_PHYS(pg); - - PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot)); - - switch(prot) { - case VM_PROT_READ: - case VM_PROT_READ|VM_PROT_EXECUTE: - pmap_copy_on_write(pa); - break; - - case VM_PROT_ALL: - break; - - default: - pmap_remove_all(pa); - break; - } -} - - -/* - * Routine: pmap_unwire - * Function: Clear the wired attribute for a map/virtual-address - * pair. - * In/out conditions: - * The mapping must already exist in the pmap. - */ - -void -pmap_unwire(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pt_entry_t *pte; - vm_offset_t pa; - int bank, off; - struct pv_entry *pv; - - /* - * Make sure pmap is valid. -dct - */ - if (pmap == NULL) - return; - - /* Get the pte */ - pte = pmap_pte(pmap, va); - if (!pte) - return; - - /* Extract the physical address of the page */ - pa = pmap_pte_pa(pte); - - if ((bank = vm_physseg_find(atop(pa), &off)) == -1) - return; - pv = &vm_physmem[bank].pmseg.pvent[off]; - /* Update the wired bit in the pv entry for this page. */ - (void) pmap_modify_pv(pmap, va, pv, PT_W, 0); -} - -/* - * pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va) - * - * Return the pointer to a page table entry corresponding to the supplied - * virtual address. - * - * The page directory is first checked to make sure that a page table - * for the address in question exists and if it does a pointer to the - * entry is returned. - * - * The way this works is that that the kernel page tables are mapped - * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB. - * This allows page tables to be located quickly. - */ -pt_entry_t * -pmap_pte(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pt_entry_t *ptp; - pt_entry_t *result; - - /* The pmap must be valid */ - if (!pmap) - return(NULL); - - /* Return the address of the pte */ - PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n", - pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va)))); - - /* Do we have a valid pde ? If not we don't have a page table */ - if (!pmap_pde_v(pmap_pde(pmap, va))) { - PDEBUG(0, printf("pmap_pte: failed - pde = %p\n", - pmap_pde(pmap, va))); - return(NULL); - } - - PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n", - pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE - + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) + - (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME))); - - /* - * If the pmap is the kernel pmap or the pmap is the active one - * then we can just return a pointer to entry relative to - * PROCESS_PAGE_TBLS_BASE. - * Otherwise we need to map the page tables to an alternative - * address and reference them there. 
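pmap_pte() relies on every L2 page table of the pmap being visible as one linear 4MB array of PTEs at PROCESS_PAGE_TBLS_BASE (or at ALT_PAGE_TBLS_BASE when a foreign pmap has to be examined), so the PTE for a virtual address lives at byte offset (va >> (PGSHIFT - 2)) & ~3 inside that window. A worked example of the index arithmetic, assuming the usual PGSHIFT of 12 (4KB pages, 4-byte PTEs):

/* Worked example of the PTE-window offset used by pmap_pte() -- illustrative only. */
#include <stdio.h>

#define PGSHIFT	12			/* 4KB pages (assumed) */

int
main(void)
{
	unsigned long va = 0x00012345UL;
	unsigned long off = (va >> (PGSHIFT - 2)) & ~3UL;   /* byte offset of the PTE */

	/* One 4-byte PTE per 4KB page, so off / 4 must equal the page number. */
	printf("va 0x%08lx -> PTE byte offset 0x%lx (PTE #%lu, page #%lu)\n",
	    va, off, off / 4, va >> PGSHIFT);
	return 0;
}

For the example address this prints byte offset 0x48, i.e. the 18th PTE, which matches page number 18 (0x12345 >> 12).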
- */ - if (pmap == kernel_pmap || pmap->pm_pptpt - == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE - + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) & - ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) { - ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE; - } else { - struct proc *p = curproc; - - /* If we don't have a valid curproc use proc0 */ - /* Perhaps we should just use kernel_pmap instead */ - if (p == NULL) - p = &proc0; -#ifdef DIAGNOSTIC - /* - * The pmap should always be valid for the process so - * panic if it is not. - */ - if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) { - printf("pmap_pte: va=%08lx p=%p vm=%p\n", - va, p, p->p_vmspace); - console_debugger(); - } - /* - * The pmap for the current process should be mapped. If it - * is not then we have a problem. - */ - if (p->p_vmspace->vm_map.pmap->pm_pptpt != - (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE - + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) + - (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) { - printf("pmap pagetable = P%08lx current = P%08x ", - pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE - + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) + - (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & - PG_FRAME)); - printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt); - panic("pmap_pte: current and pmap mismatch\n"); - } -#endif - - ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE; - pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE, - pmap->pm_pptpt); - cpu_tlb_flushD(); - } - PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp, - ((va >> (PGSHIFT-2)) & ~3))); - result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3)); - return(result); -} - -/* - * Routine: pmap_extract - * Function: - * Extract the physical page address associated - * with the given map/virtual_address pair. - */ -boolean_t -pmap_extract(pmap, va, pap) - pmap_t pmap; - vaddr_t va; - paddr_t *pap; -{ - pt_entry_t *pte; - paddr_t pa; - - PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va)); - - /* - * Get the pte for this virtual address. If there is no pte - * then there is no page table etc. - */ - - pte = pmap_pte(pmap, va); - if (!pte) - return(FALSE); - - /* Is the pte valid ? If not then no paged is actually mapped here */ - if (!pmap_pte_v(pte)) - return(FALSE); - - /* Return the physical address depending on the PTE type */ - /* XXX What about L1 section mappings ? */ - if ((*(pte) & L2_MASK) == L2_LPAGE) { - /* Extract the physical address from the pte */ - pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1); - - PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n", - (pa | (va & (L2_LPAGE_SIZE - 1))))); - - if (pap != NULL) - *pap = pa | (va & (L2_LPAGE_SIZE - 1)); - return (TRUE); - } else { - /* Extract the physical address from the pte */ - pa = pmap_pte_pa(pte); - - PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n", - (pa | (va & ~PG_FRAME)))); - - if (pap != NULL) - *pap = pa | (va & ~PG_FRAME); - return (TRUE); - } -} - - -/* - * Copy the range specified by src_addr/len from the source map to the - * range dst_addr/len in the destination map. - * - * This routine is only advisory and need not do anything. 
- */ - -void -pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) - pmap_t dst_pmap; - pmap_t src_pmap; - vm_offset_t dst_addr; - vm_size_t len; - vm_offset_t src_addr; -{ - PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n", - dst_pmap, src_pmap, dst_addr, len, src_addr)); -} - -#if defined(PMAP_DEBUG) -void -pmap_dump_pvlist(phys, m) - vm_offset_t phys; - char *m; -{ - struct pv_entry *pv; - int bank, off; - - if ((bank = vm_physseg_find(atop(phys), &off)) == -1) { - printf("INVALID PA\n"); - return; - } - pv = &vm_physmem[bank].pmseg.pvent[off]; - printf("%s %08lx:", m, phys); - if (pv->pv_pmap == NULL) { - printf(" no mappings\n"); - return; - } - - for (; pv; pv = pv->pv_next) - printf(" pmap %p va %08lx flags %08x", pv->pv_pmap, - pv->pv_va, pv->pv_flags); - - printf("\n"); -} - -#endif /* PMAP_DEBUG */ - -boolean_t -pmap_testbit(pa, setbits) - vm_offset_t pa; - int setbits; -{ - int bank, off; - - PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits)); - - if ((bank = vm_physseg_find(atop(pa), &off)) == -1) - return(FALSE); - - /* - * Check saved info only - */ - if (vm_physmem[bank].pmseg.attrs[off] & setbits) { - PDEBUG(0, printf("pmap_attributes = %02x\n", - vm_physmem[bank].pmseg.attrs[off])); - return(TRUE); - } - - return(FALSE); -} - - -/* - * Modify pte bits for all ptes corresponding to the given physical address. - * We use `maskbits' rather than `clearbits' because we're always passing - * constants and the latter would require an extra inversion at run-time. - */ - -void -pmap_clearbit(pa, maskbits) - vm_offset_t pa; - int maskbits; -{ - struct pv_entry *pv; - pt_entry_t *pte; - vm_offset_t va; - int bank, off; - int s; - - PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n", - pa, maskbits)); - if ((bank = vm_physseg_find(atop(pa), &off)) == -1) - return; - pv = &vm_physmem[bank].pmseg.pvent[off]; - s = splvm(); - - /* - * Clear saved attributes (modify, reference) - */ - vm_physmem[bank].pmseg.attrs[off] &= ~maskbits; - - if (pv->pv_pmap == NULL) { - splx(s); - return; - } - - /* - * Loop over all current mappings setting/clearing as appropos - */ - for (; pv; pv = pv->pv_next) { - va = pv->pv_va; - - /* - * XXX don't write protect pager mappings - */ - if (va >= uvm.pager_sva && va < uvm.pager_eva) { - printf("pmap_clearbit: bogon alpha\n"); - continue; - } - - pv->pv_flags &= ~maskbits; - pte = pmap_pte(pv->pv_pmap, va); - if (maskbits & (PT_Wr|PT_M)) - *pte = *pte & ~PT_AP(AP_W); - if (maskbits & PT_H) - *pte = (*pte & ~L2_MASK) | L2_INVAL; - } - cpu_tlb_flushID(); - - splx(s); -} - - -boolean_t -pmap_clear_modify(pg) - struct vm_page *pg; -{ - paddr_t pa = VM_PAGE_TO_PHYS(pg); - boolean_t rv; - - PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa)); - rv = pmap_testbit(pa, PT_M); - pmap_clearbit(pa, PT_M); - return rv; -} - - -boolean_t -pmap_clear_reference(pg) - struct vm_page *pg; -{ - paddr_t pa = VM_PAGE_TO_PHYS(pg); - boolean_t rv; - - PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa)); - rv = pmap_testbit(pa, PT_H); - pmap_clearbit(pa, PT_H); - return rv; -} - - -void -pmap_copy_on_write(pa) - vm_offset_t pa; -{ - PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa)); - pmap_clearbit(pa, PT_Wr); -} - - -boolean_t -pmap_is_modified(pg) - struct vm_page *pg; -{ - paddr_t pa = VM_PAGE_TO_PHYS(pg); - boolean_t result; - - result = pmap_testbit(pa, PT_M); - PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result)); - return (result); -} - - -boolean_t -pmap_is_referenced(pg) - struct vm_page *pg; -{ - paddr_t pa = 
VM_PAGE_TO_PHYS(pg); - boolean_t result; - - result = pmap_testbit(pa, PT_H); - PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result)); - return (result); -} - - -int -pmap_modified_emulation(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pt_entry_t *pte; - vm_offset_t pa; - int bank, off; - struct pv_entry *pv; - u_int flags; - - PDEBUG(2, printf("pmap_modified_emulation\n")); - - /* Get the pte */ - pte = pmap_pte(pmap, va); - if (!pte) { - PDEBUG(2, printf("no pte\n")); - return(0); - } - - PDEBUG(1, printf("*pte=%08x\n", *pte)); - - /* Check for a zero pte */ - if (*pte == 0) - return(0); - - /* This can happen if user code tries to access kernel memory. */ - if ((*pte & PT_AP(AP_W)) != 0) - return (0); - - /* Extract the physical address of the page */ - pa = pmap_pte_pa(pte); - if ((bank = vm_physseg_find(atop(pa), &off)) == -1) - return(0); - - /* Get the current flags for this page. */ - pv = &vm_physmem[bank].pmseg.pvent[off]; - flags = pmap_modify_pv(pmap, va, pv, 0, 0); - PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags)); - - /* - * Do the flags say this page is writable ? If not then it is a - * genuine write fault. If yes then the write fault is our fault - * as we did not reflect the write access in the PTE. Now we know - * a write has occurred we can correct this and also set the - * modified bit - */ - if (~flags & PT_Wr) - return(0); - - PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n", - va, pte, *pte)); - vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M; - *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W); - PDEBUG(0, printf("->(%08x)\n", *pte)); - - /* Return, indicating the problem has been dealt with */ - cpu_tlb_flushID_SE(va); - return(1); -} - - -int -pmap_handled_emulation(pmap, va) - pmap_t pmap; - vm_offset_t va; -{ - pt_entry_t *pte; - vm_offset_t pa; - int bank, off; - - PDEBUG(2, printf("pmap_handled_emulation\n")); - - /* Get the pte */ - pte = pmap_pte(pmap, va); - if (!pte) { - PDEBUG(2, printf("no pte\n")); - return(0); - } - - PDEBUG(1, printf("*pte=%08x\n", *pte)); - - /* Check for a zero pte */ - if (*pte == 0) - return(0); - - /* This can happen if user code tries to access kernel memory. */ - if ((*pte & L2_MASK) != L2_INVAL) - return (0); - - /* Extract the physical address of the page */ - pa = pmap_pte_pa(pte); - if ((bank = vm_physseg_find(atop(pa), &off)) == -1) - return(0); - - /* - * Ok we just enable the pte and mark the attibs as handled - */ - PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n", - va, pte, *pte)); - vm_physmem[bank].pmseg.attrs[off] |= PT_H; - *pte = (*pte & ~L2_MASK) | L2_SPAGE; - PDEBUG(0, printf("->(%08x)\n", *pte)); - - /* Return, indicating the problem has been dealt with */ - cpu_tlb_flushID_SE(va); - return(1); -} - -/* - * pmap_collect: free resources held by a pmap - * - * => optional function. - * => called when a process is swapped out to free memory. - */ - -void -pmap_collect(pmap) - pmap_t pmap; -{ -} - -/* - * Routine: pmap_procwr - * - * Function: - * Synchronize caches corresponding to [addr, addr+len) in p. - * - */ -void -pmap_procwr(p, va, len) - struct proc *p; - vm_offset_t va; - u_long len; -{ - /* We only need to do anything if it is the current process. 
*/ - if (p == curproc) - cpu_cache_syncI_rng(va, len); -} - -/* End of pmap.c */ diff --git a/sys/arch/arm32/arm32/sys_machdep.c b/sys/arch/arm32/arm32/sys_machdep.c deleted file mode 100644 index 0416b3544dda..000000000000 --- a/sys/arch/arm32/arm32/sys_machdep.c +++ /dev/null @@ -1,114 +0,0 @@ -/* $NetBSD: sys_machdep.c,v 1.12 2000/06/29 08:52:58 mrg Exp $ */ - -/* - * Copyright (c) 1995-1997 Mark Brinicombe. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * RiscBSD kernel project - * - * sys_machdep.c - * - * Machine dependant syscalls - * - * Created : 10/01/96 - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static int -arm32_sync_icache(p, args, retval) - struct proc *p; - char *args; - register_t *retval; -{ - struct arm32_sync_icache_args ua; - int error; - - if ((error = copyin(args, &ua, sizeof(ua))) != 0) - return (error); - - cpu_cache_syncI_rng(ua.addr, ua.len); - - *retval = 0; - return(0); -} - -static int -arm32_drain_writebuf(p, args, retval) - struct proc *p; - char *args; - register_t *retval; -{ - /* No args. 
*/ - - cpu_drain_writebuf(); - - *retval = 0; - return(0); -} - -int -sys_sysarch(p, v, retval) - struct proc *p; - void *v; - register_t *retval; -{ - struct sys_sysarch_args /* { - syscallarg(int) op; - syscallarg(void *) parms; - } */ *uap = v; - int error = 0; - - switch(SCARG(uap, op)) { - case ARM32_SYNC_ICACHE : - error = arm32_sync_icache(p, SCARG(uap, parms), retval); - break; - - case ARM32_DRAIN_WRITEBUF : - error = arm32_drain_writebuf(p, SCARG(uap, parms), retval); - break; - - default: - error = EINVAL; - break; - } - return (error); -} - -/* End of sys_machdep.c */ diff --git a/sys/arch/arm32/arm32/syscall.c b/sys/arch/arm32/arm32/syscall.c deleted file mode 100644 index 730f9b9d58a8..000000000000 --- a/sys/arch/arm32/arm32/syscall.c +++ /dev/null @@ -1,295 +0,0 @@ -/* $NetBSD: syscall.c,v 1.39 2001/02/28 18:15:44 bjh21 Exp $ */ - -/*- - * Copyright (c) 2000 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Charles M. Hannum. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. - * 4. Neither the name of The NetBSD Foundation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe - * for the NetBSD Project. - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * syscall entry handling - * - * Created : 09/11/94 - */ - -#include "opt_ktrace.h" -#include "opt_syscall_debug.h" - -#include -#include -#include -#include -#include -#include -#include -#ifdef KTRACE -#include -#endif - -#include - -#include -#include -#include -#include - -u_int arm700bugcount = 0; - -/* - * syscall(frame): - * - * System call request from POSIX system call gate interface to kernel. - */ -void -syscall(frame, code) - trapframe_t *frame; - int code; -{ - caddr_t stackargs; - const struct sysent *callp; - struct proc *p; - int error; - u_int argsize; - int *args, copyargs[8], rval[2]; - int regparams; - - /* - * Enable interrupts if they were enabled before the exception. - * Since all syscalls *should* come from user mode it will always - * be safe to enable them, but check anyway. - */ - if (!(frame->tf_spsr & I32_bit)) - enable_interrupts(I32_bit); - -#ifdef DEBUG - if ((GetCPSR() & PSR_MODE) != PSR_SVC32_MODE) - panic("syscall: not in SVC32 mode"); -#endif /* DEBUG */ - - uvmexp.syscalls++; - p = curproc; - p->p_addr->u_pcb.pcb_tf = frame; - -#ifdef CPU_ARM7 - /* - * This code is only needed if we are including support for the ARM7 - * core. Other CPUs do not need it but it does not hurt. - */ - - /* - * ARM700/ARM710 match sticks and sellotape job ... - * - * I know this affects GPS/VLSI ARM700/ARM710 + various ARM7500. - * - * On occasion data aborts are mishandled and end up calling - * the swi vector. - * - * If the instruction that caused the exception is not a SWI - * then we hit the bug. - */ - if ((ReadWord(frame->tf_pc - INSN_SIZE) & 0x0f000000) != 0x0f000000) { - frame->tf_pc -= INSN_SIZE; - ++arm700bugcount; - userret(p); - return; - } -#endif /* CPU_ARM7 */ - - /* - * Support for architecture dependant SWIs - */ - if (code & 0x00f00000) { - /* - * Support for the Architecture defined SWI's in case the - * processor does not support them. 
- */ - switch (code) { - case 0x00f00000 : /* IMB */ - case 0x00f00001 : /* IMB_range */ - /* - * Do nothing as there is no prefetch unit that needs - * flushing - */ - break; - default: - /* Undefined so illegal instruction */ - trapsignal(p, SIGILL, ReadWord(frame->tf_pc - INSN_SIZE)); - break; - } - - userret(p); - return; - } - - stackargs = (caddr_t)&frame->tf_r0; - regparams = 4 * sizeof(int); - callp = p->p_emul->e_sysent; - - switch (code) { - case SYS_syscall: - /* Don't have to look in user space, we have it in the trapframe */ -/* code = fuword(stackargs);*/ - code = ReadWord(stackargs); - stackargs += sizeof(int); - regparams -= sizeof(int); - break; - - case SYS___syscall: - if (callp != sysent) - break; - - /* Since this will be a register we look in the trapframe not user land */ -/* code = fuword(stackargs + _QUAD_LOWWORD * sizeof(int));*/ - code = ReadWord(stackargs + _QUAD_LOWWORD * sizeof(int)); - stackargs += sizeof(quad_t); - regparams -= sizeof(quad_t); - break; - - default: - /* do nothing by default */ - break; - } - - code &= (SYS_NSYSENT - 1); - callp += code; - argsize = callp->sy_argsize; - if (argsize <= regparams) - args = (int *)stackargs; - else { - args = copyargs; - bcopy(stackargs, (caddr_t)args, regparams); - error = copyin((caddr_t)frame->tf_usr_sp, - (caddr_t)args + regparams, argsize - regparams); - if (error) - goto bad; - } - -#ifdef SYSCALL_DEBUG - scdebug_call(p, code, callp->sy_narg, args); -#endif -#ifdef KTRACE - if (KTRPOINT(p, KTR_SYSCALL)) - ktrsyscall(p, code, argsize, args); -#endif - - rval[0] = 0; - rval[1] = 0; - error = (*callp->sy_call)(p, args, rval); - - switch (error) { - case 0: - frame->tf_r0 = rval[0]; - frame->tf_r1 = rval[1]; - frame->tf_spsr &= ~PSR_C_bit; /* carry bit */ - break; - - case ERESTART: - /* - * Reconstruct the pc to point at the swi. - */ - frame->tf_pc -= INSN_SIZE; - break; - - case EJUSTRETURN: - /* nothing to do */ - break; - - default: - bad: - frame->tf_r0 = error; - frame->tf_spsr |= PSR_C_bit; /* carry bit */ - break; - } - -#ifdef SYSCALL_DEBUG - scdebug_ret(p, code, error, rval); -#endif - userret(p); -#ifdef KTRACE - if (KTRPOINT(p, KTR_SYSRET)) - ktrsysret(p, code, error, rval[0]); -#endif -} - -void -child_return(arg) - void *arg; -{ - struct proc *p = arg; - struct trapframe *frame = p->p_addr->u_pcb.pcb_tf; - - frame->tf_r0 = 0; - frame->tf_spsr &= ~PSR_C_bit; /* carry bit */ - - userret(p); -#ifdef KTRACE - if (KTRPOINT(p, KTR_SYSRET)) - ktrsysret(p, SYS_fork, 0, 0); -#endif -} - -/* End of syscall.c */
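
For readers tracing the removed pmap_pte() in the pmap.c hunk above: the expression ((va >> (PGSHIFT - 2)) & ~3) is simply the byte offset of va's 4-byte L2 entry inside the 4MB page-table window mapped at PROCESS_PAGE_TBLS_BASE (or ALT_PAGE_TBLS_BASE for a non-current pmap). Below is a minimal userland sketch of that arithmetic only; it assumes 4KB pages (PGSHIFT == 12) and uses a made-up window base, since the real base constants are not shown in this diff.

#include <stdio.h>

#define PGSHIFT   12                    /* assumed: 4KB pages, 4-byte PTEs */
#define PT_WINDOW 0xefc00000UL          /* hypothetical stand-in for PROCESS_PAGE_TBLS_BASE */

int
main(void)
{
	unsigned long va = 0x12345678UL;

	/*
	 * One 4-byte PTE per 4KB page, so the PTE index is va >> 12 and the
	 * byte offset is (va >> 12) * 4 == va >> 10; masking with ~3 keeps
	 * the offset word aligned, matching pmap_pte()'s
	 * ((va >> (PGSHIFT - 2)) & ~3).
	 */
	unsigned long off = (va >> (PGSHIFT - 2)) & ~3UL;

	printf("PTE for va %#lx is at window offset %#lx (address %#lx)\n",
	    va, off, PT_WINDOW + off);
	return 0;
}

With 4KB pages the window holds 4GB / 4KB = 1M entries of 4 bytes each, i.e. exactly the 4MB region the comment in pmap_pte() describes.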
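
The ARM32_SYNC_ICACHE and ARM32_DRAIN_WRITEBUF operations removed with sys_machdep.c above are reached from userland through sysarch(2). The following is a hedged sketch of a caller, assuming a NetBSD/arm userland whose <machine/sysarch.h> declares sysarch() and a struct arm32_sync_icache_args with the addr and len members the deleted arm32_sync_icache() handler copies in; the field types used here are assumptions, not taken from this diff.

#include <sys/types.h>
#include <machine/sysarch.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Ask the kernel to synchronize the I-cache over [start, start + len),
 * e.g. after writing instructions into a buffer; in the removed code this
 * ends up in the cpu_cache_syncI_rng() call made by arm32_sync_icache().
 */
static int
sync_icache(void *start, size_t len)
{
	struct arm32_sync_icache_args ua;

	ua.addr = (u_int)(uintptr_t)start;	/* assumed u_int-sized field */
	ua.len = len;
	return sysarch(ARM32_SYNC_ICACHE, &ua);
}

int
main(void)
{
	static unsigned int code[16];		/* pretend this was just patched */

	if (sync_icache(code, sizeof(code)) != 0)
		perror("sysarch(ARM32_SYNC_ICACHE)");
	return 0;
}

ARM32_DRAIN_WRITEBUF takes no argument structure in the deleted handler, so a sysarch(ARM32_DRAIN_WRITEBUF, NULL) call would be the analogous sketch for that operation.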