From 022e55668b77cc32f93afb33ba9addcbe81da37e Mon Sep 17 00:00:00 2001
From: matt
Date: Mon, 13 Jun 2011 21:19:01 +0000
Subject: [PATCH] Add __HAVE_CPU_UAREA_ROUTINES support so that uareas will be
 direct-mapped.  (This avoids the nasty TLB recursion problem on ibm4xx as
 well as on mpc85xx.)

---
 sys/arch/powerpc/include/cpu.h        |  9 ++--
 sys/arch/powerpc/include/types.h      |  3 +-
 sys/arch/powerpc/powerpc/vm_machdep.c | 62 ++++++++++++++++++++++++++-
 3 files changed, 67 insertions(+), 7 deletions(-)

diff --git a/sys/arch/powerpc/include/cpu.h b/sys/arch/powerpc/include/cpu.h
index 4d0b1bb35794..6f0104378bd9 100644
--- a/sys/arch/powerpc/include/cpu.h
+++ b/sys/arch/powerpc/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.78 2011/06/12 16:27:14 matt Exp $	*/
+/*	$NetBSD: cpu.h,v 1.79 2011/06/13 21:19:01 matt Exp $	*/
 
 /*
  * Copyright (C) 1999 Wolfgang Solfrank.
@@ -357,9 +357,8 @@ extern int cpu_timebase;
 extern int cpu_printfataltraps;
 extern char cpu_model[];
 
-void	cpu_uarea_remap(struct lwp *);
-struct cpu_info *cpu_attach_common(struct device *, int);
-void	cpu_setup(struct device *, struct cpu_info *);
+struct cpu_info *cpu_attach_common(device_t, int);
+void	cpu_setup(device_t, struct cpu_info *);
 void	cpu_identify(char *, size_t);
 int	cpu_get_dfs(void);
 void	cpu_set_dfs(int);
@@ -397,6 +396,8 @@ void	cpu_spinup_trampoline(void);
 
 #define	DELAY(n)	delay(n)
 
+void *	cpu_uarea_alloc(bool);
+bool	cpu_uarea_free(void *);
 void	cpu_need_resched(struct cpu_info *, int);
 void	cpu_signotify(struct lwp *);
 void	cpu_need_proftick(struct lwp *);
diff --git a/sys/arch/powerpc/include/types.h b/sys/arch/powerpc/include/types.h
index 1e2ab6f1b7cf..030494dbf759 100644
--- a/sys/arch/powerpc/include/types.h
+++ b/sys/arch/powerpc/include/types.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.42 2011/06/12 06:10:41 matt Exp $	*/
+/*	$NetBSD: types.h,v 1.43 2011/06/13 21:19:02 matt Exp $	*/
 
 /*-
  * Copyright (C) 1995 Wolfgang Solfrank.
@@ -75,6 +75,7 @@ typedef volatile __uint32_t __cpuset_t;
 #define	__HAVE_CPU_COUNTER
 #define	__HAVE_SYSCALL_INTERN
 #define	__HAVE_CPU_DATA_FIRST
+#define	__HAVE_CPU_UAREA_ROUTINES
 #ifdef _LP64
 #define	__HAVE_ATOMIC64_OPS
 #endif
diff --git a/sys/arch/powerpc/powerpc/vm_machdep.c b/sys/arch/powerpc/powerpc/vm_machdep.c
index 7ad9ee5fdf88..1a221fb120c0 100644
--- a/sys/arch/powerpc/powerpc/vm_machdep.c
+++ b/sys/arch/powerpc/powerpc/vm_machdep.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $	*/
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $");
 
 #include "opt_altivec.h"
 #include "opt_multiprocessor.h"
@@ -281,3 +281,61 @@ vunmapbuf(struct buf *bp, vsize_t len)
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = 0;
 }
+
+#ifdef __HAVE_CPU_UAREA_ROUTINES
+void *
+cpu_uarea_alloc(bool system)
+{
+	struct pglist pglist;
+	int error;
+
+	/*
+	 * Allocate a new physically contiguous uarea which can be
+	 * direct-mapped.
+	 */
+	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
+	if (error) {
+#ifdef _LP64
+		if (!system)
+			return NULL;
+#endif
+		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
+	}
+
+	/*
+	 * Get the physical address from the first page.
+	 */
+	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
+	KASSERT(pg != NULL);
+	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+	/*
+	 * We need to return a direct-mapped VA for the pa.  But since
+	 * we map va == pa 1:1, that's easy.
+	 */
+
+	return (void *)(uintptr_t) pa;
+}
+
+/*
+ * Return true if we freed it, false if we didn't.
+ */
+bool
+cpu_uarea_free(void *vva)
+{
+	vaddr_t va = (vaddr_t) vva;
+	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
+		return false;
+
+	/*
+	 * Since the pages are physically contiguous, the vm_page structures
+	 * will be as well.
+	 */
+	struct vm_page *pg = PHYS_TO_VM_PAGE((paddr_t)va);
+	KASSERT(pg != NULL);
+	for (size_t i = 0; i < UPAGES; i++, pg++) {
+		uvm_pagefree(pg);
+	}
+	return true;
+}
+#endif /* __HAVE_CPU_UAREA_ROUTINES */
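
Editor's note (not part of the patch): the contract these hooks implement is
that cpu_uarea_alloc() may decline (return NULL), in which case the MI uarea
allocator falls back to an ordinary kernel-virtual mapping, and that
cpu_uarea_free() returns false for any address inside
[VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) so such a fallback mapping is
released by the generic path instead.  The sketch below illustrates that
contract; it is a hedged approximation, not the actual MI code in
uvm_glue.c, and the *_sketch helper names are invented for illustration.

/*
 * Minimal consumer sketch, assuming a NULL return from cpu_uarea_alloc()
 * means "no direct-mapped uarea available, use the generic path".
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>

static vaddr_t
uarea_alloc_sketch(void)
{
#ifdef __HAVE_CPU_UAREA_ROUTINES
	/* Preferred path: a direct-mapped uarea (no TLB recursion hazard). */
	void *va = cpu_uarea_alloc(false);
	if (va != NULL)
		return (vaddr_t)va;
#endif
	/* Fallback: wired mapping in kernel virtual address space. */
	return uvm_km_alloc(kernel_map, USPACE, 0, UVM_KMF_WIRED);
}

static void
uarea_free_sketch(vaddr_t va)
{
#ifdef __HAVE_CPU_UAREA_ROUTINES
	/* True only if the MD code owned this (direct-mapped) uarea. */
	if (cpu_uarea_free((void *)va))
		return;
#endif
	uvm_km_free(kernel_map, va, USPACE, UVM_KMF_WIRED);
}

The range check in cpu_uarea_free() is what lets the two paths compose: a
direct-mapped uarea is addressed by its physical address, which lies outside
the kernel virtual range, so any address inside that range must have come
from the fallback allocator.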