Add __HAVE_CPU_UAREA_ROUTINES support so that uareas will be direct-mapped.

(This avoids the nasty TLB recursion problem on ibm4xx as well as on mpc85xx.)
commit 022e55668b
parent 9a80b9608f
author matt, 2011-06-13 21:19:01 +00:00
3 changed files with 67 additions and 7 deletions
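
For context: when a port defines __HAVE_CPU_UAREA_ROUTINES, the machine-independent uarea allocator is expected to offer each allocation to the MD hooks first and fall back to the normal kernel-map path only if they decline. A minimal sketch of that MI side, assuming NetBSD's pool-backed uarea allocator; the wrapper name uarea_poolpage_alloc is illustrative and is not part of the diff below (pool_page_alloc is the stock pool backend):

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/cpu.h>

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{
#ifdef __HAVE_CPU_UAREA_ROUTINES
        /* Offer the allocation to the MD code first. */
        void *va = cpu_uarea_alloc(false);      /* false: not a system lwp */
        if (va != NULL)
                return va;      /* direct-mapped; needs no kernel-map TLB entry */
#endif
        /* Fall back to an ordinary kernel-map allocation. */
        return pool_page_alloc(pp, flags);
}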

Index: cpu.h

@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.78 2011/06/12 16:27:14 matt Exp $ */
+/* $NetBSD: cpu.h,v 1.79 2011/06/13 21:19:01 matt Exp $ */
 
 /*
  * Copyright (C) 1999 Wolfgang Solfrank.
@@ -357,9 +357,8 @@ extern int cpu_timebase;
 extern int cpu_printfataltraps;
 extern char cpu_model[];
 
-void cpu_uarea_remap(struct lwp *);
-struct cpu_info *cpu_attach_common(struct device *, int);
-void cpu_setup(struct device *, struct cpu_info *);
+struct cpu_info *cpu_attach_common(device_t, int);
+void cpu_setup(device_t, struct cpu_info *);
 void cpu_identify(char *, size_t);
 int cpu_get_dfs(void);
 void cpu_set_dfs(int);
@@ -397,6 +396,8 @@ void cpu_spinup_trampoline(void);
 #define DELAY(n) delay(n)
 
+void * cpu_uarea_alloc(bool);
+bool cpu_uarea_free(void *);
 void cpu_need_resched(struct cpu_info *, int);
 void cpu_signotify(struct lwp *);
 void cpu_need_proftick(struct lwp *);
 
Index: types.h

@@ -1,4 +1,4 @@
-/* $NetBSD: types.h,v 1.42 2011/06/12 06:10:41 matt Exp $ */
+/* $NetBSD: types.h,v 1.43 2011/06/13 21:19:02 matt Exp $ */
 
 /*-
  * Copyright (C) 1995 Wolfgang Solfrank.
@@ -75,6 +75,7 @@ typedef volatile __uint32_t __cpuset_t;
 #define __HAVE_CPU_COUNTER
 #define __HAVE_SYSCALL_INTERN
 #define __HAVE_CPU_DATA_FIRST
+#define __HAVE_CPU_UAREA_ROUTINES
 #ifdef _LP64
 #define __HAVE_ATOMIC64_OPS
 #endif
Index: vm_machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $ */
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $");
 
 #include "opt_altivec.h"
 #include "opt_multiprocessor.h"
@@ -281,3 +281,61 @@ vunmapbuf(struct buf *bp, vsize_t len)
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = 0;
 }
+
+#ifdef __HAVE_CPU_UAREA_ROUTINES
+void *
+cpu_uarea_alloc(bool system)
+{
+	struct pglist pglist;
+	int error;
+
+	/*
+	 * Allocate a new physically contiguous uarea which can be
+	 * direct-mapped.
+	 */
+	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
+	if (error) {
+#ifdef _LP64
+		if (!system)
+			return NULL;
+#endif
+		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
+	}
+
+	/*
+	 * Get the physical address from the first page.
+	 */
+	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
+	KASSERT(pg != NULL);
+	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+	/*
+	 * We need to return a direct-mapped VA for the pa.  But since
+	 * we map va=pa 1:1, that's easy.
+	 */
+	return (void *)(uintptr_t) pa;
+}
+
+/*
+ * Return true if we freed it, false if we didn't.
+ */
+bool
+cpu_uarea_free(void *vva)
+{
+	vaddr_t va = (vaddr_t) vva;
+
+	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
+		return false;
+
+	/*
+	 * Since the pages are physically contiguous, the vm_page
+	 * structures will be as well.
+	 */
+	struct vm_page *pg = PHYS_TO_VM_PAGE((paddr_t)va);
+	KASSERT(pg != NULL);
+	for (size_t i = 0; i < UPAGES; i++, pg++) {
+		uvm_pagefree(pg);
+	}
+	return true;
+}
+#endif /* __HAVE_CPU_UAREA_ROUTINES */
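
The boolean result matters on the free side: a direct-mapped uarea lies outside [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS), so the range check above cleanly distinguishes it from a uarea that was mapped through the kernel map, and a false return tells the caller to take the ordinary unmap-and-free path. A matching sketch of that caller, again with an illustrative name (uarea_poolpage_free, pairing pool_page_free) rather than code from this commit:

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
#ifdef __HAVE_CPU_UAREA_ROUTINES
        /* true: the MD code owned the direct map and freed the pages. */
        if (cpu_uarea_free(addr))
                return;
#endif
        /* false: an ordinary kernel-map uarea; free it the usual way. */
        pool_page_free(pp, addr);
}

The payoff, per the commit message, is that on ibm4xx and mpc85xx the uarea now lives in the 1:1-mapped region covered by pinned TLB entries, so the kernel can touch it from trap and TLB-miss paths without taking, and recursing on, further TLB misses.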