Allocate uareas contiguously and access them via the direct map.

This commit is contained in:
chs 2012-01-21 16:48:56 +00:00
parent 461609b5cd
commit e8223ac8ab
3 changed files with 64 additions and 5 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.59 2008/12/30 12:35:23 pooka Exp $ */ /* $NetBSD: cpu.h,v 1.60 2012/01/21 16:48:56 chs Exp $ */
/*- /*-
* Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 1990 The Regents of the University of California.
@ -89,6 +89,9 @@ cpu_set_curpri(int pri)
#define CLKF_INTR(frame) (curcpu()->ci_idepth > 0) #define CLKF_INTR(frame) (curcpu()->ci_idepth > 0)
#define LWP_PC(l) ((l)->l_md.md_regs->tf_rip) #define LWP_PC(l) ((l)->l_md.md_regs->tf_rip)
void *cpu_uarea_alloc(bool);
bool cpu_uarea_free(void *);
#endif /* _KERNEL */ #endif /* _KERNEL */
#else /* __x86_64__ */ #else /* __x86_64__ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: types.h,v 1.40 2011/12/04 16:24:13 chs Exp $ */ /* $NetBSD: types.h,v 1.41 2012/01/21 16:48:56 chs Exp $ */
/*- /*-
* Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 1990 The Regents of the University of California.
@ -99,6 +99,7 @@ typedef volatile unsigned char __cpu_simple_lock_t;
#define __HAVE_DIRECT_MAP 1 #define __HAVE_DIRECT_MAP 1
#define __HAVE_MM_MD_DIRECT_MAPPED_IO #define __HAVE_MM_MD_DIRECT_MAPPED_IO
#define __HAVE_MM_MD_DIRECT_MAPPED_PHYS #define __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#define __HAVE_CPU_UAREA_ROUTINES
#endif #endif
#endif #endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $ */ /* $NetBSD: vm_machdep.c,v 1.14 2012/01/21 16:48:57 chs Exp $ */
/*- /*-
* Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1982, 1986 The Regents of the University of California.
@ -80,7 +80,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $"); __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.14 2012/01/21 16:48:57 chs Exp $");
#include "opt_mtrr.h" #include "opt_mtrr.h"
@ -93,7 +93,7 @@ __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $"
#include <sys/exec.h> #include <sys/exec.h>
#include <sys/ptrace.h> #include <sys/ptrace.h>
#include <uvm/uvm_extern.h> #include <uvm/uvm.h>
#include <machine/cpu.h> #include <machine/cpu.h>
#include <machine/gdt.h> #include <machine/gdt.h>
@ -356,3 +356,58 @@ vunmapbuf(struct buf *bp, vsize_t len)
bp->b_data = bp->b_saveaddr; bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0; bp->b_saveaddr = 0;
} }
#ifdef __HAVE_CPU_UAREA_ROUTINES
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Grab USPACE bytes of physically contiguous pages from the
	 * range [0, ptoa(physmem)) so the uarea can be reached through
	 * the direct map.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error != 0) {
		return NULL;
	}

	/*
	 * The allocation is contiguous, so the physical address of the
	 * first page is the start of the whole area.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/* Hand back the direct-mapped virtual address for pa. */
	return (void *)PMAP_MAP_POOLPAGE(pa);
}
/*
 * Free a uarea previously handed out by cpu_uarea_alloc().
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *vva)
{
	const vaddr_t va = (vaddr_t)vva;

	/*
	 * Addresses inside the kernel VA range were not allocated via
	 * the direct map; let the caller dispose of those instead.
	 */
	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
		return false;
	}

	/*
	 * Since the pages are physically contiguous, the vm_page
	 * structures are contiguous as well; walk them and free each one.
	 */
	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
	KASSERT(pg != NULL);
	for (struct vm_page *p = pg, *const end = pg + UPAGES; p < end; p++) {
		uvm_pagefree(p);
	}
	return true;
}
#endif /* __HAVE_CPU_UAREA_ROUTINES */