Use direct map if available in linux_kmap.

Yields 20% increase in glxgears framerate.
riastradh 2014-08-27 16:41:50 +00:00
parent d9db38b5ac
commit ff92100382
1 changed file with 38 additions and 8 deletions
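For context, a minimal caller-side sketch of the kmap_atomic()/kunmap_atomic() idiom this change speeds up. copy_into_page() and its arguments are hypothetical and not part of this commit; the point is that on a direct-map platform the pair below no longer takes the atomic-kmap spin lock or does a pmap enter/remove per call.

	/*
	 * Hypothetical caller, illustrative only: copy data into a page
	 * through a short-lived kernel mapping.  With the direct map,
	 * kmap_atomic() returns the direct-mapped address immediately
	 * and kunmap_atomic() has nothing to tear down.
	 */
	static void
	copy_into_page(struct page *page, const void *src, size_t len)
	{
		void *va = kmap_atomic(page);

		memcpy(va, src, len);
		kunmap_atomic(va);
	}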

@@ -1,4 +1,4 @@
-/* $NetBSD: linux_kmap.c,v 1.9 2014/08/27 16:19:54 riastradh Exp $ */
+/* $NetBSD: linux_kmap.c,v 1.10 2014/08/27 16:41:50 riastradh Exp $ */
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -30,13 +30,17 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.9 2014/08/27 16:19:54 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.10 2014/08/27 16:41:50 riastradh Exp $");
 #include <sys/types.h>
 #include <sys/kmem.h>
 #include <sys/mutex.h>
 #include <sys/rbtree.h>
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+#include <dev/mm.h>
+#endif
 #include <uvm/uvm_extern.h>
 #include <linux/highmem.h>
@@ -48,12 +52,6 @@ __KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.9 2014/08/27 16:19:54 riastradh Exp
  * use at a time.
  */
-/*
- * XXX Use direct-mapped physical pages where available, e.g. amd64.
- *
- * XXX ...or add an abstraction to uvm for this. (uvm_emap?)
- */
 static kmutex_t linux_kmap_atomic_lock;
 static vaddr_t linux_kmap_atomic_vaddr;
@@ -146,6 +144,11 @@ kmap_atomic(struct page *page)
 	const paddr_t paddr = uvm_vm_page_to_phys(&page->p_vmp);
 	vaddr_t vaddr;
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+	if (mm_md_direct_mapped_phys(paddr, &vaddr))
+		return (void *)vaddr;
+#endif
 	mutex_spin_enter(&linux_kmap_atomic_lock);
 	KASSERT(linux_kmap_atomic_vaddr != 0);
 	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
@@ -161,6 +164,19 @@ kunmap_atomic(void *addr)
 {
 	const vaddr_t vaddr = (vaddr_t)addr;
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+	{
+		paddr_t paddr;
+		vaddr_t vaddr1;
+		bool ok __diagused;
+		ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
+		KASSERT(ok);
+		if (mm_md_direct_mapped_phys(paddr, &vaddr1) && vaddr1 == vaddr)
+			return;
+	}
+#endif
 	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
 	KASSERT(linux_kmap_atomic_vaddr == vaddr);
 	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));
@@ -179,6 +195,11 @@ kmap(struct page *page)
 	ASSERT_SLEEPABLE();
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+	if (mm_md_direct_mapped_phys(paddr, &vaddr))
+		return (void *)vaddr;
+#endif
 	vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
 	KASSERT(vaddr != 0);
@@ -208,6 +229,15 @@ kunmap(struct page *page)
 	ASSERT_SLEEPABLE();
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+	{
+		vaddr_t vaddr1;
+		if (mm_md_direct_mapped_phys(paddr, &vaddr1))
+			return;
+	}
+#endif
 	mutex_enter(&linux_kmap_lock);
 	struct linux_kmap_entry *const lke =
 	    rb_tree_find_node(&linux_kmap_entries, &paddr);