A small speedup: when mapping memory in bus_dmamem_map, if the memory is
cacheable and lives in a single segment (i.e. is physically contiguous),
don't bother allocating KVA for it; just use the physical address as the VA.
This commit is contained in:
matt 2011-06-08 15:19:57 +00:00
parent bae8a3a858
commit ca37a5ee9e
1 changed file with 22 additions and 7 deletions
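
For context on why the fast path below consumes no KVA: on ports that provide
PMAP_MAP_POOLPAGE, mapping a pool page is an address translation rather than
an allocation, and per the commit message the physical address doubles as the
virtual address here. A minimal sketch, assuming an identity direct map (these
simplified definitions are illustrative; the real ones are machine-dependent):

	/*
	 * Illustrative only: with an identity direct map, "mapping" a
	 * physically contiguous buffer is just reinterpreting its
	 * physical address, so no kernel VA space is consumed and
	 * there is nothing to release at unmap time.
	 */
	#define PMAP_MAP_POOLPAGE(pa)	((vaddr_t)(pa))
	#define PMAP_UNMAP_POOLPAGE(va)	((paddr_t)(va))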


@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.39 2011/06/08 15:19:57 matt Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
 #define _POWERPC_BUS_DMA_PRIVATE
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.39 2011/06/08 15:19:57 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -601,6 +601,19 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size
 	size = round_page(size);
 
+#ifdef PMAP_MAP_POOLPAGE
+	/*
+	 * If we are mapping a cacheable physically contiguous segment, treat
+	 * it as if we are mapping a poolpage and avoid consuming any KVAs.
+	 */
+	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
+		KASSERT(size == segs->ds_len);
+		addr = BUS_MEM_TO_PHYS(t, segs->ds_addr);
+		*kvap = (void *)PMAP_MAP_POOLPAGE(addr);
+		return 0;
+	}
+#endif
+
 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
 
 	if (va == 0)
@@ -639,16 +652,18 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size
 void
 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
 {
+	vaddr_t va = (vaddr_t) kva;
+
 #ifdef DIAGNOSTIC
-	if ((u_long)kva & PGOFSET)
+	if (va & PGOFSET)
 		panic("_bus_dmamem_unmap");
 #endif
 
-	size = round_page(size);
-	pmap_kremove((vaddr_t)kva, size);
-	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
+		size = round_page(size);
+		pmap_kremove(va, size);
+		uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
+	}
 }
 
 /*
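
A note on the unmap side: nothing records whether a given buffer took the
fast path, so _bus_dmamem_unmap() has to distinguish the two cases by address
range. A hedged, self-contained illustration of that test (the helper name is
hypothetical, not part of the commit):

	#include <sys/types.h>
	#include <uvm/uvm_extern.h>

	/*
	 * KVA handed out by uvm_km_alloc() lies inside the kernel_map
	 * range, while an identity direct mapping of physical memory
	 * lies outside it, so a simple range check tells the cases
	 * apart with no extra bookkeeping.
	 */
	static bool
	example_is_kva_mapping(vaddr_t va)
	{
		return va >= VM_MIN_KERNEL_ADDRESS &&
		    va < VM_MAX_KERNEL_ADDRESS;
	}

Mappings that fail this check were produced by PMAP_MAP_POOLPAGE, so there is
no KVA to tear down and unmap is a no-op for them.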