From ca37a5ee9e2b6f9dfe2ebb331a08ddd2bf8b7da1 Mon Sep 17 00:00:00 2001
From: matt
Date: Wed, 8 Jun 2011 15:19:57 +0000
Subject: [PATCH] A small speedup, when mapping memory in bus_dmamem_map, if
 the memory is cacheable and is in a single segment (physically contiguous)
 don't bother allocating KVA for it, just use the physical address as the VA.

---
 sys/arch/powerpc/powerpc/bus_dma.c | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/sys/arch/powerpc/powerpc/bus_dma.c b/sys/arch/powerpc/powerpc/bus_dma.c
index c2d339742d7b..4106fcbe4ab1 100644
--- a/sys/arch/powerpc/powerpc/bus_dma.c
+++ b/sys/arch/powerpc/powerpc/bus_dma.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.39 2011/06/08 15:19:57 matt Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
 #define _POWERPC_BUS_DMA_PRIVATE
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.38 2011/01/18 01:02:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.39 2011/06/08 15:19:57 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -601,6 +601,19 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size
 
 	size = round_page(size);
 
+#ifdef PMAP_MAP_POOLPAGE
+	/*
+	 * If we are mapping a cacheable physically contiguous segment, treat
+	 * it as if we are mapping a poolpage and avoid consuming any KVAs.
+	 */
+	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
+		KASSERT(size == segs->ds_len);
+		addr = BUS_MEM_TO_PHYS(t, segs->ds_addr);
+		*kvap = (void *)PMAP_MAP_POOLPAGE(addr);
+		return 0;
+	}
+#endif
+
 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
 
 	if (va == 0)
@@ -639,16 +652,18 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size
 void
 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
 {
+	vaddr_t va = (vaddr_t) kva;
 
 #ifdef DIAGNOSTIC
-	if ((u_long)kva & PGOFSET)
+	if (va & PGOFSET)
 		panic("_bus_dmamem_unmap");
 #endif
 
-	size = round_page(size);
-
-	pmap_kremove((vaddr_t)kva, size);
-	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
+		size = round_page(size);
+		pmap_kremove(va, size);
+		uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
+	}
 }
 
 /*