More adjustments to deal with Xen's physical <=> machine addresses mappings:

- Allow _bus_dmamem_alloc_range to be provided from external source:
  Use a _PRIVATE_BUS_DMAMEM_ALLOC_RANGE macro, defined to
  _bus_dmamem_alloc_range by default.
- avail_end is the end of the physical address range. Define a macro
 _BUS_AVAIL_END (defined by default to avail_end) and use it instead.
This commit is contained in:
bouyer 2005-08-20 19:18:11 +00:00
parent 680596d08e
commit 7ce7159ca0
2 changed files with 82 additions and 69 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_private.h,v 1.1 2005/04/16 08:53:09 yamt Exp $ */ /* $NetBSD: bus_private.h,v 1.2 2005/08/20 19:18:11 bouyer Exp $ */
/* NetBSD: bus.h,v 1.8 2005/03/09 19:04:46 matt Exp */ /* NetBSD: bus.h,v 1.8 2005/03/09 19:04:46 matt Exp */
/*- /*-
@ -98,12 +98,12 @@ void _bus_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size);
paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
int nsegs, off_t off, int prot, int flags); int nsegs, off_t off, int prot, int flags);
#ifndef _PRIVATE_BUS_DMAMEM_ALLOC_RANGE
int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size, int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
bus_size_t alignment, bus_size_t boundary, bus_size_t alignment, bus_size_t boundary,
bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
paddr_t low, paddr_t high); bus_addr_t low, bus_addr_t high);
#endif
/* /*
* Cookie used for bounce buffers. A pointer to one of these is stashed in * Cookie used for bounce buffers. A pointer to one of these is stashed in
@ -173,4 +173,13 @@ _bus_virt_to_bus(struct pmap *pm, vaddr_t va)
} }
#endif /* !defined(_BUS_VIRT_TO_BUS) */ #endif /* !defined(_BUS_VIRT_TO_BUS) */
/*
* by default, the end address of RAM visible on bus is the same as the
* largest physical address.
*/
#ifndef _BUS_AVAIL_END
#define _BUS_AVAIL_END (avail_end)
#endif
#endif /* !defined(_X86_BUS_PRIVATE_H_) */ #endif /* !defined(_X86_BUS_PRIVATE_H_) */

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp $ */ /* $NetBSD: bus_dma.c,v 1.22 2005/08/20 19:18:11 bouyer Exp $ */
/*- /*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp $"); __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.22 2005/08/20 19:18:11 bouyer Exp $");
/* /*
* The following is included because _bus_dma_uiomove is derived from * The following is included because _bus_dma_uiomove is derived from
@ -139,6 +139,68 @@ static int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
static __inline int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t, static __inline int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t,
bus_addr_t, int); bus_addr_t, int);
#ifndef _PRIVATE_BUS_DMAMEM_ALLOC_RANGE
#define _PRIVATE_BUS_DMAMEM_ALLOC_RANGE _bus_dmamem_alloc_range
/*
 * Allocate physical memory suitable for DMA from the range [low, high].
 * Backend for the DMA-safe memory allocation methods; may be overridden
 * by defining _PRIVATE_BUS_DMAMEM_ALLOC_RANGE.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags, bus_addr_t low, bus_addr_t high)
{
	paddr_t pa, prev_pa;
	struct vm_page *pg;
	struct pglist pglist;
	int seg, rc;

	/* Work in whole pages. */
	size = round_page(size);

	/*
	 * Grab pages from the VM system; block for memory only if the
	 * caller did not ask for BUS_DMA_NOWAIT.
	 */
	rc = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (rc)
		return (rc);

	/*
	 * Walk the returned page list, coalescing physically contiguous
	 * pages into DMA segments.  The first page seeds segment 0.
	 */
	seg = 0;
	pg = pglist.tqh_first;
	prev_pa = segs[seg].ds_addr = VM_PAGE_TO_PHYS(pg);
	segs[seg].ds_len = PAGE_SIZE;

	for (pg = pg->pageq.tqe_next; pg != NULL; pg = pg->pageq.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
#ifdef DIAGNOSTIC
		/* The allocator must honor the requested address range. */
		if (pa < low || pa >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", pa);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (pa == (prev_pa + PAGE_SIZE)) {
			/* Contiguous with the previous page: extend. */
			segs[seg].ds_len += PAGE_SIZE;
		} else {
			/* Discontiguity: start a new segment. */
			seg++;
			segs[seg].ds_addr = pa;
			segs[seg].ds_len = PAGE_SIZE;
		}
		prev_pa = pa;
	}

	*rsegs = seg + 1;

	return (0);
}
#endif /* _PRIVATE_BUS_DMAMEM_ALLOC_RANGE */
/* /*
* Create a DMA map. * Create a DMA map.
@ -186,7 +248,7 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
*dmamp = map; *dmamp = map;
if (t->_bounce_thresh == 0 || avail_end <= t->_bounce_thresh) if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh)
map->_dm_bounce_thresh = 0; map->_dm_bounce_thresh = 0;
cookieflags = 0; cookieflags = 0;
@ -791,14 +853,14 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags) int flags)
{ {
paddr_t high; bus_addr_t high;
if (t->_bounce_alloc_hi != 0 && avail_end > t->_bounce_alloc_hi) if (t->_bounce_alloc_hi != 0 && _BUS_AVAIL_END > t->_bounce_alloc_hi)
high = trunc_page(t->_bounce_alloc_hi); high = trunc_page(t->_bounce_alloc_hi);
else else
high = trunc_page(avail_end); high = trunc_page(_BUS_AVAIL_END);
return (_bus_dmamem_alloc_range(t, size, alignment, boundary, return (_PRIVATE_BUS_DMAMEM_ALLOC_RANGE(t, size, alignment, boundary,
segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high)); segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high));
} }
@ -1133,61 +1195,3 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
return (0); return (0);
} }
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags, paddr_t low, paddr_t high)
{
paddr_t curaddr, lastaddr;
struct vm_page *m;
struct pglist mlist;
int curseg, error;
/* Always round the size. */
size = round_page(size);
/*
 * Allocate pages from the VM system.  Waits for memory unless the
 * caller passed BUS_DMA_NOWAIT.
 */
error = uvm_pglistalloc(size, low, high, alignment, boundary,
&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
if (error)
return (error);
/*
 * Compute the location, size, and number of segments actually
 * returned by the VM code.  Physically contiguous pages are
 * coalesced into a single segment.
 */
m = mlist.tqh_first;
curseg = 0;
/* First page seeds segment 0. */
lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
segs[curseg].ds_len = PAGE_SIZE;
m = m->pageq.tqe_next;
for (; m != NULL; m = m->pageq.tqe_next) {
curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
/* Sanity check: the allocator must honor [low, high). */
if (curaddr < low || curaddr >= high) {
/* NOTE(review): "%lx" assumes paddr_t fits unsigned long — verify on PAE/64-bit configs. */
printf("vm_page_alloc_memory returned non-sensical"
" address 0x%lx\n", curaddr);
panic("_bus_dmamem_alloc_range");
}
#endif
if (curaddr == (lastaddr + PAGE_SIZE))
/* Contiguous with the previous page: extend current segment. */
segs[curseg].ds_len += PAGE_SIZE;
else {
/* Discontiguity: start a new segment. */
curseg++;
segs[curseg].ds_addr = curaddr;
segs[curseg].ds_len = PAGE_SIZE;
}
lastaddr = curaddr;
}
*rsegs = curseg + 1;
return (0);
}