NetBSD/sys/arch/arm/arm32/bus_dma.c
thorpej 08342df793 Overhaul bus_dmamap_sync for the ARM:
* Track which process (XXX really, vmspace) owns the mapping.  When
  we sync the map, if the mapping doesn't belong to the kernel or to
  the current process (XXX really, vmspace), then no cache frobbing
  is necessary, since the cache is Wb-Inv'd on context switch (XXX need
  to revisit this when we support FCSE).
* Be smarter about which cache operation we do when sync'ing the map:
  - PREREAD -- Invalidate D$ (XXX right now, we actually do Wb-Inv)
  - PREWRITE -- Write-back D$ (note, we do NOT invalidate here)
  - PREREAD|PREWRITE -- Wb-Inv D$

More work is needed here.  In particular, a version for CPUs
with write-through caches should be provided, to eliminate
the write-back steps (which are noops on such CPUs, but skipping
two branches would be nice).
2002-01-25 20:57:41 +00:00
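
A hedged sketch of the driver-side discipline this implies (a
hypothetical TX path; "sc", "sc_dmat", and "sc_txmap" are placeholder
names, not part of this change):

	/* CPU has filled the buffer; push it out before the device reads. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0, len,
	    BUS_DMASYNC_PREWRITE);
	/* ... start the DMA transfer ... */
	/* POSTWRITE is currently a no-op on this port, but portable
	   drivers issue it after completion anyway. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0, len,
	    BUS_DMASYNC_POSTWRITE);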


/* $NetBSD: bus_dma.c,v 1.8 2002/01/25 20:57:41 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
* NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>
#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>
#include <arm/cpufunc.h>
int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, struct proc *, int, vm_offset_t *, int *, int);
int _bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);
/*
* Common function for DMA map creation. May be called by bus-specific
* DMA map creation functions.
*/
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
struct arm32_bus_dmamap *map;
void *mapstore;
size_t mapsize;
#ifdef DEBUG_DMA
printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
t, size, nsegments, maxsegsz, boundary, flags);
#endif /* DEBUG_DMA */
/*
* Allocate and initialize the DMA map. The end of the map
* is a variable-sized array of segments, so we allocate enough
* room for them in one shot.
*
* Note we don't preserve the WAITOK or NOWAIT flags. Preservation
* of ALLOCNOW notifies others that we've reserved these resources,
* and they are not to be freed.
*
* The bus_dmamap_t includes one bus_dma_segment_t, hence
* the (nsegments - 1).
*/
mapsize = sizeof(struct arm32_bus_dmamap) +
(sizeof(bus_dma_segment_t) * (nsegments - 1));
if ((mapstore = malloc(mapsize, M_DMAMAP,
(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
return (ENOMEM);
memset(mapstore, 0, mapsize);
map = (struct arm32_bus_dmamap *)mapstore;
map->_dm_size = size;
map->_dm_segcnt = nsegments;
map->_dm_maxsegsz = maxsegsz;
map->_dm_boundary = boundary;
map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
map->_dm_proc = NULL;
map->dm_mapsize = 0; /* no valid mappings */
map->dm_nsegs = 0;
*dmamp = map;
#ifdef DEBUG_DMA
printf("dmamap_create:map=%p\n", map);
#endif /* DEBUG_DMA */
return (0);
}
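/*
 * Example (sketch): a hypothetical driver creating a map for a 64KB
 * buffer split across at most 16 segments; "sc" and its members are
 * placeholders, not defined in this file.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 65536, 16, 65536, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 *	if (error)
 *		return (error);
 */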
/*
* Common function for DMA map destruction. May be called by bus-specific
* DMA map destruction functions.
*/
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
#ifdef DEBUG_DMA
printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif /* DEBUG_DMA */
#ifdef DIAGNOSTIC
if (map->dm_nsegs > 0)
printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif /* DIAGNOSTIC */
free(map, M_DMAMAP);	/* must match the M_DMAMAP allocation above */
}
/*
* Common function for loading a DMA map with a linear buffer. May
* be called by bus-specific DMA map load functions.
*/
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct proc *p, int flags)
{
vm_offset_t lastaddr;
int seg, error;
#ifdef DEBUG_DMA
printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
t, map, buf, buflen, p, flags);
#endif /* DEBUG_DMA */
/*
* Make sure that on error condition we return "no valid mappings".
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
if (buflen > map->_dm_size)
return (EINVAL);
seg = 0;
error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
&lastaddr, &seg, 1);
if (error == 0) {
map->dm_mapsize = buflen;
map->dm_nsegs = seg + 1;
map->_dm_proc = p;
}
#ifdef DEBUG_DMA
printf("dmamap_load: error=%d\n", error);
#endif /* DEBUG_DMA */
return (error);
}
/*
* Like _bus_dmamap_load(), but for mbufs.
*/
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
int flags)
{
vm_offset_t lastaddr;
int seg, error, first;
struct mbuf *m;
#ifdef DEBUG_DMA
printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
t, map, m0, flags);
#endif /* DEBUG_DMA */
/*
* Make sure that on error condition we return "no valid mappings."
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
#ifdef DIAGNOSTIC
if ((m0->m_flags & M_PKTHDR) == 0)
panic("_bus_dmamap_load_mbuf: no packet header");
#endif /* DIAGNOSTIC */
if (m0->m_pkthdr.len > map->_dm_size)
return (EINVAL);
first = 1;
seg = 0;
error = 0;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
NULL, flags, &lastaddr, &seg, first);
first = 0;
}
if (error == 0) {
map->dm_mapsize = m0->m_pkthdr.len;
map->dm_nsegs = seg + 1;
map->_dm_proc = NULL; /* always kernel */
}
#ifdef DEBUG_DMA
printf("dmamap_load_mbuf: error=%d\n", error);
#endif /* DEBUG_DMA */
return (error);
}
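/*
 * Example (sketch): a hypothetical network TX path; "sc" and "m0"
 * are placeholders.  Mbuf loads are always treated as kernel
 * mappings (_dm_proc == NULL above), so the sync path never skips
 * the cache operations for them.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0,
 *		    sc->sc_txmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 */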
/*
* Like _bus_dmamap_load(), but for uios.
*/
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
int flags)
{
vm_offset_t lastaddr;
int seg, i, error, first;
bus_size_t minlen, resid;
struct proc *p = NULL;
struct iovec *iov;
caddr_t addr;
/*
* Make sure that on error condition we return "no valid mappings."
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
p = uio->uio_procp;
#ifdef DIAGNOSTIC
if (p == NULL)
panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
}
first = 1;
seg = 0;
error = 0;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
addr = (caddr_t)iov[i].iov_base;
error = _bus_dmamap_load_buffer(t, map, addr, minlen,
p, flags, &lastaddr, &seg, first);
first = 0;
resid -= minlen;
}
if (error == 0) {
map->dm_mapsize = uio->uio_resid;
map->dm_nsegs = seg + 1;
map->_dm_proc = p;
}
return (error);
}
/*
* Like _bus_dmamap_load(), but for raw memory allocated with
* bus_dmamem_alloc().
*/
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
panic("_bus_dmamap_load_raw: not implemented");
}
/*
* Common function for unloading a DMA map. May be called by
* bus-specific DMA map unload functions.
*/
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
#ifdef DEBUG_DMA
printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif /* DEBUG_DMA */
/*
* No resources to free; just mark the mappings as
* invalid.
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
map->_dm_proc = NULL;
}
/*
* Common function for DMA map synchronization. May be called
* by bus-specific DMA map synchronization functions.
*
* This version works for the Virtually Indexed Virtually Tagged
* cache found on 32-bit ARM processors.
*
* XXX Should have separate versions for write-through vs.
* XXX write-back caches. We currently assume write-back
* XXX here, which is not as efficient as it could be for
* XXX the write-through case.
*/
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
bus_size_t len, int ops)
{
bus_size_t minlen;
bus_addr_t addr;
int i;
#ifdef DEBUG_DMA
printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
t, map, offset, len, ops);
#endif /* DEBUG_DMA */
/*
* Mixing of PRE and POST operations is not allowed.
*/
if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
(ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
panic("_bus_dmamap_sync: mix PRE and POST");
#ifdef DIAGNOSTIC
if (offset >= map->dm_mapsize)
panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
offset, map->dm_mapsize);
if (len == 0 || (offset + len) > map->dm_mapsize)
panic("_bus_dmamap_sync: bad length");
#endif
/*
* For a virtually-indexed write-back cache, we need
* to do the following things:
*
* PREREAD -- Invalidate the D-cache. We do this
* here in case a write-back is required by the back-end.
*
* PREWRITE -- Write-back the D-cache. Note that if
* we are doing a PREREAD|PREWRITE, we can collapse
* the whole thing into a single Wb-Inv.
*
* POSTREAD -- Nothing.
*
* POSTWRITE -- Nothing.
*/
ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
if (ops == 0)
return;
/*
* XXX Skip cache frobbing if mapping was COHERENT.
*/
/*
* If the mapping is not the kernel's and also not the
* current process's (XXX actually, vmspace), then we
* don't have anything to do, since the cache is Wb-Inv'd
* on context switch.
*
* XXX REVISIT WHEN WE DO FCSE!
*/
if (__predict_false(map->_dm_proc != NULL && map->_dm_proc != curproc))
return;
for (i = 0; i < map->dm_nsegs && len != 0; i++) {
/* Find beginning segment. */
if (offset >= map->dm_segs[i].ds_len) {
offset -= map->dm_segs[i].ds_len;
continue;
}
/*
* Now at the first segment to sync; nail
* each segment until we have exhausted the
* length.
*/
minlen = len < map->dm_segs[i].ds_len - offset ?
len : map->dm_segs[i].ds_len - offset;
addr = map->dm_segs[i]._ds_vaddr;
#ifdef DEBUG_DMA
printf("bus_dmamap_sync: flushing segment %d "
"(0x%lx..0x%lx) ...", i, addr + offset,
addr + offset + minlen - 1);
#endif
switch (ops) {
case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
cpu_dcache_wbinv_range(addr + offset, minlen);
break;
case BUS_DMASYNC_PREREAD:
#if 1
/* XXX Wb-Inv for now; a pure invalidate should suffice here. */
cpu_dcache_wbinv_range(addr + offset, minlen);
#else
cpu_dcache_inv_range(addr + offset, minlen);
#endif
break;
case BUS_DMASYNC_PREWRITE:
cpu_dcache_wb_range(addr + offset, minlen);
break;
}
#ifdef DEBUG_DMA
printf("\n");
#endif
offset = 0;
len -= minlen;
}
/* Drain the write buffer. */
cpu_drain_writebuf();
}
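/*
 * Sketch (not compiled here) of the write-through variant suggested
 * in the XXX above: with no dirty lines to write back, PREWRITE
 * becomes a no-op and both PREREAD cases collapse to a pure
 * invalidate:
 *
 *	switch (ops) {
 *	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
 *	case BUS_DMASYNC_PREREAD:
 *		cpu_dcache_inv_range(addr + offset, minlen);
 *		break;
 *	case BUS_DMASYNC_PREWRITE:
 *		break;		/* nothing to write back */
 *	}
 */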
/*
* Common function for DMA-safe memory allocation. May be called
* by bus-specific DMA memory allocation functions.
*/
extern vm_offset_t physical_start;
extern vm_offset_t physical_freestart;
extern vm_offset_t physical_freeend;
extern vm_offset_t physical_end;
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags)
{
int error;
#ifdef DEBUG_DMA
printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif /* DEBUG_DMA */
error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
segs, nsegs, rsegs, flags, trunc_page(physical_start), trunc_page(physical_end)));
#ifdef DEBUG_DMA
printf("dmamem_alloc: =%d\n", error);
#endif /* DEBUG_DMA */
return(error);
}
/*
* Common function for freeing DMA-safe memory. May be called by
* bus-specific DMA memory free functions.
*/
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
struct vm_page *m;
bus_addr_t addr;
struct pglist mlist;
int curseg;
#ifdef DEBUG_DMA
printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif /* DEBUG_DMA */
/*
* Build a list of pages to free back to the VM system.
*/
TAILQ_INIT(&mlist);
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += PAGE_SIZE) {
m = PHYS_TO_VM_PAGE(addr);
TAILQ_INSERT_TAIL(&mlist, m, pageq);
}
}
uvm_pglistfree(&mlist);
}
/*
* Common function for mapping DMA-safe memory. May be called by
* bus-specific DMA memory map functions.
*/
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
size_t size, caddr_t *kvap, int flags)
{
vm_offset_t va;
bus_addr_t addr;
int curseg;
pt_entry_t *ptep;
#ifdef DEBUG_DMA
printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
segs, nsegs, (unsigned long)size, flags);
#endif /* DEBUG_DMA */
size = round_page(size);
va = uvm_km_valloc(kernel_map, size);
if (va == 0)
return (ENOMEM);
*kvap = (caddr_t)va;
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
printf("wiring p%lx to v%lx", addr, va);
#endif /* DEBUG_DMA */
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,
VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
/*
* If the memory must remain coherent with the
* cache then we must make the memory uncacheable
* in order to maintain virtual cache coherency.
* We must also guarantee the cache does not already
* contain the virtual addresses we are making
* uncacheable.
*/
if (flags & BUS_DMA_COHERENT) {
cpu_dcache_wbinv_range(va, NBPG);
cpu_drain_writebuf();
ptep = vtopte(va);
*ptep = ((*ptep) & ~(PT_C | PT_B));	/* clear cacheable and bufferable bits */
tlb_flush();
}
#ifdef DEBUG_DMA
ptep = vtopte(va);
printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif /* DEBUG_DMA */
}
}
pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
printf("dmamem_map: =%p\n", *kvap);
#endif /* DEBUG_DMA */
return (0);
}
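/*
 * Example (sketch): a hypothetical driver allocating and mapping a
 * descriptor ring; "sc", "ringsize", "seg", "rseg", and "kva" are
 * placeholders.  BUS_DMA_COHERENT makes the kernel mapping
 * uncacheable, as implemented above.
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
 *		    ringsize, &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 */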
/*
* Common function for unmapping DMA-safe memory. May be called by
* bus-specific DMA memory unmapping functions.
*/
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
#ifdef DEBUG_DMA
printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
(unsigned long)size);
#endif /* DEBUG_DMA */
#ifdef DIAGNOSTIC
if ((u_long)kva & PGOFSET)
panic("_bus_dmamem_unmap");
#endif /* DIAGNOSTIC */
size = round_page(size);
uvm_km_free(kernel_map, (vm_offset_t)kva, size);
}
/*
* Common function for mmap(2)'ing DMA-safe memory. May be called by
* bus-specific DMA mmap(2)'ing functions.
*/
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
off_t off, int prot, int flags)
{
int i;
for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
if (off & PGOFSET)
panic("_bus_dmamem_mmap: offset unaligned");
if (segs[i].ds_addr & PGOFSET)
panic("_bus_dmamem_mmap: segment unaligned");
if (segs[i].ds_len & PGOFSET)
panic("_bus_dmamem_mmap: segment size not multiple"
" of page size");
#endif /* DIAGNOSTIC */
if (off >= segs[i].ds_len) {
off -= segs[i].ds_len;
continue;
}
return (arm_byte_to_page((u_long)segs[i].ds_addr + off));
}
/* Page not found. */
return (-1);
}
/**********************************************************************
* DMA utility functions
**********************************************************************/
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct proc *p, int flags, vm_offset_t *lastaddrp,
int *segp, int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
#ifdef DEBUG_DMA
printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
buf, buflen, flags, first);
#endif /* DEBUG_DMA */
if (p != NULL)
pmap = p->p_vmspace->vm_map.pmap;
else
pmap = pmap_kernel();
lastaddr = *lastaddrp;
bmask = ~(map->_dm_boundary - 1);
for (seg = *segp; buflen > 0; ) {
/*
* Get the physical address for this segment.
*/
(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);
/*
* Make sure we're in an allowed DMA range.
*/
if (t->_ranges != NULL &&
_bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
return (EINVAL);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (map->_dm_boundary > 0) {
baddr = (curaddr + map->_dm_boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
map->dm_segs[seg]._ds_vaddr = vaddr;
first = 0;
} else {
if (curaddr == lastaddr &&
(map->dm_segs[seg].ds_len + sgsize) <=
map->_dm_maxsegsz &&
(map->_dm_boundary == 0 ||
(map->dm_segs[seg].ds_addr & bmask) ==
(curaddr & bmask)))
map->dm_segs[seg].ds_len += sgsize;
else {
if (++seg >= map->_dm_segcnt)
break;
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
map->dm_segs[seg]._ds_vaddr = vaddr;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
if (buflen != 0)
return (EFBIG); /* XXX better return value here? */
return (0);
}
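/*
 * Worked example of the boundary clamp above (illustrative numbers):
 * with _dm_boundary = 0x10000, bmask = ~0xffff.  For curaddr =
 * 0x1f000, baddr = (0x1f000 + 0x10000) & ~0xffff = 0x20000, so
 * sgsize is clamped to 0x20000 - 0x1f000 = 0x1000 and the segment
 * ends exactly at the 64KB boundary.
 */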
/*
* Check to see if the specified page is in an allowed DMA range.
*/
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
bus_dma_segment_t *ds;
int i;
for (i = 0, ds = ranges; i < nranges; i++, ds++) {
if (curaddr >= ds->ds_addr &&
round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
return (1);
}
return (0);
}
/*
* Allocate physical memory from the given physical address range.
* Called by DMA-safe memory allocation methods.
*/
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags, vm_offset_t low, vm_offset_t high)
{
vm_offset_t curaddr, lastaddr;
struct vm_page *m;
struct pglist mlist;
int curseg, error;
#ifdef DEBUG_DMA
printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif /* DEBUG_DMA */
/* Always round the size. */
size = round_page(size);
/*
* Allocate pages from the VM system.
*/
TAILQ_INIT(&mlist);
error = uvm_pglistalloc(size, low, high, alignment, boundary,
&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
if (error)
return (error);
/*
* Compute the location, size, and number of segments actually
* returned by the VM code.
*/
m = mlist.tqh_first;
curseg = 0;
lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
printf("alloc: page %lx\n", lastaddr);
#endif /* DEBUG_DMA */
m = m->pageq.tqe_next;
for (; m != NULL; m = m->pageq.tqe_next) {
curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
if (curaddr < low || curaddr >= high) {
printf("uvm_pglistalloc returned non-sensical"
" address 0x%lx\n", curaddr);
panic("_bus_dmamem_alloc_range");
}
#endif /* DIAGNOSTIC */
#ifdef DEBUG_DMA
printf("alloc: page %lx\n", curaddr);
#endif /* DEBUG_DMA */
if (curaddr == (lastaddr + PAGE_SIZE))
segs[curseg].ds_len += PAGE_SIZE;
else {
curseg++;
segs[curseg].ds_addr = curaddr;
segs[curseg].ds_len = PAGE_SIZE;
}
lastaddr = curaddr;
}
*rsegs = curseg + 1;
return (0);
}
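/*
 * Worked example of the coalescing loop above (illustrative
 * addresses, PAGE_SIZE = 0x1000): pages at 0x1000, 0x2000, and
 * 0x4000 yield two segments -- segs[0] = { 0x1000, len 0x2000 }
 * (the first two pages are contiguous) and segs[1] = { 0x4000,
 * len 0x1000 }, so *rsegs = 2.
 */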