/* $NetBSD: bus_dma.c,v 1.32 1999/09/12 01:16:58 chs Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.32 1999/09/12 01:16:58 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>

#define	_ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
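
/*
 * Usage note (editorial sketch, not from the original source): a chipset's
 * direct-mapped DMA tag typically points its map-management entry points
 * at these common routines, roughly
 *
 *	t->_dmamap_create = _bus_dmamap_create;
 *	t->_dmamap_destroy = _bus_dmamap_destroy;
 *
 * so that a driver's bus_dmamap_create()/bus_dmamap_destroy() calls on
 * that tag land here.  The member names shown are an assumption about the
 * tag layout and are given as illustration only.
 */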

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, &curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
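
/*
 * Worked example of the boundary clipping above (editorial illustration;
 * the numbers are not taken from any particular chipset): with
 * _dm_boundary = 0x10000 (64KB), bmask is ~0xffff.  For curaddr = 0x1f000,
 *
 *	baddr = (0x1f000 + 0x10000) & ~0xffff = 0x20000
 *	sgsize is clipped to baddr - curaddr = 0x1000
 *
 * so the chunk stops at the 64KB line, and the following chunk begins a
 * new segment instead of letting one segment cross the boundary.
 */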

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
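
/*
 * Caller's-eye sketch (editorial; "sc_dmat" and "sc_dmamap" are
 * hypothetical softc members): a driver loading a buffer does not need to
 * know whether the direct window or a chained SGMAP window ends up
 * satisfying the request, since the fallback above happens transparently:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	if (error)
 *		return (error);
 *
 * where the error may be, e.g., EINVAL or EFBIG.  On success,
 * map->dm_segs[0 .. dm_nsegs - 1] hold the bus addresses to program into
 * the device.
 */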

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
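
/*
 * Usage sketch (editorial illustration): a portable driver brackets each
 * transfer with sync calls, for example for a memory-to-device transfer
 *
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *	(start the device reading from the buffer)
 *	...
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 *
 * The implementation above issues a memory barrier regardless of the
 * offset, length, and op arguments, but callers should still pass the
 * correct BUS_DMASYNC_* ops for portability.
 */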

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	paddr_t curaddr, lastaddr, high;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/*
	 * These are managed pages, so enter them with pmap_enter(),
	 * not pmap_kenter_pa(), and with an access_type of
	 * VM_PROT_READ|VM_PROT_WRITE: hardware mod/ref attributes may
	 * not see references made by a bus master, and on systems that
	 * emulate mod/ref this avoids taking a mod/ref page fault from
	 * interrupt context.  Knowing the pages are modified also
	 * matters if DMA-safe pages are ever handed to anonymous memory
	 * objects.  The mappings are wired and the pages are not on the
	 * active or inactive queues, so they consume no swap resources.
	 */
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, TRUE,
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}

	return (0);
}
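
/*
 * Allocation sketch (editorial illustration; "seg", "rseg", and "kva" are
 * hypothetical locals): a driver that needs DMA-safe control memory would
 * do roughly
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	caddr_t kva;
 *
 *	error = bus_dmamem_alloc(t, size, NBPG, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(t, &seg, rseg, size, &kva,
 *		    BUS_DMA_NOWAIT);
 *
 * and later tear it down with bus_dmamem_unmap() and bus_dmamem_free().
 * With a single segment the map step takes the K0SEG shortcut above and
 * consumes no kernel virtual address space.
 */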

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
int
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs, off, prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}