From 5f7fef3c79c5ddd1957ba1f34d49cd2198b6bda1 Mon Sep 17 00:00:00 2001 From: tsubai Date: Fri, 17 Jul 1998 18:38:10 +0000 Subject: [PATCH] Add bus_dma support. --- sys/arch/macppc/conf/files.macppc | 1 + sys/arch/macppc/include/bus.h | 38 +- sys/arch/macppc/macppc/bus_dma.c | 585 ++++++++++++++++++++++++++++++ sys/arch/macppc/pci/pci_machdep.h | 4 +- 4 files changed, 612 insertions(+), 16 deletions(-) create mode 100644 sys/arch/macppc/macppc/bus_dma.c diff --git a/sys/arch/macppc/conf/files.macppc b/sys/arch/macppc/conf/files.macppc index 5fc82a0cace7..c958429f2716 100644 --- a/sys/arch/macppc/conf/files.macppc +++ b/sys/arch/macppc/conf/files.macppc @@ -8,6 +8,7 @@ maxusers 2 8 64 include "arch/powerpc/conf/files.ofw" file arch/macppc/macppc/autoconf.c +file arch/macppc/macppc/bus_dma.c file arch/macppc/macppc/clock.c file arch/macppc/macppc/conf.c file arch/macppc/macppc/disksubr.c disk diff --git a/sys/arch/macppc/include/bus.h b/sys/arch/macppc/include/bus.h index 9ad664936cc6..17095e33858d 100644 --- a/sys/arch/macppc/include/bus.h +++ b/sys/arch/macppc/include/bus.h @@ -1,4 +1,4 @@ -/* $NetBSD: bus.h,v 1.1 1998/05/15 10:15:52 tsubai Exp $ */ +/* $NetBSD: bus.h,v 1.2 1998/07/17 18:38:10 tsubai Exp $ */ /* $OpenBSD: bus.h,v 1.1 1997/10/13 10:53:42 pefo Exp $ */ /* @@ -64,15 +64,15 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _POWERMAC_BUS_H_ -#define _POWERMAC_BUS_H_ +#ifndef _MACPPC_BUS_H_ +#define _MACPPC_BUS_H_ #include /* * Values for the PowerMac bus space tag, not to be used directly by MI code. */ -/* #define POWERMAC_BUS_REVERSE 1 */ +/* #define MACPPC_BUS_REVERSE 1 */ /* * Bus access types. @@ -684,7 +684,7 @@ bus_space_set_region_stream_4(tag, bsh, offset, val, count) * bus_space_handle_t bsh, bus_size_t offset, * bus_size_t len, int flags)); * - * Note: the powermac does not currently require barriers, but we must + * Note: the macppc does not currently require barriers, but we must * provide the flags to MI code. */ #define bus_space_barrier(t, h, o, l, f) \ @@ -720,8 +720,8 @@ struct uio; #define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */ #define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */ -typedef struct powermac_bus_dma_tag *bus_dma_tag_t; -typedef struct powermac_bus_dmamap *bus_dmamap_t; +typedef struct macppc_bus_dma_tag *bus_dma_tag_t; +typedef struct macppc_bus_dmamap *bus_dmamap_t; /* * bus_dma_segment_t @@ -729,11 +729,11 @@ typedef struct powermac_bus_dmamap *bus_dmamap_t; * Describes a single contiguous DMA transaction. Values * are suitable for programming into DMA registers. */ -struct powermac_bus_dma_segment { +struct macppc_bus_dma_segment { bus_addr_t ds_addr; /* DMA address */ bus_size_t ds_len; /* length of transfer */ }; -typedef struct powermac_bus_dma_segment bus_dma_segment_t; +typedef struct macppc_bus_dma_segment bus_dma_segment_t; /* * bus_dma_tag_t @@ -742,8 +742,16 @@ typedef struct powermac_bus_dma_segment bus_dma_segment_t; * DMA for a given bus. */ -struct powermac_bus_dma_tag { - void *_cookie; /* cookie used in the guts */ +struct macppc_bus_dma_tag { + /* + * The `bounce threshold' is checked while we are loading + * the DMA map. If the physical address of the segment + * exceeds the threshold, an error will be returned. The + * caller can then take whatever action is necessary to + * bounce the transfer. If this value is 0, it will be + * ignored. + */ + bus_addr_t _bounce_thresh; /* * DMA mapping methods. 
@@ -811,7 +819,7 @@ struct powermac_bus_dma_tag { * * Describes a DMA mapping. */ -struct powermac_bus_dmamap { +struct macppc_bus_dmamap { /* * PRIVATE MEMBERS: not for use my machine-independent code. */ @@ -831,7 +839,7 @@ struct powermac_bus_dmamap { bus_dma_segment_t dm_segs[1]; /* segments; variable length */ }; -#ifdef _POWERMAC_BUS_DMA_PRIVATE +#ifdef _MACPPC_BUS_DMA_PRIVATE int _bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *)); void _bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t)); @@ -863,6 +871,6 @@ int _bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, vm_offset_t low, vm_offset_t high)); -#endif /* _POWERMAC_BUS_DMA_PRIVATE */ +#endif /* _MACPPC_BUS_DMA_PRIVATE */ -#endif /* _POWERMAC_BUS_H_ */ +#endif /* _MACPPC_BUS_H_ */ diff --git a/sys/arch/macppc/macppc/bus_dma.c b/sys/arch/macppc/macppc/bus_dma.c new file mode 100644 index 000000000000..4fbf03a4d72e --- /dev/null +++ b/sys/arch/macppc/macppc/bus_dma.c @@ -0,0 +1,585 @@ +/* $NetBSD: bus_dma.c,v 1.1 1998/07/17 18:38:10 tsubai Exp $ */ + +/*- + * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "opt_uvm.h"
+#include "opt_pmap_new.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
+#define _MACPPC_BUS_DMA_PRIVATE
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+int _bus_dmamap_load_buffer __P((bus_dmamap_t, void *, bus_size_t,
+	    struct proc *, int, bus_addr_t, vm_offset_t *, int *, int));
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
+	bus_dma_tag_t t;
+	bus_size_t size;
+	int nsegments;
+	bus_size_t maxsegsz;
+	bus_size_t boundary;
+	int flags;
+	bus_dmamap_t *dmamp;
+{
+	struct macppc_bus_dmamap *map;
+	void *mapstore;
+	size_t mapsize;
+
+	/*
+	 * Allocate and initialize the DMA map. The end of the map
+	 * is a variable-sized array of segments, so we allocate enough
+	 * room for them in one shot.
+	 *
+	 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+	 * of ALLOCNOW notifies others that we've reserved these resources,
+	 * and they are not to be freed.
+	 *
+	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
+	 * the (nsegments - 1).
+	 */
+	mapsize = sizeof(struct macppc_bus_dmamap) +
+	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
+	if ((mapstore = malloc(mapsize, M_DMAMAP,
+	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
+		return (ENOMEM);
+
+	bzero(mapstore, mapsize);
+	map = (struct macppc_bus_dmamap *)mapstore;
+	map->_dm_size = size;
+	map->_dm_segcnt = nsegments;
+	map->_dm_maxsegsz = maxsegsz;
+	map->_dm_boundary = boundary;
+	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+	map->dm_mapsize = 0;		/* no valid mappings */
+	map->dm_nsegs = 0;
+
+	*dmamp = map;
+	return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_bus_dmamap_destroy(t, map)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+{
+
+	free(map, M_DMAMAP);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+int
+_bus_dmamap_load_buffer(map, buf, buflen, p, flags, bounce_thresh, lastaddrp,
+    segp, first)
+	bus_dmamap_t map;
+	void *buf;
+	bus_size_t buflen;
+	struct proc *p;
+	int flags;
+	bus_addr_t bounce_thresh;
+	vm_offset_t *lastaddrp;
+	int *segp;
+	int first;
+{
+	bus_size_t sgsize;
+	bus_addr_t curaddr, lastaddr, baddr, bmask;
+	vm_offset_t vaddr = (vm_offset_t)buf;
+	int seg;
+
+	lastaddr = *lastaddrp;
+	bmask = ~(map->_dm_boundary - 1);
+
+	for (seg = *segp; buflen > 0 ; ) {
+		/*
+		 * Get the physical address for this segment.
+		 */
+		if (p != NULL)
+			curaddr = pmap_extract(p->p_vmspace->vm_map.pmap,
+			    vaddr);
+		else
+			curaddr = vtophys(vaddr);
+
+		/*
+		 * If we're beyond the bounce threshold, notify
+		 * the caller.
+		 */
+		if (bounce_thresh != 0 && curaddr >= bounce_thresh)
+			return (EINVAL);
+
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+		if (buflen < sgsize)
+			sgsize = buflen;
+
+		/*
+		 * Make sure we don't cross any boundaries.
+		 */
+		if (map->_dm_boundary > 0) {
+			baddr = (curaddr + map->_dm_boundary) & bmask;
+			if (sgsize > (baddr - curaddr))
+				sgsize = (baddr - curaddr);
+		}
+
+		/*
+		 * Insert chunk into a segment, coalescing with
+		 * the previous segment if possible.
+ */ + if (first) { + map->dm_segs[seg].ds_addr = curaddr; + map->dm_segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (map->dm_segs[seg].ds_len + sgsize) <= + map->_dm_maxsegsz && + (map->_dm_boundary == 0 || + (map->dm_segs[seg].ds_addr & bmask) == + (curaddr & bmask))) + map->dm_segs[seg].ds_len += sgsize; + else { + if (++seg >= map->_dm_segcnt) + break; + map->dm_segs[seg].ds_addr = curaddr; + map->dm_segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + if (buflen != 0) + return (EFBIG); /* XXX better return value here? */ + + return (0); +} + +/* + * Common function for loading a DMA map with a linear buffer. May + * be called by bus-specific DMA map load functions. + */ +int +_bus_dmamap_load(t, map, buf, buflen, p, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; +{ + vm_offset_t lastaddr; + int seg, error; + + /* + * Make sure that on error condition we return "no valid mappings". + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + if (buflen > map->_dm_size) + return (EINVAL); + + seg = 0; + error = _bus_dmamap_load_buffer(map, buf, buflen, p, flags, + t->_bounce_thresh, &lastaddr, &seg, 1); + if (error == 0) { + map->dm_mapsize = buflen; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for mbufs. + */ +int +_bus_dmamap_load_mbuf(t, map, m0, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct mbuf *m0; + int flags; +{ + vm_offset_t lastaddr; + int seg, error, first; + struct mbuf *m; + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + +#ifdef DIAGNOSTIC + if ((m0->m_flags & M_PKTHDR) == 0) + panic("_bus_dmamap_load_mbuf: no packet header"); +#endif + + if (m0->m_pkthdr.len > map->_dm_size) + return (EINVAL); + + first = 1; + seg = 0; + error = 0; + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len, + NULL, flags, t->_bounce_thresh, &lastaddr, &seg, first); + first = 0; + } + if (error == 0) { + map->dm_mapsize = m0->m_pkthdr.len; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. + */ +int +_bus_dmamap_load_uio(t, map, uio, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct uio *uio; + int flags; +{ + + panic("_bus_dmamap_load_uio: not implemented"); +} + +/* + * Like _bus_dmamap_load(), but for raw memory. + */ +int +_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_dma_segment_t *segs; + int nsegs; + bus_size_t size; + int flags; +{ + + panic("_bus_dmamap_load_raw: not implemented"); +} + +/* + * Common function for unloading a DMA map. May be called by + * chipset-specific DMA map unload functions. + */ +void +_bus_dmamap_unload(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + + /* + * No resources to free; just mark the mappings as + * invalid. + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; +} + +/* + * Common function for DMA map synchronization. May be called + * by chipset-specific DMA map synchronization functions. + */ +void +_bus_dmamap_sync(t, map, offset, len, ops) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_addr_t offset; + bus_size_t len; + int ops; +{ + + /* Nothing to do here. */ +} + +/* + * Common function for DMA-safe memory allocation. 
May be called + * by bus-specific DMA memory allocation functions. + */ +int +_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; +{ + vm_offset_t avail_start, avail_end; + vm_offset_t curaddr, lastaddr, high; + vm_page_t m; + struct pglist mlist; + int curseg, error; + + avail_start = vm_physmem[0].avail_start << PGSHIFT; + avail_end = vm_physmem[vm_nphysseg - 1].avail_end << PGSHIFT; + + /* Always round the size. */ + size = round_page(size); + + high = avail_end - PAGE_SIZE; + + /* + * Allocate pages from the VM system. + */ + TAILQ_INIT(&mlist); +#if defined(UVM) + error = uvm_pglistalloc(size, avail_start, high, alignment, boundary, + &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); +#else + error = vm_page_alloc_memory(size, avail_start, high, + alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); +#endif + if (error) + return (error); + + /* + * Compute the location, size, and number of segments actually + * returned by the VM code. + */ + m = mlist.tqh_first; + curseg = 0; + lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m); + segs[curseg].ds_len = PAGE_SIZE; + m = m->pageq.tqe_next; + + for (; m != NULL; m = m->pageq.tqe_next) { + curaddr = VM_PAGE_TO_PHYS(m); +#ifdef DIAGNOSTIC + if (curaddr < avail_start || curaddr >= high) { + printf("vm_page_alloc_memory returned non-sensical" + " address 0x%lx\n", curaddr); + panic("_bus_dmamem_alloc"); + } +#endif + if (curaddr == (lastaddr + PAGE_SIZE)) + segs[curseg].ds_len += PAGE_SIZE; + else { + curseg++; + segs[curseg].ds_addr = curaddr; + segs[curseg].ds_len = PAGE_SIZE; + } + lastaddr = curaddr; + } + + *rsegs = curseg + 1; + + return (0); +} + +/* + * Common function for freeing DMA-safe memory. May be called by + * bus-specific DMA memory free functions. + */ +void +_bus_dmamem_free(t, segs, nsegs) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; +{ + vm_page_t m; + bus_addr_t addr; + struct pglist mlist; + int curseg; + + /* + * Build a list of pages to free back to the VM system. + */ + TAILQ_INIT(&mlist); + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += PAGE_SIZE) { + m = PHYS_TO_VM_PAGE(addr); + TAILQ_INSERT_TAIL(&mlist, m, pageq); + } + } + +#if defined(UVM) + uvm_pglistfree(&mlist); +#else + vm_page_free_memory(&mlist); +#endif +} + +/* + * Common function for mapping DMA-safe memory. May be called by + * bus-specific DMA memory map functions. + */ +int +_bus_dmamem_map(t, segs, nsegs, size, kvap, flags) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; + size_t size; + caddr_t *kvap; + int flags; +{ + vm_offset_t va; + bus_addr_t addr; + int curseg; + + size = round_page(size); + +#if defined(UVM) + va = uvm_km_valloc(kernel_map, size); +#else + va = kmem_alloc_pageable(kernel_map, size); +#endif + + if (va == 0) + return (ENOMEM); + + *kvap = (caddr_t)va; + + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += NBPG, va += NBPG, size -= NBPG) { + if (size == 0) + panic("_bus_dmamem_map: size botch"); +#if defined(PMAP_NEW) + pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE); +#else + pmap_enter(pmap_kernel(), va, addr, + VM_PROT_READ | VM_PROT_WRITE, TRUE); +#endif + } + } + + return (0); +} + +/* + * Common function for unmapping DMA-safe memory. 
May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_bus_dmamem_unmap(t, kva, size)
+	bus_dma_tag_t t;
+	caddr_t kva;
+	size_t size;
+{
+
+#ifdef DIAGNOSTIC
+	if ((u_long)kva & PGOFSET)
+		panic("_bus_dmamem_unmap");
+#endif
+
+	size = round_page(size);
+
+#if defined(UVM)
+	uvm_km_free(kernel_map, (vm_offset_t)kva, size);
+#else
+	kmem_free(kernel_map, (vm_offset_t)kva, size);
+#endif
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory. May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+int
+_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
+	bus_dma_tag_t t;
+	bus_dma_segment_t *segs;
+	int nsegs, off, prot, flags;
+{
+	int i;
+
+	for (i = 0; i < nsegs; i++) {
+#ifdef DIAGNOSTIC
+		if (off & PGOFSET)
+			panic("_bus_dmamem_mmap: offset unaligned");
+		if (segs[i].ds_addr & PGOFSET)
+			panic("_bus_dmamem_mmap: segment unaligned");
+		if (segs[i].ds_len & PGOFSET)
+			panic("_bus_dmamem_mmap: segment size not multiple"
+			    " of page size");
+#endif
+		if (off >= segs[i].ds_len) {
+			off -= segs[i].ds_len;
+			continue;
+		}
+
+		return (segs[i].ds_addr + off);
+	}
+
+	/* Page not found. */
+	return (-1);
+}
diff --git a/sys/arch/macppc/pci/pci_machdep.h b/sys/arch/macppc/pci/pci_machdep.h
index ea7ba42c7216..8f4df876699d 100644
--- a/sys/arch/macppc/pci/pci_machdep.h
+++ b/sys/arch/macppc/pci/pci_machdep.h
@@ -1,4 +1,4 @@
-/* $NetBSD: pci_machdep.h,v 1.2 1998/07/13 19:27:13 tsubai Exp $ */
+/* $NetBSD: pci_machdep.h,v 1.3 1998/07/17 18:38:11 tsubai Exp $ */
 
 /*
  * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
@@ -62,6 +62,8 @@ struct pci_bridge pci_bridges[2];
 #define PCI_CHIPSET_BANDIT	0x00
 #define PCI_CHIPSET_MPC106	0x10
 
+extern struct macppc_bus_dma_tag pci_bus_dma_tag;
+
 /*
  * Functions provided to machine-independent PCI code.
  */
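
For reference, here is a minimal sketch of how a macppc driver might use the interface this change introduces: create a map, load a kernel buffer, sync before the transfer, and tear everything down afterwards. It is illustrative only and not part of the commit. It assumes the usual bus_dma(9) wrapper macros (bus_dmamap_create() and friends) dispatch through the method pointers in struct macppc_bus_dma_tag, and that pci_bus_dma_tag, declared above in pci_machdep.h, is the tag handed to PCI drivers; the function name, buffer, and segment-printing loop are hypothetical.

/*
 * Hypothetical example only -- not part of this commit.  Maps a kernel
 * buffer for a device write using the standard bus_dma(9) wrappers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

int
example_dma_write(dmat, buf, len)
	bus_dma_tag_t dmat;
	void *buf;
	bus_size_t len;
{
	bus_dmamap_t map;
	int error, i;

	/* One map; allow one segment per page plus one for misalignment. */
	error = bus_dmamap_create(dmat, len, howmany(len, NBPG) + 1, len, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	/* Translate the kernel virtual buffer into physical segments. */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(dmat, map);
		return (error);
	}

	/* Synchronize before the device reads the buffer. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* Program the device with the resulting segment list. */
	for (i = 0; i < map->dm_nsegs; i++)
		printf("seg %d: addr 0x%lx len %lu\n", i,
		    (u_long)map->dm_segs[i].ds_addr,
		    (u_long)map->dm_segs[i].ds_len);

	/* ... after the transfer completes ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}

Since the load is done with p == NULL, _bus_dmamap_load_buffer() translates addresses with vtophys(); a tag with a non-zero _bounce_thresh would instead fail the load with EINVAL for segments above the threshold, which is the hook a caller would use to decide to bounce the transfer.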