From 64572c459a369d0c24d77a5f96810a48fa0c4f12 Mon Sep 17 00:00:00 2001 From: ragge Date: Sun, 6 Jun 1999 18:58:50 +0000 Subject: [PATCH] sgmap files, copied from the Alpha port and modified for vax. --- sys/arch/vax/include/sgmap.h | 86 +++++++++ sys/arch/vax/vax/sgmap.c | 330 +++++++++++++++++++++++++++++++++++ 2 files changed, 416 insertions(+) create mode 100644 sys/arch/vax/include/sgmap.h create mode 100644 sys/arch/vax/vax/sgmap.c diff --git a/sys/arch/vax/include/sgmap.h b/sys/arch/vax/include/sgmap.h new file mode 100644 index 000000000000..a85663281aed --- /dev/null +++ b/sys/arch/vax/include/sgmap.h @@ -0,0 +1,86 @@ +/* $NetBSD: sgmap.h,v 1.1 1999/06/06 18:58:50 ragge Exp $ */ + +/*- + * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VAX_COMMON_SGMAPVAR_H
+#define	_VAX_COMMON_SGMAPVAR_H
+
+/*
+ * NOTE(review): the three #include names below were lost in extraction
+ * (bare "#include" tokens remained).  Reconstructed from what the
+ * declarations below require (struct extent, bus_addr_t/bus_size_t,
+ * struct pte) -- verify against the repository before relying on them.
+ */
+#include <sys/extent.h>
+#include <machine/bus.h>
+#include <machine/pte.h>
+
+/*
+ * A VAX SGMAP's state information.  Nothing in the sgmap requires
+ * locking[*], with the exception of the extent map.  Locking of the
+ * extent map is handled within the extent manager itself.
+ *
+ * [*] While the page table is a `global' resource, access to it is
+ * controlled by the extent map; once a region has been allocated from
+ * the map, that region is effectively `locked'.
+ */ +struct vax_sgmap { + struct extent *aps_ex; /* extent map to manage sgva space */ + struct pte *aps_pt; /* page table */ + bus_addr_t aps_sgvabase; /* base of the sgva space */ + bus_size_t aps_sgvasize; /* size of the sgva space */ + bus_addr_t aps_pa; /* Address in region */ +}; + +void vax_sgmap_init __P((bus_dma_tag_t, struct vax_sgmap *, + const char *, bus_addr_t, bus_size_t, struct pte *, bus_size_t)); + +int vax_sgmap_alloc __P((bus_dmamap_t, bus_size_t, + struct vax_sgmap *, int)); +void vax_sgmap_free __P((bus_dmamap_t, struct vax_sgmap *)); + +int vax_sgmap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int, struct vax_sgmap *)); + +int vax_sgmap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, int, struct vax_sgmap *)); + +int vax_sgmap_load_uio __P((bus_dma_tag_t, bus_dmamap_t, + struct uio *, int, struct vax_sgmap *)); + +int vax_sgmap_load_raw __P((bus_dma_tag_t, bus_dmamap_t, + bus_dma_segment_t *, int, bus_size_t, int, struct vax_sgmap *)); + +void vax_sgmap_unload __P(( bus_dma_tag_t, bus_dmamap_t, + struct vax_sgmap *)); + +#endif /* _ALPHA_COMMON_SGMAPVAR_H */ diff --git a/sys/arch/vax/vax/sgmap.c b/sys/arch/vax/vax/sgmap.c new file mode 100644 index 000000000000..2293bb2d62c2 --- /dev/null +++ b/sys/arch/vax/vax/sgmap.c @@ -0,0 +1,330 @@ +/* $NetBSD: sgmap.c,v 1.1 1999/06/06 18:58:50 ragge Exp $ */ + +/*- + * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/*
+ * NOTE(review): the #include names below were lost in extraction (bare
+ * "#include" tokens remained, in a 5 + 1 + 2 grouping).  Reconstructed
+ * from what the code demonstrably uses (printf/panic, malloc types,
+ * struct proc, pmap_extract, bus_dma, the sgmap declarations) -- verify
+ * against the repository before relying on them.
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/sgmap.h>
+
+/*
+ * vax_sgmap_init:
+ *
+ *	Initialize a scatter/gather map: record the sgva range, use the
+ *	caller-supplied page table (if any) or allocate one with
+ *	bus_dmamem_alloc(), and create the extent map that manages
+ *	allocations from the sgva space.  Any failure panics.
+ */
+void
+vax_sgmap_init(t, sgmap, name, sgvabase, sgvasize, ptva, minptalign)
+	bus_dma_tag_t t;
+	struct vax_sgmap *sgmap;
+	const char *name;
+	bus_addr_t sgvabase;	/* base of the managed sgva space */
+	bus_size_t sgvasize;	/* size of the space; must be page-aligned */
+	struct pte *ptva;	/* page table, or NULL to allocate one */
+	bus_size_t minptalign;	/* minimum page table alignment, or 0 */
+{
+	bus_dma_segment_t seg;
+	size_t ptsize;
+	int rseg;
+
+	if (sgvasize & PGOFSET) {
+		printf("size botch for sgmap `%s'\n", name);
+		goto die;
+	}
+
+	sgmap->aps_sgvabase = sgvabase;
+	sgmap->aps_sgvasize = sgvasize;
+
+	if (ptva != NULL) {
+		/*
+		 * We already have a page table; this may be a system
+		 * where the page table resides in bridge-resident SRAM.
+		 */
+		sgmap->aps_pt = ptva;
+	} else {
+		/*
+		 * Compute the page table size and allocate it.  At minimum,
+		 * this must be aligned to the page table size.  However,
+		 * some platforms have more strict alignment requirements.
+		 */
+		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
+		if (minptalign != 0) {
+			if (minptalign < ptsize)
+				minptalign = ptsize;
+		} else
+			minptalign = ptsize;
+		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
+		    BUS_DMA_NOWAIT)) {
+			/*
+			 * Was panic() followed by an unreachable "goto die;"
+			 * (panic never returns).  Report the error and take
+			 * the common failure path like the other two error
+			 * cases in this function.
+			 */
+			printf("unable to allocate page table for sgmap `%s'\n",
+			    name);
+			goto die;
+		}
+		/* Access the table through the KVA mirror of the address. */
+		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
+	}
+
+	/*
+	 * Create the extent map used to manage the virtual address
+	 * space.
+	 */
+	sgmap->aps_ex = extent_create((char *)name, sgvabase, sgvasize - 1,
+	    M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
+	if (sgmap->aps_ex == NULL) {
+		printf("unable to create extent map for sgmap `%s'\n", name);
+		goto die;
+	}
+
+	return;
+ die:
+	panic("vax_sgmap_init");
+}
+
+/*
+ * vax_sgmap_alloc:
+ *
+ *	Allocate sgva space for a map from the sgmap's extent map,
+ *	rounded up to a whole number of pages.  On success the range is
+ *	recorded in _dm_sgva/_dm_sgvalen and DMAMAP_HAS_SGMAP is set.
+ *	Returns 0 or an extent_alloc() error.
+ */
+int
+vax_sgmap_alloc(map, origlen, sgmap, flags)
+	bus_dmamap_t map;
+	bus_size_t origlen;
+	struct vax_sgmap *sgmap;
+	int flags;		/* BUS_DMA_NOWAIT selects EX_NOWAIT */
+{
+	int error;
+	bus_size_t len = origlen;
+
+#ifdef DIAGNOSTIC
+	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
+		panic("vax_sgmap_alloc: already have sgva space");
+#endif
+
+	map->_dm_sgvalen = round_page(len);
+
+#if 0
+	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
+	    origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
+#endif
+
+	error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
+	    0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
+	    &map->_dm_sgva);
+#if 0
+	printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
+#endif
+
+	if (error == 0)
+		map->_dm_flags |= DMAMAP_HAS_SGMAP;
+	else
+		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
+
+	return (error);
+}
+
+/*
+ * vax_sgmap_free:
+ *
+ *	Return a map's sgva space to the sgmap's extent map and clear
+ *	DMAMAP_HAS_SGMAP.  Panics if the space cannot be freed.
+ */
+void
+vax_sgmap_free(map, sgmap)
+	bus_dmamap_t map;
+	struct vax_sgmap *sgmap;
+{
+
+#ifdef DIAGNOSTIC
+	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
+		panic("vax_sgmap_free: no sgva space to free");
+#endif
+
+	if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
+	    EX_NOWAIT))
+		panic("vax_sgmap_free");
+
+	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
+}
+
+/*
+ * vax_sgmap_load:
+ *
+ *	Load a linear buffer into the map: allocate sgva space if the
+ *	map does not already have some, then write one bus PTE per VAX
+ *	page so the buffer is visible through the sgmap window.  The
+ *	result is always a single DMA segment.  Returns 0, EINVAL if
+ *	the buffer exceeds the map's size, or a vax_sgmap_alloc() error.
+ */
+int
+vax_sgmap_load(t, map, buf, buflen, p, flags, sgmap)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	void *buf;
+	bus_size_t buflen;
+	struct proc *p;		/* process owning buf, or NULL for kernel */
+	int flags;
+	struct vax_sgmap *sgmap;
+{
+	vaddr_t endva, va = (vaddr_t)buf;
+	paddr_t pa;
+	bus_addr_t dmaoffset;
+	bus_size_t dmalen;
+	long *pte, *page_table = (long *)sgmap->aps_pt;
+	int pteidx, error;
+
+	/*
+	 * Make sure that on error condition we return "no valid mappings".
+	 */
+	map->dm_mapsize = 0;
+	map->dm_nsegs = 0;
+
+	if (buflen > map->_dm_size)
+		return (EINVAL);
+
+	/*
+	 * Remember the offset into the first page and the total
+	 * transfer length.
+	 */
+	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
+	dmalen = buflen;
+
+	/*
+	 * Allocate the necessary virtual address space for the
+	 * mapping.  Round the size, since we deal with whole pages.
+	 *
+	 * vax_sgmap_alloc will deal with the appropriate spill page
+	 * allocations.  (Comment previously referred to alpha_sgmap_alloc,
+	 * a leftover from the Alpha port this was copied from.)
+	 */
+	endva = round_page(va + buflen);
+	va = trunc_page(va);
+	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
+		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
+		if (error)
+			return (error);
+	}
+
+	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
+	pte = &page_table[pteidx];
+
+	/*
+	 * Generate the DMA address.
+	 */
+	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
+	map->dm_segs[0].ds_len = dmalen;
+
+	map->_dm_pteidx = pteidx;
+	map->_dm_ptecnt = 0;
+
+	/*
+	 * Create the bus-specific page tables.
+	 * Can be done much more efficient than this.
+	 */
+	for (; va < endva; va += VAX_NBPG, pteidx++,
+	    pte = &page_table[pteidx], map->_dm_ptecnt++) {
+		/*
+		 * Get the physical address for this segment.
+		 */
+		if (p != NULL)
+			pa = pmap_extract(p->p_vmspace->vm_map.pmap, va);
+		else
+			pa = kvtophys(va);
+
+		/* An unmapped page here is a caller bug, not a soft error. */
+		if (pa == 0)
+			panic("vax_sgmap_load");
+
+		/*
+		 * Load the current PTE with this page.
+		 */
+		*pte = (pa >> VAX_PGSHIFT) | PG_V;
+	}
+
+	map->dm_mapsize = buflen;
+	map->dm_nsegs = 1;
+	return (0);
+}
+
+/*
+ * vax_sgmap_load_mbuf:
+ *
+ *	Load an mbuf chain into the map.  Not yet implemented on vax.
+ */
+int
+vax_sgmap_load_mbuf(t, map, m, flags, sgmap)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	struct mbuf *m;
+	int flags;
+	struct vax_sgmap *sgmap;
+{
+
+	panic("vax_sgmap_load_mbuf : not implemented");
+}
+
+/*
+ * vax_sgmap_load_uio:
+ *
+ *	Load a uio into the map.  Not yet implemented on vax.
+ */
+int
+vax_sgmap_load_uio(t, map, uio, flags, sgmap)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	struct uio *uio;
+	int flags;
+	struct vax_sgmap *sgmap;
+{
+
+	panic("vax_sgmap_load_uio : not implemented");
+}
+
+/*
+ * vax_sgmap_load_raw:
+ *
+ *	Load raw memory segments into the map.  Not yet implemented on vax.
+ */
+int
+vax_sgmap_load_raw(t, map, segs, nsegs, size, flags, sgmap)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	bus_dma_segment_t *segs;
+	int nsegs;
+	bus_size_t size;
+	int flags;
+	struct vax_sgmap *sgmap;
+{
+
+	panic("vax_sgmap_load_raw : not implemented");
+}
+
+/*
+ * vax_sgmap_unload:
+ *
+ *	Invalidate the bus PTEs written by vax_sgmap_load and release
+ *	the sgva space, unless it was allocated up front
+ *	(BUS_DMA_ALLOCNOW), then mark the map as having no valid
+ *	mappings.
+ */
+void
+vax_sgmap_unload(t, map, sgmap)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	struct vax_sgmap *sgmap;
+{
+	long *pte, *page_table = (long *)sgmap->aps_pt;
+	int ptecnt, pteidx;
+
+	/*
+	 * Invalidate the PTEs for the mapping.
+	 */
+	for (ptecnt = map->_dm_ptecnt, pteidx = map->_dm_pteidx,
+	    pte = &page_table[pteidx];
+	    ptecnt != 0;
+	    ptecnt--, pteidx++,
+	    pte = &page_table[pteidx]) {
+		*pte = 0;
+	}
+
+	/*
+	 * Free the virtual address space used by the mapping
+	 * if necessary.
+	 */
+	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
+		vax_sgmap_free(map, sgmap);
+
+	/*
+	 * Mark the mapping invalid.
+	 */
+	map->dm_mapsize = 0;
+	map->dm_nsegs = 0;
+}