diff --git a/sys/arch/evbarm/integrator/int_bus_dma.c b/sys/arch/evbarm/integrator/int_bus_dma.c new file mode 100644 index 000000000000..b64d3b1da42d --- /dev/null +++ b/sys/arch/evbarm/integrator/int_bus_dma.c @@ -0,0 +1,614 @@ +/* $NetBSD: int_bus_dma.c,v 1.1 2001/10/27 16:17:51 rearnsha Exp $ */ + +/*- + * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/* + * The integrator board has memory steering hardware that means that + * the normal physical addresses used by the processor cannot be used + * for DMA. Instead we have to use the "core module alias mapping + * addresses". We don't use these for normal processor accesses since + * they are much slower than the direct addresses when accessing + * memory on the local board. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define _ARM32_BUS_DMA_PRIVATE +#include + +#include +#include +#include + +static int integrator_bus_dmamap_load_buffer __P((bus_dma_tag_t, + bus_dmamap_t, void *, bus_size_t, struct proc *, int, + vm_offset_t *, int *, int)); +static int integrator_bus_dma_inrange __P((bus_dma_segment_t *, int, + bus_addr_t)); + +/* + * Common function for loading a DMA map with a linear buffer. May + * be called by bus-specific DMA map load functions. 
+ */ +int +integrator_bus_dmamap_load(t, map, buf, buflen, p, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; +{ + vm_offset_t lastaddr; + int seg, error; + +#ifdef DEBUG_DMA + printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n", + t, map, buf, buflen, p, flags); +#endif /* DEBUG_DMA */ + + /* + * Make sure that on error condition we return "no valid mappings". + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + if (buflen > map->_dm_size) + return (EINVAL); + + seg = 0; + error = integrator_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, + &lastaddr, &seg, 1); + if (error == 0) { + map->dm_mapsize = buflen; + map->dm_nsegs = seg + 1; + } +#ifdef DEBUG_DMA + printf("dmamap_load: error=%d\n", error); +#endif /* DEBUG_DMA */ + return (error); +} + +/* + * Like _bus_dmamap_load(), but for mbufs. + */ +int +integrator_bus_dmamap_load_mbuf(t, map, m0, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct mbuf *m0; + int flags; +{ + vm_offset_t lastaddr; + int seg, error, first; + struct mbuf *m; + +#ifdef DEBUG_DMA + printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n", + t, map, m0, flags); +#endif /* DEBUG_DMA */ + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + +#ifdef DIAGNOSTIC + if ((m0->m_flags & M_PKTHDR) == 0) + panic("integrator_bus_dmamap_load_mbuf: no packet header"); +#endif /* DIAGNOSTIC */ + + if (m0->m_pkthdr.len > map->_dm_size) + return (EINVAL); + + first = 1; + seg = 0; + error = 0; + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = integrator_bus_dmamap_load_buffer(t, map, m->m_data, + m->m_len, NULL, flags, &lastaddr, &seg, first); + first = 0; + } + if (error == 0) { + map->dm_mapsize = m0->m_pkthdr.len; + map->dm_nsegs = seg + 1; + } +#ifdef DEBUG_DMA + printf("dmamap_load_mbuf: error=%d\n", error); +#endif /* DEBUG_DMA */ + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. + */ +int +integrator_bus_dmamap_load_uio(t, map, uio, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct uio *uio; + int flags; +{ + vm_offset_t lastaddr; + int seg, i, error, first; + bus_size_t minlen, resid; + struct proc *p = NULL; + struct iovec *iov; + caddr_t addr; + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + p = uio->uio_procp; +#ifdef DIAGNOSTIC + if (p == NULL) + panic("integrator_bus_dmamap_load_uio: USERSPACE but no proc"); +#endif + } + + first = 1; + seg = 0; + error = 0; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; + addr = (caddr_t)iov[i].iov_base; + + error = integrator_bus_dmamap_load_buffer(t, map, addr, minlen, + p, flags, &lastaddr, &seg, first); + first = 0; + + resid -= minlen; + } + if (error == 0) { + map->dm_mapsize = uio->uio_resid; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Common function for DMA-safe memory allocation. May be called + * by bus-specific DMA memory allocation functions. 
+ */ + +extern vm_offset_t physical_start; +extern vm_offset_t physical_freestart; +extern vm_offset_t physical_freeend; +extern vm_offset_t physical_end; + +int +integrator_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; +{ + int error; +#ifdef DEBUG_DMA + printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n", + t, size, alignment, boundary, segs, nsegs, rsegs, flags); +#endif /* DEBUG_DMA */ + error = (integrator_bus_dmamem_alloc_range(t, size, alignment, boundary, + segs, nsegs, rsegs, flags, trunc_page(physical_start), trunc_page(physical_end))); +#ifdef DEBUG_DMA + printf("dmamem_alloc: =%d\n", error); +#endif /* DEBUG_DMA */ + return(error); +} + +/* + * Common function for freeing DMA-safe memory. May be called by + * bus-specific DMA memory free functions. + */ +void +integrator_bus_dmamem_free(t, segs, nsegs) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; +{ + struct vm_page *m; + bus_addr_t addr; + struct pglist mlist; + int curseg; + +#ifdef DEBUG_DMA + printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs); +#endif /* DEBUG_DMA */ + + /* + * Build a list of pages to free back to the VM system. + */ + TAILQ_INIT(&mlist); + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += PAGE_SIZE) { + m = PHYS_TO_VM_PAGE(CM_ALIAS_TO_LOCAL(addr)); + TAILQ_INSERT_TAIL(&mlist, m, pageq); + } + } + uvm_pglistfree(&mlist); +} + +/* + * Common function for mapping DMA-safe memory. May be called by + * bus-specific DMA memory map functions. + */ +int +integrator_bus_dmamem_map(t, segs, nsegs, size, kvap, flags) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; + size_t size; + caddr_t *kvap; + int flags; +{ + vm_offset_t va; + bus_addr_t addr; + int curseg; + pt_entry_t *ptep/*, pte*/; + +#ifdef DEBUG_DMA + printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t, + segs, nsegs, (unsigned long)size, flags); +#endif /* DEBUG_DMA */ + + size = round_page(size); + va = uvm_km_valloc(kernel_map, size); + + if (va == 0) + return (ENOMEM); + + *kvap = (caddr_t)va; + + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += NBPG, va += NBPG, size -= NBPG) { +#ifdef DEBUG_DMA + printf("wiring p%lx to v%lx", CM_ALIAS_TO_LOCAL(addr), + va); +#endif /* DEBUG_DMA */ + if (size == 0) + panic("integrator_bus_dmamem_map: size botch"); + pmap_enter(pmap_kernel(), va, CM_ALIAS_TO_LOCAL(addr), + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED); + /* + * If the memory must remain coherent with the + * cache then we must make the memory uncacheable + * in order to maintain virtual cache coherency. + * We must also guarentee the cache does not already + * contain the virtal addresses we are making + * uncacheable. + */ + if (flags & BUS_DMA_COHERENT) { + cpu_cache_purgeD_rng(va, NBPG); + cpu_drain_writebuf(); + ptep = vtopte(va); + *ptep = ((*ptep) & (~PT_C | PT_B)); + tlb_flush(); + } +#ifdef DEBUG_DMA + ptep = vtopte(va); + printf(" pte=v%p *pte=%x\n", ptep, *ptep); +#endif /* DEBUG_DMA */ + } + } + pmap_update(pmap_kernel()); +#ifdef DEBUG_DMA + printf("dmamem_map: =%p\n", *kvap); +#endif /* DEBUG_DMA */ + return (0); +} + +/* + * Common functin for mmap(2)'ing DMA-safe memory. 
May be called by + * bus-specific DMA mmap(2)'ing functions. + */ +paddr_t +integrator_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; + off_t off; + int prot, flags; +{ + int i; + + for (i = 0; i < nsegs; i++) { +#ifdef DIAGNOSTIC + if (off & PGOFSET) + panic("integrator_bus_dmamem_mmap: offset unaligned"); + if (segs[i].ds_addr & PGOFSET) + panic("integrator_bus_dmamem_mmap: segment unaligned"); + if (segs[i].ds_len & PGOFSET) + panic("integrator_bus_dmamem_mmap: segment size not multiple" + " of page size"); +#endif /* DIAGNOSTIC */ + if (off >= segs[i].ds_len) { + off -= segs[i].ds_len; + continue; + } + + return arm_byte_to_page((u_long)CM_ALIAS_TO_LOCAL(segs[i].ds_addr) + off); + } + + /* Page not found. */ + return -1; +} + +/********************************************************************** + * DMA utility functions + **********************************************************************/ + +/* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrace, and the ending segment on exit. + * first indicates if this is the first invocation of this function. + */ +static int +integrator_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, + segp, first) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; + vm_offset_t *lastaddrp; + int *segp; + int first; +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + +#ifdef DEBUG_DMA + printf("integrator_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n", + buf, buflen, flags, first); +#endif /* DEBUG_DMA */ + + if (p != NULL) + pmap = p->p_vmspace->vm_map.pmap; + else + pmap = pmap_kernel(); + + lastaddr = *lastaddrp; + bmask = ~(map->_dm_boundary - 1); + + for (seg = *segp; buflen > 0; ) { + /* + * Get the physical address for this segment. + */ + (void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr); + + /* + * Make sure we're in an allowed DMA range. + */ + if (t->_ranges != NULL && + integrator_bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0) + return (EINVAL); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = NBPG - ((u_long)vaddr & PGOFSET); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (map->_dm_boundary > 0) { + baddr = (curaddr + map->_dm_boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + map->dm_segs[seg].ds_addr = LOCAL_TO_CM_ALIAS(curaddr); + map->dm_segs[seg].ds_len = sgsize; + map->dm_segs[seg]._ds_vaddr = vaddr; + first = 0; + } else { + if (curaddr == lastaddr && + (map->dm_segs[seg].ds_len + sgsize) <= + map->_dm_maxsegsz && + (map->_dm_boundary == 0 || + (map->dm_segs[seg].ds_addr & bmask) == + (LOCAL_TO_CM_ALIAS(curaddr) & bmask))) + map->dm_segs[seg].ds_len += sgsize; + else { + if (++seg >= map->_dm_segcnt) + break; + map->dm_segs[seg].ds_addr = LOCAL_TO_CM_ALIAS(curaddr); + map->dm_segs[seg].ds_len = sgsize; + map->dm_segs[seg]._ds_vaddr = vaddr; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? 
+ */ + if (buflen != 0) + return (EFBIG); /* XXX better return value here? */ + return (0); +} + +/* + * Check to see if the specified page is in an allowed DMA range. + */ +static int +integrator_bus_dma_inrange(ranges, nranges, curaddr) + bus_dma_segment_t *ranges; + int nranges; + bus_addr_t curaddr; +{ + bus_dma_segment_t *ds; + int i; + + for (i = 0, ds = ranges; i < nranges; i++, ds++) { + if (curaddr >= CM_ALIAS_TO_LOCAL(ds->ds_addr) && + round_page(curaddr) <= (CM_ALIAS_TO_LOCAL(ds->ds_addr) + ds->ds_len)) + return (1); + } + + return (0); +} + +/* + * Allocate physical memory from the given physical address range. + * Called by DMA-safe memory allocation methods. + */ +int +integrator_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs, + flags, low, high) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; + vm_offset_t low; + vm_offset_t high; +{ + vm_offset_t curaddr, lastaddr; + struct vm_page *m; + struct pglist mlist; + int curseg, error; + +#ifdef DEBUG_DMA + printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n", + t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high); +#endif /* DEBUG_DMA */ + + /* Always round the size. */ + size = round_page(size); + + /* + * Allocate pages from the VM system. + */ + TAILQ_INIT(&mlist); + error = uvm_pglistalloc(size, low, high, alignment, boundary, + &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); + if (error) + return (error); + + /* + * Compute the location, size, and number of segments actually + * returned by the VM code. + */ + m = mlist.tqh_first; + curseg = 0; + lastaddr = VM_PAGE_TO_PHYS(m); + segs[curseg].ds_addr = LOCAL_TO_CM_ALIAS(lastaddr); + segs[curseg].ds_len = PAGE_SIZE; +#ifdef DEBUG_DMA + printf("alloc: page %lx\n", lastaddr); +#endif /* DEBUG_DMA */ + m = m->pageq.tqe_next; + + for (; m != NULL; m = m->pageq.tqe_next) { + curaddr = VM_PAGE_TO_PHYS(m); +#ifdef DIAGNOSTIC + if (curaddr < low || curaddr >= high) { + printf("uvm_pglistalloc returned non-sensical" + " address 0x%lx\n", curaddr); + panic("integrator_bus_dmamem_alloc_range"); + } +#endif /* DIAGNOSTIC */ +#ifdef DEBUG_DMA + printf("alloc: page %lx\n", curaddr); +#endif /* DEBUG_DMA */ + if (curaddr == (lastaddr + PAGE_SIZE)) + segs[curseg].ds_len += PAGE_SIZE; + else { + curseg++; + segs[curseg].ds_addr = LOCAL_TO_CM_ALIAS(curaddr); + segs[curseg].ds_len = PAGE_SIZE; + } + lastaddr = curaddr; + } + + *rsegs = curseg + 1; + + return (0); +} diff --git a/sys/arch/evbarm/integrator/int_bus_dma.h b/sys/arch/evbarm/integrator/int_bus_dma.h new file mode 100644 index 000000000000..87808324c17d --- /dev/null +++ b/sys/arch/evbarm/integrator/int_bus_dma.h @@ -0,0 +1,66 @@ +/* $NetBSD: int_bus_dma.h,v 1.1 2001/10/27 16:17:51 rearnsha Exp $ */ + +/* + * Copyright (c) 2001 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _INT_BUS_DMA_H +#define _INT_BUS_DMA_H + +#include + +#ifdef _ARM32_BUS_DMA_PRIVATE + +#define CM_ALIAS_TO_LOCAL(addr) (addr & 0x0fffffff) +#define LOCAL_TO_CM_ALIAS(addr) (addr | 0x80000000) + +int integrator_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int)); +int integrator_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, int)); +int integrator_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t, + struct uio *, int)); + +int integrator_bus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size, + bus_size_t alignment, bus_size_t boundary, + bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)); +void integrator_bus_dmamem_free __P((bus_dma_tag_t tag, + bus_dma_segment_t *segs, int nsegs)); +int integrator_bus_dmamem_map __P((bus_dma_tag_t tag, + bus_dma_segment_t *segs, int nsegs, size_t size, caddr_t *kvap, + int flags)); +paddr_t integrator_bus_dmamem_mmap __P((bus_dma_tag_t tag, + bus_dma_segment_t *segs, int nsegs, off_t off, int prot, + int flags)); + +int integrator_bus_dmamem_alloc_range __P((bus_dma_tag_t tag, + bus_size_t size, bus_size_t alignment, bus_size_t boundary, + bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, + vaddr_t low, vaddr_t high)); +#endif /* _ARM32_BUS_DMA_PRIVATE */ +#endif /* _INT_BUS_DMA_H */ diff --git a/sys/arch/evbarm/integrator/integrator_boot.h b/sys/arch/evbarm/integrator/integrator_boot.h new file mode 100644 index 000000000000..ae9f9456ae16 --- /dev/null +++ b/sys/arch/evbarm/integrator/integrator_boot.h @@ -0,0 +1,20 @@ +struct intbootinfo { + union { + struct { + unsigned long bp_pagesize; + unsigned long bp_nrpages; + } u1_bp; + char filler1[256]; + } bi_u1; +#define bi_pagesize bi_u1.u1_bp.bp_pagesize +#define bi_nrpages bi_u1.u1_bp.bp_nrpages + union { + char paths[8][128]; + struct magic { + unsigned long magic; + char filler2[1024 - sizeof(unsigned long)]; + } u2_d; + } bi_u2; + char bi_cmdline[256]; + char bi_settings[2048]; +}; diff --git a/sys/arch/evbarm/integrator/integrator_machdep.c b/sys/arch/evbarm/integrator/integrator_machdep.c new file mode 100644 index 000000000000..4bc1a8eab0a8 --- /dev/null +++ b/sys/arch/evbarm/integrator/integrator_machdep.c @@ -0,0 +1,976 @@ +/* $NetBSD: integrator_machdep.c,v 1.1 2001/10/27 16:17:52 rearnsha Exp $ */ + +/* + * Copyright (c) 2001 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Copyright (c) 1997,1998 Mark Brinicombe. + * Copyright (c) 1997,1998 Causality Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Mark Brinicombe + * for the NetBSD Project. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * Machine dependant functions for kernel setup for integrator board + * + * Created : 24/11/97 + */ + +#include "opt_ddb.h" +#include "opt_pmap_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "opt_ipkdb.h" +#include "pci.h" + +void ifpga_reset(void) __attribute__((noreturn)); +/* + * Address to call from cpu_reset() to reset the machine. + * This is machine architecture dependant as it varies depending + * on where the ROM appears when you turn the MMU off. + */ + +u_int cpu_reset_address = (u_int) ifpga_reset; + +/* Define various stack sizes in pages */ +#define IRQ_STACK_SIZE 1 +#define ABT_STACK_SIZE 1 +#ifdef IPKDB +#define UND_STACK_SIZE 2 +#else +#define UND_STACK_SIZE 1 +#endif + +struct intbootinfo intbootinfo; +BootConfig bootconfig; /* Boot config storage */ +static char bootargs[MAX_BOOT_STRING + 1]; +char *boot_args = NULL; +char *boot_file = NULL; + +vm_offset_t physical_start; +vm_offset_t physical_freestart; +vm_offset_t physical_freeend; +vm_offset_t physical_end; +u_int free_pages; +vm_offset_t pagetables_start; +int physmem = 0; + +/*int debug_flags;*/ +#ifndef PMAP_STATIC_L1S +int max_processes = 64; /* Default number */ +#endif /* !PMAP_STATIC_L1S */ + +/* Physical and virtual addresses for some global pages */ +pv_addr_t systempage; +pv_addr_t irqstack; +pv_addr_t undstack; +pv_addr_t abtstack; +pv_addr_t kernelstack; + +vm_offset_t msgbufphys; + +extern u_int data_abort_handler_address; +extern u_int prefetch_abort_handler_address; +extern u_int undefined_handler_address; + +#ifdef PMAP_DEBUG +extern int pmap_debug_level; +#endif + +#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ +#define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */ +#define KERNEL_PT_VMDATA 2 /* Page tables for mapping kernel VM */ +#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2)) +#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM) + +pt_entry_t kernel_pt_table[NUM_KERNEL_PTS]; + +struct user *proc0paddr; + +/* Prototypes */ + +void consinit __P((void)); + +void map_section __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa, + int cacheable)); +void map_pagetable __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa)); +void map_entry __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa)); +void map_entry_nc __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa)); +void map_entry_ro __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa)); +vm_size_t map_chunk __P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va, + vm_offset_t pa, vm_size_t size, u_int acc, + u_int flg)); + +void process_kernel_args __P((char *)); +void data_abort_handler __P((trapframe_t *frame)); +void prefetch_abort_handler __P((trapframe_t *frame)); +void undefinedinstruction_bounce __P((trapframe_t *frame)); +void zero_page_readonly __P((void)); +void zero_page_readwrite __P((void)); +extern void configure __P((void)); +extern void db_machine_init __P((void)); +extern void parse_mi_bootargs __P((char *args)); +extern void dumpsys __P((void)); + +/* A load of console goo. 
*/ +#include "vga.h" +#if (NVGA > 0) +#include +#include +#include +#include +#endif + +#include "pckbc.h" +#if (NPCKBC > 0) +#include +#include +#endif + +#include "com.h" +#if (NCOM > 0) +#include +#include +#ifndef CONCOMADDR +#define CONCOMADDR 0x3f8 +#endif +#endif + +#define CONSPEED B115200 +#ifndef CONSPEED +#define CONSPEED B9600 /* TTYDEF_SPEED */ +#endif +#ifndef CONMODE +#define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */ +#endif + +int comcnspeed = CONSPEED; +int comcnmode = CONMODE; + +#include "plcom.h" +#if (NPLCOM > 0) +#include +#include + +#include +#include +#include +#endif + +#ifndef CONSDEVNAME +#define CONSDEVNAME "plcom" +#endif + +#ifndef PLCONSPEED +#define PLCONSPEED B38400 +#endif +#ifndef PLCONMODE +#define PLCONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */ +#endif +#ifndef PLCOMCNUNIT +#define PLCOMCNUNIT -1 +#endif + +int plcomcnspeed = PLCONSPEED; +int plcomcnmode = PLCONMODE; + +#if 0 +extern struct consdev kcomcons; +static void kcomcnputc(dev_t, int); +#endif + +/* + * void cpu_reboot(int howto, char *bootstr) + * + * Reboots the system + * + * Deal with any syncing, unmounting, dumping and shutdown hooks, + * then reset the CPU. + */ + +void +cpu_reboot(howto, bootstr) + int howto; + char *bootstr; +{ +#ifdef DIAGNOSTIC + /* info */ + printf("boot: howto=%08x curproc=%p\n", howto, curproc); +#endif + + /* + * If we are still cold then hit the air brakes + * and crash to earth fast + */ + if (cold) { + doshutdownhooks(); + printf("The operating system has halted.\n"); + printf("Please press any key to reboot.\n\n"); + cngetc(); + printf("rebooting...\n"); + ifpga_reset(); + /*NOTREACHED*/ + } + + /* Disable console buffering */ +/* cnpollc(1);*/ + + /* + * If RB_NOSYNC was not specified sync the discs. + * Note: Unless cold is set to 1 here, syslogd will die during the unmount. + * It looks like syslogd is getting woken up only to find that it cannot + * page part of the binary in as the filesystem has been unmounted. + */ + if (!(howto & RB_NOSYNC)) + bootsync(); + + /* Say NO to interrupts */ + splhigh(); + + /* Do a dump if requested. */ + if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) + dumpsys(); + + /* Run any shutdown hooks */ + doshutdownhooks(); + + /* Make sure IRQ's are disabled */ + IRQdisable; + + if (howto & RB_HALT) { + printf("The operating system has halted.\n"); + printf("Please press any key to reboot.\n\n"); + cngetc(); + } + + printf("rebooting...\n"); + ifpga_reset(); + /*NOTREACHED*/ +} + +/* + * Mapping table for core kernel memory. This memory is mapped at init + * time with section mappings. + */ +struct l1_sec_map { + vm_offset_t va; + vm_offset_t pa; + vm_size_t size; + int flags; +} l1_sec_table[] = { +#if NPLCOM > 0 && defined(PLCONSOLE) + { UART0_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART0, 1024 * 1024, 0}, + { UART1_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART1, 1024 * 1024, 0}, +#endif +#if NPCI > 0 + { IFPGA_PCI_IO_VBASE, IFPGA_PCI_IO_BASE, IFPGA_PCI_IO_VSIZE, 0}, + { IFPGA_PCI_CONF_VBASE, IFPGA_PCI_CONF_BASE, IFPGA_PCI_CONF_VSIZE, 0}, +#endif + + { 0, 0, 0, 0 } +}; + +/* + * u_int initarm(struct ebsaboot *bootinfo) + * + * Initial entry point on startup. This gets called before main() is + * entered. + * It should be responsible for setting up everything that must be + * in place when main is called. + * This includes + * Taking a copy of the boot configuration structure. + * Initialising the physical console so characters can be printed. 
+ * Setting up page tables for the kernel + * Relocating the kernel to the bottom of physical memory + */ + +u_int +initarm(bootinfo) + struct intbootinfo *bootinfo; +{ + int loop; + int loop1; + u_int l1pagetable; + u_int l2pagetable; + extern char page0[], page0_end[]; + extern int etext asm ("_etext"); + extern int end asm ("_end"); + pv_addr_t kernel_l1pt; + pv_addr_t kernel_ptpt; +#if NPLCOM > 0 && defined(PLCONSOLE) + static struct bus_space plcom_bus_space; +#endif + + +#if 0 + cn_tab = &kcomcons; +#endif + /* + * Heads up ... Setup the CPU / MMU / TLB functions + */ + if (set_cpufuncs()) + panic("cpu not recognized!"); + + /* - intbootinfo.bt_memstart) / NBPG */; + +#if NPLCOM > 0 && defined(PLCONSOLE) + /* + * Initialise the diagnostic serial console + * This allows a means of generating output during initarm(). + * Once all the memory map changes are complete we can call consinit() + * and not have to worry about things moving. + */ + + if (PLCOMCNUNIT == 0) { + ifpga_create_io_bs_tag(&plcom_bus_space, (void*)0xfd600000); + plcomcnattach(&plcom_bus_space, 0, plcomcnspeed, + IFPGA_UART_CLK, plcomcnmode, PLCOMCNUNIT); + } else if (PLCOMCNUNIT == 1) { + ifpga_create_io_bs_tag(&plcom_bus_space, (void*)0xfd700000); + plcomcnattach(&plcom_bus_space, 0, plcomcnspeed, + IFPGA_UART_CLK, plcomcnmode, PLCOMCNUNIT); + } +#endif + + /* Talk to the user */ + printf("\nNetBSD/integrator booting ...\n"); + +#if 0 + if (intbootinfo.bt_magic != BT_MAGIC_NUMBER_EBSA + && intbootinfo.bt_magic != BT_MAGIC_NUMBER_CATS) + panic("Incompatible magic number passed in boot args\n"); +#endif + +/* { + int loop; + for (loop = 0; loop < 8; ++loop) { + printf("%08x\n", *(((int *)bootinfo)+loop)); + } + }*/ + + /* + * Ok we have the following memory map + * + * virtual address == physical address apart from the areas: + * 0x00000000 -> 0x000fffff which is mapped to + * top 1MB of physical memory + * 0x00100000 -> 0x0fffffff which is mapped to + * physical addresses 0x00100000 -> 0x0fffffff + * 0x10000000 -> 0x1fffffff which is mapped to + * physical addresses 0x00000000 -> 0x0fffffff + * 0x20000000 -> 0xefffffff which is mapped to + * physical addresses 0x20000000 -> 0xefffffff + * 0xf0000000 -> 0xf03fffff which is mapped to + * physical addresses 0x00000000 -> 0x003fffff + * + * This means that the kernel is mapped suitably for continuing + * execution, all I/O is mapped 1:1 virtual to physical and + * physical memory is accessible. + * + * The initarm() has the responsibility for creating the kernel + * page tables. + * It must also set up various memory pointers that are used + * by pmap etc. + */ + + /* + * Examine the boot args string for options we need to know about + * now. 
+ */ +#if 0 + process_kernel_args((char *)intbootinfo.bt_args); +#endif + + printf("initarm: Configuring system ...\n"); + + /* + * Set up the variables that define the availablilty of + * physical memory + */ + physical_start = 0 /*intbootinfo.bt_memstart*/; + physical_freestart = physical_start; + +#if 0 + physical_end = /*intbootinfo.bt_memend*/ /*intbootinfo.bi_nrpages * NBPG */ 32*1024*1024; +#else + { + volatile unsigned long *cm_sdram + = (volatile unsigned long *)0x10000020; + + switch ((*cm_sdram >> 2) & 0x7) + { + case 0: + physical_end = 16 * 1024 * 1024; + break; + case 1: + physical_end = 32 * 1024 * 1024; + break; + case 2: + physical_end = 64 * 1024 * 1024; + break; + case 3: + physical_end = 128 * 1024 * 1024; + break; + case 4: + physical_end = 256 * 1024 * 1024; + break; + default: + printf("CM_SDRAM retuns unknown value, using 16M\n"); + physical_end = 16 * 1024 * 1024; + break; + } + } +#endif + + physical_freeend = physical_end; + free_pages = (physical_end - physical_start) / NBPG; + + /* Set up the bootconfig structure for the benefit of pmap.c */ + bootconfig.dramblocks = 1; + bootconfig.dram[0].address = physical_start; + bootconfig.dram[0].pages = free_pages; + + physmem = (physical_end - physical_start) / NBPG; + + /* Tell the user about the memory */ + printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem, + physical_start, physical_end - 1); + + /* + * Ok the kernel occupies the bottom of physical memory. + * The first free page after the kernel can be found in + * intbootinfo->bt_memavail + * We now need to allocate some fixed page tables to get the kernel + * going. + * We allocate one page directory and a number page tables and store + * the physical addresses in the kernel_pt_table array. + * + * Ok the next bit of physical allocation may look complex but it is + * simple really. I have done it like this so that no memory gets + * wasted during the allocation of various pages and tables that are + * all different sizes. + * The start addresses will be page aligned. + * We allocate the kernel page directory on the first free 16KB boundry + * we find. + * We allocate the kernel page tables on the first 4KB boundry we find. + * Since we allocate at least 3 L2 pagetables we know that we must + * encounter at least one 16KB aligned address. + */ + +#ifdef VERBOSE_INIT_ARM + printf("Allocating page tables\n"); +#endif + + /* Update the address of the first free 16KB chunk of physical memory */ + physical_freestart = ((uintptr_t) &end - KERNEL_TEXT_BASE + PGOFSET) + & ~PGOFSET; +#if 0 + physical_freestart += (kernexec->a_syms + sizeof(int) + + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)) + + (NBPG - 1)) & ~(NBPG - 1); +#endif + + free_pages -= (physical_freestart - physical_start) / NBPG; +#ifdef VERBOSE_INIT_ARM + printf("freestart = %#lx, free_pages = %d (%#x)\n", + physical_freestart, free_pages, free_pages); +#endif + + /* Define a macro to simplify memory allocation */ +#define valloc_pages(var, np) \ + alloc_pages((var).pv_pa, (np)); \ + (var).pv_va = KERNEL_TEXT_BASE + (var).pv_pa - physical_start; + +#define alloc_pages(var, np) \ + (var) = physical_freestart; \ + physical_freestart += ((np) * NBPG); \ + free_pages -= (np); \ + memset((char *)(var), 0, ((np) * NBPG)); + + loop1 = 0; + kernel_l1pt.pv_pa = 0; + for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) { + /* Are we 16KB aligned for an L1 ? 
*/ + if ((physical_freestart & (PD_SIZE - 1)) == 0 + && kernel_l1pt.pv_pa == 0) { + valloc_pages(kernel_l1pt, PD_SIZE / NBPG); + } else { + alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG); + ++loop1; + } + } + + /* This should never be able to happen but better confirm that. */ + if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (PD_SIZE-1)) != 0) + panic("initarm: Failed to align the kernel page directory\n"); + + /* + * Allocate a page for the system page mapped to V0x00000000 + * This page will just contain the system vectors and can be + * shared by all processes. + */ + alloc_pages(systempage.pv_pa, 1); + + /* Allocate a page for the page table to map kernel page tables*/ + valloc_pages(kernel_ptpt, PT_SIZE / NBPG); + + /* Allocate stacks for all modes */ + valloc_pages(irqstack, IRQ_STACK_SIZE); + valloc_pages(abtstack, ABT_STACK_SIZE); + valloc_pages(undstack, UND_STACK_SIZE); + valloc_pages(kernelstack, UPAGES); + +#ifdef VERBOSE_INIT_ARM + printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va); + printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va); + printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va); + printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va); +#endif + + alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG); + + /* + * Ok we have allocated physical pages for the primary kernel + * page tables + */ + +#ifdef VERBOSE_INIT_ARM + printf("Creating L1 page table at %#lx\n", kernel_l1pt.pv_pa); +#endif + + /* + * Now we start consturction of the L1 page table + * We start by mapping the L2 page tables into the L1. + * This means that we can replace L1 mappings later on if necessary + */ + l1pagetable = kernel_l1pt.pv_pa; + + /* Map the L2 pages tables in the L1 page table */ + map_pagetable(l1pagetable, 0x00000000, + kernel_pt_table[KERNEL_PT_SYS]); + map_pagetable(l1pagetable, KERNEL_BASE, + kernel_pt_table[KERNEL_PT_KERNEL]); + for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) + map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000, + kernel_pt_table[KERNEL_PT_VMDATA + loop]); + map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE, + kernel_ptpt.pv_pa); + +#ifdef VERBOSE_INIT_ARM + printf("Mapping kernel\n"); +#endif + + /* Now we fill in the L2 pagetable for the kernel static code/data */ + l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL]; + + { + u_int logical; + size_t textsize = (uintptr_t) &etext - KERNEL_TEXT_BASE; + size_t totalsize = (uintptr_t) &end - KERNEL_TEXT_BASE; + + /* Round down text size and round up total size + */ + textsize = textsize & ~PGOFSET; + totalsize = (totalsize + PGOFSET) & ~PGOFSET; + /* logical = map_chunk(l1pagetable, l2pagetable, KERNEL_BASE, + physical_start, KERNEL_TEXT_BASE - KERNEL_BASE, + AP_KRW, PT_CACHEABLE); */ + logical = map_chunk(l1pagetable, l2pagetable, + KERNEL_TEXT_BASE, physical_start, textsize, + AP_KRW, PT_CACHEABLE); + logical += map_chunk(l1pagetable, l2pagetable, + KERNEL_TEXT_BASE + logical, physical_start + logical, + totalsize - textsize, AP_KRW, PT_CACHEABLE); +#if 0 + logical += map_chunk(0, l2pagetable, KERNEL_BASE + logical, + physical_start + logical, kernexec->a_syms + sizeof(int) + + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)), + AP_KRW, PT_CACHEABLE); +#endif + } + +#ifdef VERBOSE_INIT_ARM + printf("Constructing L2 page tables\n"); +#endif + + /* Map the boot arguments page */ +#if 0 + map_entry_ro(l2pagetable, intbootinfo.bt_vargp, intbootinfo.bt_pargp); +#endif + + /* Map the stack pages */ + 
map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa, + IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE); + map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa, + ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE); + map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa, + UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE); + map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa, + UPAGES * NBPG, AP_KRW, PT_CACHEABLE); + map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, + PD_SIZE, AP_KRW, 0); + + /* Map the page table that maps the kernel pages */ + map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa); + + /* + * Map entries in the page table used to map PTE's + * Basically every kernel page table gets mapped here + */ + /* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */ + l2pagetable = kernel_ptpt.pv_pa; + map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)), + kernel_pt_table[KERNEL_PT_KERNEL]); + map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)), + kernel_ptpt.pv_pa); + map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)), + kernel_pt_table[KERNEL_PT_SYS]); + for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) + map_entry_nc(l2pagetable, ((KERNEL_VM_BASE + + (loop * 0x00400000)) >> (PGSHIFT-2)), + kernel_pt_table[KERNEL_PT_VMDATA + loop]); + + /* + * Map the system page in the kernel page table for the bottom 1Meg + * of the virtual memory map. + */ + l2pagetable = kernel_pt_table[KERNEL_PT_SYS]; +#if 1 + /* MULTI-ICE requires that page 0 is NC/NB so that it can download + the cache-clean code there. */ + map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa); +#else + map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa); +#endif + /* Map the core memory needed before autoconfig */ + loop = 0; + while (l1_sec_table[loop].size) { + vm_size_t sz; + +#ifdef VERBOSE_INIT_ARM + printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa, + l1_sec_table[loop].pa + l1_sec_table[loop].size - 1, + l1_sec_table[loop].va); +#endif + for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE) + map_section(l1pagetable, l1_sec_table[loop].va + sz, + l1_sec_table[loop].pa + sz, + l1_sec_table[loop].flags); + ++loop; + } + + /* + * Now we have the real page tables in place so we can switch to them. + * Once this is done we will be running with the REAL kernel page tables. + */ + + /* Switch tables */ +#ifdef VERBOSE_INIT_ARM + printf("freestart = %#lx, free_pages = %d (%#x)\n", + physical_freestart, free_pages, free_pages); + printf("switching to new L1 page table @%#lx...", kernel_l1pt.pv_pa); +#endif + + setttb(kernel_l1pt.pv_pa); + +#ifdef VERBOSE_INIT_ARM + printf("done!\n"); +#endif + +#ifdef PLCONSOLE + /* + * The IFPGA registers have just moved. + * Detach the diagnostic serial port and reattach at the new address. + */ + plcomcndetach(); +#endif + + /* + * XXX this should only be done in main() but it useful to + * have output earlier ... + */ + consinit(); + +#ifdef VERBOSE_INIT_ARM + printf("bootstrap done.\n"); +#endif + + /* Right set up the vectors at the bottom of page 0 */ + memcpy((char *)0x00000000, page0, page0_end - page0); + + /* We have modified a text page so sync the icache */ + cpu_cache_syncI(); + + /* + * Pages were allocated during the secondary bootstrap for the + * stacks for different CPU modes. + * We must now set the r13 registers in the different CPU modes to + * point to these stacks. + * Since the ARM stacks use STMFD etc. we must set r13 to the top end + * of the stack memory. 
+ */ + printf("init subsystems: stacks "); + + set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG); + set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG); + set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG); + + /* + * Well we should set a data abort handler. + * Once things get going this will change as we will need a proper handler. + * Until then we will use a handler that just panics but tells us + * why. + * Initialisation of the vectors will just panic on a data abort. + * This just fills in a slighly better one. + */ + printf("vectors "); + data_abort_handler_address = (u_int)data_abort_handler; + prefetch_abort_handler_address = (u_int)prefetch_abort_handler; + undefined_handler_address = (u_int)undefinedinstruction_bounce; + + /* At last ! + * We now have the kernel in physical memory from the bottom upwards. + * Kernel page tables are physically above this. + * The kernel is mapped to KERNEL_TEXT_BASE + * The kernel data PTs will handle the mapping of 0xf1000000-0xf3ffffff + * The page tables are mapped to 0xefc00000 + */ + + /* Initialise the undefined instruction handlers */ + printf("undefined "); + undefined_init(); + + /* Boot strap pmap telling it where the kernel page table is */ + printf("pmap "); + pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt); + + /* Setup the IRQ system */ + printf("irq "); + irq_init(); + + printf("done.\n"); + +#ifdef IPKDB + /* Initialise ipkdb */ + ipkdb_init(); + if (boothowto & RB_KDB) + ipkdb_connect(0); +#endif + +#ifdef DDB + printf("ddb: "); + db_machine_init(); +#if 0 + ddb_init(end[0], end + 1, esym); +#endif + + if (boothowto & RB_KDB) + Debugger(); +#endif + + /* We return the new stack pointer address */ + return(kernelstack.pv_va + USPACE_SVC_STACK_TOP); +} + +void +process_kernel_args(args) + char *args; +{ + + boothowto = 0; + + /* Make a local copy of the bootargs */ + strncpy(bootargs, args, MAX_BOOT_STRING); + + args = bootargs; + boot_file = bootargs; + + /* Skip the kernel image filename */ + while (*args != ' ' && *args != 0) + ++args; + + if (*args != 0) + *args++ = 0; + + while (*args == ' ') + ++args; + + boot_args = args; + + printf("bootfile: %s\n", boot_file); + printf("bootargs: %s\n", boot_args); + + parse_mi_bootargs(boot_args); +} + +void +consinit(void) +{ + static int consinit_called = 0; +#if NPLCOM > 0 && defined(PLCONSOLE) + static struct bus_space plcom_bus_space; +#endif +#if 0 + char *console = CONSDEVNAME; +#endif + + if (consinit_called != 0) + return; + + consinit_called = 1; + +#if NPLCOM > 0 && defined(PLCONSOLE) + if (PLCOMCNUNIT == 0) { + ifpga_create_io_bs_tag(&plcom_bus_space, + (void*)UART0_BOOT_BASE); + if (plcomcnattach(&plcom_bus_space, 0, plcomcnspeed, + IFPGA_UART_CLK, plcomcnmode, PLCOMCNUNIT)) + panic("can't init serial console"); + return; + } else if (PLCOMCNUNIT == 1) { + ifpga_create_io_bs_tag(&plcom_bus_space, + (void*)UART0_BOOT_BASE); + if (plcomcnattach(&plcom_bus_space, 0, plcomcnspeed, + IFPGA_UART_CLK, plcomcnmode, PLCOMCNUNIT)) + panic("can't init serial console"); + return; + } +#endif +#if (NCOM > 0) + if (comcnattach(&isa_io_bs_tag, CONCOMADDR, comcnspeed, + COM_FREQ, comcnmode)) + panic("can't init serial console @%x", CONCOMADDR); + return; +#endif + panic("No serial console configured"); +} + +#if 0 +static bus_space_handle_t kcom_base = (bus_space_handle_t) (DC21285_PCI_IO_VBASE + CONCOMADDR); + +u_int8_t footbridge_bs_r_1(void *, bus_space_handle_t, bus_size_t); +void footbridge_bs_w_1(void *, 
bus_space_handle_t, bus_size_t, u_int8_t); + +#define KCOM_GETBYTE(r) footbridge_bs_r_1(0, kcom_base, (r)) +#define KCOM_PUTBYTE(r,v) footbridge_bs_w_1(0, kcom_base, (r), (v)) + +static int +kcomcngetc(dev_t dev) +{ + int stat, c; + + /* block until a character becomes available */ + while (!ISSET(stat = KCOM_GETBYTE(com_lsr), LSR_RXRDY)) + ; + + c = KCOM_GETBYTE(com_data); + stat = KCOM_GETBYTE(com_iir); + return c; +} + +/* + * Console kernel output character routine. + */ +static void +kcomcnputc(dev_t dev, int c) +{ + int timo; + + /* wait for any pending transmission to finish */ + timo = 150000; + while (!ISSET(KCOM_GETBYTE(com_lsr), LSR_TXRDY) && --timo) + continue; + + KCOM_PUTBYTE(com_data, c); + + /* wait for this transmission to complete */ + timo = 1500000; + while (!ISSET(KCOM_GETBYTE(com_lsr), LSR_TXRDY) && --timo) + continue; +} + +static void +kcomcnpollc(dev_t dev, int on) +{ +} + +struct consdev kcomcons = { + NULL, NULL, kcomcngetc, kcomcnputc, kcomcnpollc, NULL, + NODEV, CN_NORMAL +}; + +#endif diff --git a/sys/arch/evbarm/integrator/intmmu.S b/sys/arch/evbarm/integrator/intmmu.S new file mode 100644 index 000000000000..1a6fe914d173 --- /dev/null +++ b/sys/arch/evbarm/integrator/intmmu.S @@ -0,0 +1,150 @@ +/* $NetBSD: intmmu.S,v 1.1 2001/10/27 16:17:52 rearnsha Exp $ */ + +/* + * Copyright (c) 2001 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "assym.h" +#include +#include +#include + + .text + +ASENTRY_NP(integrator_start) + mov r6, #0x16000000 /* UART0 Physical base*/ + mov r3, #'A' + str r3, [r6] /* Let the world know we are alive */ + +/* + * At this time the MMU is off. + * We build up an initial memory map at 0x8000 that we can use to get + * the kernel running from the top of memory. All mappings in this table + * use L1 section maps. 
+ */ + +/* + * Set Virtual == Physical + */ + mov r3, #(AP_KRW << AP_SECTION_SHIFT) + add r3, r3, #(L1_SECTION) + mov r2, #0x100000 /* advance by 1MB */ + mov r1, #0x8000 /* page table start */ + mov r0, #0x1000 /* page table size */ + +Lflat: + str r3, [r1], #0x0004 + add r3, r3, r2 + subs r0, r0, #1 + bgt Lflat + +/* + * Map VA 0xa0100000->0xa03fffff to PA 0x00000000->0x002fffff + */ + mov r3, #(AP_KRW << AP_SECTION_SHIFT) + add r3, r3, #(L1_SECTION) + mov r1, #0x8000 /* page table start */ + add r1, r1, #(0xa00 * 4) /* offset to 0xa00xxxxx */ + add r1, r1, #(0x001 * 4) /* offset to 0xa01xxxxx */ + mov r0, #47 +Lkern: + str r3, [r1], #0x0004 /* 0xa010000-0xa03fffff */ + add r3, r3, r2 + subs r0, r0, #1 + bgt Lkern +/* + * Mapping the peripheral register region (0x10000000->0x1fffffff) linearly + * would require 256MB of virtual memory (as much space as the entire kernel + * virtual space). So we map the first 1M of each 16MB sub-space into the + * region VA 0xfd000000->0xfdffffff; this should map enough of the peripheral + * space to at least get us up and running. + */ + mov r3, #(AP_KRW << AP_SECTION_SHIFT) + add r3, r3, #L1_SECTION + add r3, r3, #0x10000000 /* Peripherals base */ + mov r1, #0x8000 /* page table start */ + add r1, r1, #(0xfd0 * 4) + mov r2, #0x01000000 /* 16MB increment. */ + mov r0, #16 +Lperiph: + str r3, [r1], #4 /* 0xfd000000-0xfdffffff */ + add r3, r3, r2 + subs r0, r0, #1 + bgt Lperiph + +/* + * We now have our page table ready, so load it up and light the blue + * touch paper. + */ + + /* set the location of the L1 page table */ + mov r1, #0x8000 + mcr p15, 0, r1, c2, c0, 0 + + /* Flush the old TLBs (just in case) */ + mcr p15, 0, r1, c8, c7, 0 + mov r2, #'B' + strb r2, [r6] + + /* Set the Domain Access register. Very important! */ + mov r1, #1 + mcr p15, 0, r1, c3, c0, 0 + + /* + * set mmu bit (don't set anything else for now, we don't know + * what sort of CPU we have yet. + */ + mov r1, #CPU_CONTROL_MMU_ENABLE + +/* + * This is where it might all start to go wrong if the cpu fitted to your + * integrator does not have an MMU. + */ + /* fetch current control state */ + mrc p15, 0, r2, c1, c0, 0 + orr r2, r2, r1 + + /* set new control state */ + mcr p15, 0, r2, c1, c0, 0 + + mov r0, r0 + mov r0, r0 + mov r0, r0 + + /* emit a char. Uart is now at 0xfd600000 */ + mov r6, #0xfd000000 + add r6, r6, #0x00600000 + mov r2, #'C' + strb r2, [r6] + + /* jump to kernel space */ + mov r0, #0x0200 + + /* Switch to kernel VM and really set the ball rolling. */ + ldr pc, Lstart + +Lstart: .long start diff --git a/sys/arch/evbarm/integrator/pci_machdep.c b/sys/arch/evbarm/integrator/pci_machdep.c new file mode 100644 index 000000000000..0cbacd231f54 --- /dev/null +++ b/sys/arch/evbarm/integrator/pci_machdep.c @@ -0,0 +1,50 @@ +/* $NetBSD: pci_machdep.c,v 1.1 2001/10/27 16:17:52 rearnsha Exp $ */ + +/*- + * Copyright (c) 2001 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+void
+pci_conf_interrupt(pci_chipset_tag_t pc, int bus, int dev, int func,
+    int swiz, int *iline)
+{
+	printf("pci_conf_interrupt(pc(%lx), bus(%d), dev(%d), func(%d), swiz(%d), *iline(%p)\n", (unsigned long)pc, bus, dev, func, swiz, iline);
+	if (dev >= 9)
+		*iline = IFPGA_INTRNUM_PCIINT0 +
+		    (((dev - 9) + (func - 1)) & 3);
+}
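
The whole of int_bus_dma.c turns on the idea spelled out in its opening comment: the CPU reaches local SDRAM through its normal physical addresses, but bus masters must go through the core-module alias window, so every segment address published in dm_segs is rewritten with LOCAL_TO_CM_ALIAS() and every address handed back to the VM system is first converted with CM_ALIAS_TO_LOCAL(). The stand-alone sketch below is not part of the patch; it is a user-space illustration of that round trip, with the two macros copied from int_bus_dma.h and an arbitrary, made-up sample address.

/*
 * Illustrative only: the macros are copied from int_bus_dma.h; the
 * sample address is hypothetical.  Build with any C compiler and run
 * to see the local <-> core-module-alias round trip.
 */
#include <stdio.h>

#define CM_ALIAS_TO_LOCAL(addr)	(addr & 0x0fffffff)
#define LOCAL_TO_CM_ALIAS(addr)	(addr | 0x80000000)

int
main(void)
{
	unsigned long local = 0x00123000UL;		/* hypothetical local SDRAM page */
	unsigned long alias = LOCAL_TO_CM_ALIAS(local);	/* what a DMA segment carries */
	unsigned long back  = CM_ALIAS_TO_LOCAL(alias);	/* what goes back to the VM system */

	printf("local  0x%08lx\n", local);
	printf("alias  0x%08lx\n", alias);
	printf("local' 0x%08lx\n", back);

	return (back == local) ? 0 : 1;
}

The direction of the translation matches the file's stated motivation: alias accesses are slower than direct ones, so the kernel keeps using local addresses for its own loads, stores and page tables, and only publishes the 0x80000000-based alias in the bus_dma segments that devices will use.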