/*	$NetBSD: isa_machdep.c,v 1.51 2001/01/10 01:15:32 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define	_I386_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/pio.h>
#include <machine/cpufunc.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/isa/isadmavar.h>

#include <uvm/uvm_extern.h>

#include "mca.h"
#if NMCA > 0
#include <machine/mca_machdep.h>	/* for MCA_system */
#endif

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
typedef void (vector) __P((void));
extern vector *IDTVEC(intr)[];

void	isa_strayintr __P((int));
void	intr_calculatemasks __P((void));
static int fakeintr __P((void *));
#if NMCA > 0
static int mca_clockfakeintr __P((void *));
#endif

/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct i386_isa_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that id_origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));
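/*
 * The cookie above ends in a zero-length array so that the bounce
 * segment array can be carved out of the same allocation as the
 * cookie itself.  A sketch of the sizing arithmetic, as done by
 * _isa_bus_dmamap_create() below ("map" here is illustrative only):
 *
 *	cookiesize = sizeof(struct i386_isa_dma_cookie);
 *	if (cookieflags & ID_MIGHT_NEED_BOUNCE)
 *		cookiesize += sizeof(bus_dma_segment_t) * map->_dm_segcnt;
 *	cookie = malloc(cookiesize, M_DMAMAP, M_NOWAIT);
 *
 * id_bouncesegs[0 .. id_nbouncesegs-1] then index into the trailing
 * storage without a second allocation.
 */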
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct i386_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit.
 */
void
isa_defaultirq()
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		setgate(&idt[ICU_OFFSET + i].gd, IDTVEC(intr)[i], 0,
		    SDT_SYS386IGT, SEL_KPL);

	/* initialize 8259's */
#if NMCA > 0
	/* level-triggered interrupts on MCA PS/2s */
	if (MCA_system)
		outb(IO_ICU1, 0x19);	/* reset; program device, four bytes */
	else
#endif
		outb(IO_ICU1, 0x11);	/* reset; program device, four bytes */

	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
#ifdef REORDER_IRQ
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif

#if NMCA > 0
	/* level-triggered interrupts on MCA PS/2s */
	if (MCA_system)
		outb(IO_ICU2, 0x19);	/* reset; program device, four bytes */
	else
#endif
		outb(IO_ICU2, 0x11);	/* reset; program device, four bytes */

	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
	outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
}
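/*
 * For reference, the 8259 initialization sequence above is:
 *
 *	ICW1 (0x11/0x19): edge- (0x11) or level- (0x19) triggered,
 *	    cascade mode, ICW4 to follow;
 *	ICW2: vector offset, so master IRQ n arrives at IDT vector
 *	    ICU_OFFSET + n and slave IRQ 8+n at ICU_OFFSET + 8 + n;
 *	ICW3: cascade wiring (the slave hangs off master line
 *	    IRQ_SLAVE, i.e. IRQ 2);
 *	ICW4: 8086 mode, optionally with automatic EOI.
 *
 * For example, with the usual ICU_OFFSET of 32 (0x20), a keyboard
 * interrupt on IRQ 1 enters the kernel through IDT vector 33.
 */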
/*
 * Handle an NMI, possibly a machine check.
 * return true to panic system, false to ignore.
 */
int
isa_nmi()
{

	log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
	return(0);
}

/*
 * Caught a stray interrupt, notify
 */
void
isa_strayintr(irq)
	int irq;
{
	static u_long strays;

	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++strays <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    strays >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
void
intr_calculatemasks()
{
	int irq, level, unusedirqs;
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffff;
	for (irq = 0; irq < ICU_LEN; irq++) {
		int levels = 0;
		for (q = intrhand[irq]; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs | unusedirqs;
	}

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1 << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_NONE];
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so imp > (tty | net | bio).
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * Since run queues may be manipulated by both the statclock and tty,
	 * network, and disk drivers, clock > imp.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * IPL_HIGH must block everything that can manipulate a run queue.
	 */
	imask[IPL_HIGH] |= imask[IPL_CLOCK];

	/*
	 * We need serial drivers to run at the absolute highest priority to
	 * avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/* And eventually calculate the complete masks. */
	for (irq = 0; irq < ICU_LEN; irq++) {
		int irqs = 1 << irq;
		for (q = intrhand[irq]; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		intrmask[irq] = irqs | (1 << IPL_TAGINTR);
	}

	/* Lastly, determine which IRQs are actually in use. */
	{
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrhand[irq])
				irqs |= 1 << irq;
		if (irqs >= 0x100) /* any IRQs >= 8 in use */
			irqs |= 1 << IRQ_SLAVE;
		imen = ~irqs;
	}
}
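/*
 * Worked example of the hierarchy above: with a disk controller on
 * IRQ 14 at IPL_BIO and a serial port on IRQ 4 at IPL_TTY,
 * imask[IPL_BIO] contains bit 14, and because imask[IPL_TTY] is
 * folded on top of IPL_SOFTSERIAL, IPL_NET and IPL_BIO, intrmask[4]
 * ends up with bit 14 set as well: while the tty handler runs, the
 * disk interrupt stays masked, but not vice versa.
 */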
static int
fakeintr(arg)
	void *arg;
{

	return 0;
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(ic, mask, type, irq)
	isa_chipset_tag_t ic;
	int mask;
	int type;
	int *irq;
{
	int i, tmp, bestirq, count;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;

		switch (intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of
			 * other handlers, and if it's smaller than the
			 * last irq like this, remember it
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}

const struct evcnt *
isa_intr_evcnt(isa_chipset_tag_t ic, int irq)
{

	/* XXX for now, no evcnt parent reported */
	return NULL;
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(ic, irq, type, level, ih_fun, ih_arg)
	isa_chipset_tag_t ic;
	int irq;
	int type;
	int level;
	int (*ih_fun) __P((void *));
	void *ih_arg;
{
	struct intrhand **p, *q, *ih;
	static struct intrhand fakehand = {fakeintr};

#if NMCA > 0
	/*
	 * Need special fake handler for PS/2 MCA clock interrupt
	 */
	if (MCA_system && irq == 0)
		fakehand.ih_fun = &mca_clockfakeintr;
#endif

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("isa_intr_establish: can't malloc handler info");

	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq or type");

#if NMCA > 0
	/* change IST_EDGE to IST_LEVEL if MCA system */
	if (MCA_system && type == IST_EDGE)
		type = IST_LEVEL;
#endif

	switch (intrtype[irq]) {
	case IST_NONE:
		intrtype[irq] = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == intrtype[irq])
			break;
	case IST_PULSE:
		if (type != IST_NONE) {
			/*
			 * We can't share interrupts in this case.
			 */
#ifdef DEBUG
			printf("intr_establish: irq %d can't share %s "
			    "with %s\n", irq,
			    isa_intr_typename(intrtype[irq]),
			    isa_intr_typename(type));
#endif
			return (NULL);
		}
		break;
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	intr_calculatemasks();

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_count = 0;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	SET_ICUS();

	return (ih);
}

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(ic, arg)
	isa_chipset_tag_t ic;
	void *arg;
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intrhand **p, *q;

	if (!LEGAL_IRQ(irq))
		panic("intr_disestablish: bogus irq");

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free(ih, M_DEVBUF);

	intr_calculatemasks();
	SET_ICUS();

	if (intrhand[irq] == NULL)
		intrtype[irq] = IST_NONE;
}
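/*
 * Typical usage from a driver's attach routine (sketch only; "sc",
 * "ia" and xxintr() stand in for a hypothetical driver's softc,
 * ISA attach arguments and interrupt handler):
 *
 *	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq,
 *	    IST_EDGE, IPL_NET, xxintr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: couldn't establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 *
 * and the matching teardown:
 *
 *	isa_intr_disestablish(ia->ia_ic, sc->sc_ih);
 */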
void
isa_attach_hook(parent, self, iba)
	struct device *parent, *self;
	struct isabus_attach_args *iba;
{
	extern struct i386_isa_chipset i386_isa_chipset;
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;

	/*
	 * Since we can only have one ISA bus, we just use a single
	 * statically allocated ISA chipset structure.  Pass it up
	 * now.
	 */
	iba->iba_ic = &i386_isa_chipset;
}

int
isa_mem_alloc(t, size, align, boundary, flags, addrp, bshp)
	bus_space_tag_t t;
	bus_size_t size, align;
	bus_addr_t boundary;
	int flags;
	bus_addr_t *addrp;
	bus_space_handle_t *bshp;
{

	/*
	 * Allocate physical address space in the ISA hole.
	 */
	return (bus_space_alloc(t, IOM_BEGIN, IOM_END - 1, size, align,
	    boundary, flags, addrp, bshp));
}

void
isa_mem_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{

	bus_space_free(t, bsh, size);
}

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct i386_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct i386_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	if (avail_end <= t->_bounce_thresh ||
	    (flags & ISABUS_DMA_32BIT) != 0) {
		/* Bouncing not necessary due to memory size. */
		map->_dm_bounce_thresh = 0;
	}
	cookieflags = 0;
	if (map->_dm_bounce_thresh != 0 ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct i386_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}
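/*
 * A concrete example of the bounce decision in
 * _isa_bus_dmamap_create(): on a machine with 64MB of RAM, avail_end
 * is above the 16MB ISA_DMA_BOUNCE_THRESHOLD, so any map created
 * without ISABUS_DMA_32BIT keeps its bounce threshold and is marked
 * ID_MIGHT_NEED_BOUNCE.  Separately, a 64KB map with 4KB pages may
 * need up to (65536 / 4096) + 1 = 17 segments; if the caller accepts
 * fewer segments than that, the map is marked for bouncing even on a
 * small-memory machine.
 */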
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */
	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */
	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
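/*
 * The bouncing above is invisible to consumers of bus_dma(9); a
 * driver doing a device write goes through the usual sequence (a
 * sketch only; sc_dmat would be &isa_bus_dma_tag and error handling
 * is elided):
 *
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *
 * If the load had to bounce, the PREWRITE sync is what copies the
 * caller's data into the bounce pages before the device sees it.
 */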
/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
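/*
 * For example, on a 64MB machine the upper bound above is
 * trunc_page(ISA_DMA_BOUNCE_THRESHOLD), so every segment handed back
 * lies below the 16MB line reachable by 24-bit ISA DMA; on an 8MB
 * machine all of physical memory already qualifies and high is
 * simply trunc_page(avail_end).  A hypothetical driver allocating a
 * descriptor ring would go through the tag rather than call this
 * directly:
 *
 *	bus_dmamem_alloc(&isa_bus_dma_tag, size, PAGE_SIZE, 0,
 *	    segs, 1, &rsegs, BUS_DMA_NOWAIT);
 */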
/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}

#if NMCA > 0
/*
 * Special fake handler for PS/2 MCA clock interrupts
 */
static int
mca_clockfakeintr(arg)
	void *arg;
{

	/* Reset clock interrupt by asserting bit 7 of port 0x61 */
	outb(0x61, inb(0x61) | 0x80);
	return 0;
}
#endif
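/*
 * On MCA PS/2 machines the clock interrupt is level-triggered (see
 * isa_defaultirq() above), so even a placeholder handler must knock
 * the timer output latch down or IRQ 0 would keep re-asserting;
 * that is why isa_intr_establish() substitutes mca_clockfakeintr()
 * for the generic fakeintr() when temporarily installing the fake
 * handler for IRQ 0 on such systems.
 */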