NetBSD/sys/dev/sbus/isp_sbus.c
mjacob 219d22404b Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.

The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).

One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexibility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.

Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.

For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.

Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).

Additional changes:

Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.

Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).

Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.

Nuke some additional __P macros.
2001-12-14 00:13:44 +00:00

598 lines
17 KiB
C

/* $NetBSD: isp_sbus.c,v 1.48 2001/12/14 00:13:47 mjacob Exp $ */
/*
* This driver, which is contained in NetBSD in the files:
*
* sys/dev/ic/isp.c
* sys/dev/ic/isp_inline.h
* sys/dev/ic/isp_netbsd.c
* sys/dev/ic/isp_netbsd.h
* sys/dev/ic/isp_target.c
* sys/dev/ic/isp_target.h
* sys/dev/ic/isp_tpublic.h
* sys/dev/ic/ispmbox.h
* sys/dev/ic/ispreg.h
* sys/dev/ic/ispvar.h
* sys/microcode/isp/asm_sbus.h
* sys/microcode/isp/asm_1040.h
* sys/microcode/isp/asm_1080.h
* sys/microcode/isp/asm_12160.h
* sys/microcode/isp/asm_2100.h
* sys/microcode/isp/asm_2200.h
* sys/pci/isp_pci.c
* sys/sbus/isp_sbus.c
*
* Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
* This driver also is shared source with the FreeBSD, OpenBSD, Linux, and
* Solaris versions. This tends to be an interesting maintenance problem.
*
* Please coordinate with Matthew Jacob on changes you wish to make here.
*/
/*
* SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
*
* Copyright (c) 1997, 2001 by Matthew Jacob
* NASA AMES Research Center
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.48 2001/12/14 00:13:47 mjacob Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <dev/ic/isp_netbsd.h>
#include <machine/intr.h>
#include <machine/autoconf.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>
/*
 * Bus-specific hooks for this SBus front end; they are handed to the
 * machine-independent isp core through the mdvec table below.
 */
static int isp_sbus_intr(void *);
static int
isp_sbus_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static u_int16_t isp_sbus_rd_reg(struct ispsoftc *, int);
static void isp_sbus_wr_reg (struct ispsoftc *, int, u_int16_t);
static int isp_sbus_mbxdma(struct ispsoftc *);
static int isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *,
    u_int16_t);
static void isp_sbus_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

/* If no ISP1000 firmware image was configured in, attach without one. */
#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE NULL
#endif
/*
 * Dispatch vector given to the machine-independent core. A per-instance
 * copy is made at attach time (sbus_mdvec) so clock/burst fields can be
 * tuned per card without touching this template.
 */
static struct ispmdvec mdvec = {
	isp_sbus_rd_isr,	/* read/decode interrupt status */
	isp_sbus_rd_reg,	/* 16-bit register read */
	isp_sbus_wr_reg,	/* 16-bit register write */
	isp_sbus_mbxdma,	/* allocate queue/command DMA resources */
	isp_sbus_dmasetup,	/* map a command's data for DMA */
	isp_sbus_dmateardown,	/* unmap a command's data */
	NULL,			/* remaining hooks unused by this front end */
	NULL,			/* (see struct ispmdvec in ispvar.h) */
	NULL,
	ISP_1000_RISC_CODE	/* firmware image, or NULL */
};
/*
 * Per-instance softc. The generic ispsoftc must be the first member:
 * the code below freely casts between struct ispsoftc * and
 * struct isp_sbussoftc *.
 */
struct isp_sbussoftc {
	struct ispsoftc sbus_isp;	/* generic softc; MUST be first */
	struct sbusdev sbus_sd;		/* SBus device linkage */
	sdparam sbus_dev;		/* SCSI parameter block (isp_param) */
	bus_space_tag_t sbus_bustag;	/* register access tag */
	bus_space_handle_t sbus_reg;	/* mapped register space */
	int sbus_node;			/* PROM node */
	int sbus_pri;			/* interrupt priority */
	struct ispmdvec sbus_mdvec;	/* per-card copy of mdvec */
	bus_dmamap_t *sbus_dmamap;	/* one DMA map per command slot */
	int16_t sbus_poff[_NREG_BLKS];	/* register-block byte offsets */
};
static int isp_match(struct device *, struct cfdata *, void *);
static void isp_sbus_attach(struct device *, struct device *, void *);

/* autoconf(9) attachment glue. */
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};
/*
 * Match any of the SBus names this family of Qlogic adapters has been
 * sold under (including the PTI rebadged boards), or the configured
 * driver name itself.
 */
static int
isp_match(struct device *parent, struct cfdata *cf, void *aux)
{
	static const char * const isp_aliases[] = {
		"PTI,ptisp", "ptisp", "SUNW,isp", "QLGC,isp"
	};
	struct sbus_attach_args *sa = aux;
	int i, rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
	for (i = 0; rv == 0 && i < 4; i++) {
		rv = (strcmp(isp_aliases[i], sa->sa_name) == 0);
	}
#ifdef DEBUG
	/* Announce the driver version once, on the first match. */
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}
/*
 * Attach: map the chip registers, work out usable SBus burst sizes,
 * establish the interrupt, then reset/init the chip and hand off to
 * the machine-independent isp_attach().
 */
static void
isp_sbus_attach(struct device *parent, struct device *self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	/* Private copy of the dispatch vector so we can tune it below. */
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		/* The PROM already mapped the registers; reuse its mapping. */
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
		    &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = PROM_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from HZ to MHz, rounding up.
		 */
		freq = (freq + 500000)/1000000;
#if 0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out what the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = PROM_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
	/* Drop bits 6 and 7 — presumably bursts the ISP cannot do. */
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	/* Pick the largest remaining burst/FIFO configuration. */
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * Some early versions of the PTI SBus adapter
	 * would fail in trying to download (via poking)
	 * FW. We give up on them.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	MEMZERO(isp->isp_param, sizeof (sdparam));

	/* Byte offsets of each register block within SBus register space. */
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	/* NOTE(review): return value not checked — confirm acceptable. */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	isp->isp_role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	/* presumably forces polled mailbox handling during bring-up —
	 * see isp_netbsd.h for the flag's consumer. */
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
	}
}
/*
 * Interrupt handler. Returns 1 if the interrupt was ours, 0 if it
 * should be offered to other handlers on the line.
 */
static int
isp_sbus_intr(void *arg)
{
	struct isp_sbussoftc *sbc = arg;
	struct ispsoftc *isp = &sbc->sbus_isp;
	u_int16_t isr, sema, mbox;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		/* Nothing pending; count it as a stray interrupt. */
		isp->isp_intbogus++;
		return (0);
	}
	/* Flag that we're on the interrupt stack while in the core. */
	sbc->sbus_isp.isp_osinfo.onintstack = 1;
	isp_intr(isp, isr, sema, mbox);
	sbc->sbus_isp.isp_osinfo.onintstack = 0;
	return (1);
}
/*
 * Translate an ispreg.h "virtual" register offset into a byte offset
 * into this card's mapped SBus register space, via the per-card
 * sbus_poff block-offset table.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

/* Shorthand for a 16-bit read through the card's bus space handle. */
#define BXR2(sbc, off) \
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)
/*
 * Read and decode interrupt state. Returns 0 when neither an interrupt
 * nor the semaphore is pending; otherwise fills in *isrp and *semap,
 * and *mbp (outgoing mailbox 0) when the semaphore is held.
 */
static int
isp_sbus_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	u_int16_t isrval, semaval;

	isrval = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	semaval = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	/* Log the raw values before masking. */
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isrval, semaval);
	isrval &= INT_PENDING_MASK(isp);
	semaval &= BIU_SEMA_LOCK;
	if (isrval == 0 && semaval == 0)
		return (0);
	*isrp = isrval;
	*semap = semaval;
	if (semaval != 0)
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	return (1);
}
/*
 * Read a 16-bit chip register, translating the virtual register
 * offset through the per-card block-offset table.
 */
static u_int16_t
isp_sbus_rd_reg(struct ispsoftc *isp, int regoff)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int off;

	off = (regoff & 0xff) +
	    sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off));
}
/*
 * Write a 16-bit chip register; offset translation as in
 * isp_sbus_rd_reg().
 */
static void
isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int off;

	off = (regoff & 0xff) +
	    sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, off, val);
}
/*
 * One-time allocation of shared DMA resources: the command lookup
 * table (xflist), the per-command DMA map array, and the request and
 * response queues (allocated, mapped into KVA, and loaded so both the
 * CPU and the ISP can address them).
 *
 * Returns 0 on success, 1 on failure with everything unwound.
 */
static int
isp_sbus_mbxdma(struct ispsoftc *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	/* Already set up (e.g. on a re-init)? */
	if (isp->isp_rquest_dma)
		return (0);

	/* Handle-indexed table mapping commands back to their XS_T. */
	n = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	MEMZERO(isp->isp_xflist, n);

	/* One DMA map per possible outstanding command. */
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		/* Partial failure: destroy the maps that were created. */
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues.
	 * 'progress' counts completed steps so the dmafail path knows
	 * exactly how much to unwind, in reverse order.
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;		/* 1: request queue memory allocated */
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;		/* 2: request queue mapped into KVA */
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;		/* 3: request queue DMA map created */
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;		/* 4: request queue DMA map loaded */
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;		/* 5: response queue memory allocated */
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;		/* 6: response queue mapped into KVA */
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;		/* 7: response queue DMA map created */
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;
	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");
	/*
	 * Unwind in reverse order of the numbered steps above. (A failed
	 * final load leaves progress at 7, so the >= 8 branch never fires
	 * here; it exists for symmetry.)
	 */
	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}
	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}
	/* All per-command maps were created if we got this far. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}
/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 *
 * Builds the (locally constructed) request 'rq' out to the shared queue
 * slot at isp->isp_reqidx. *nxtip is advanced if a continuation entry
 * is consumed. Returns CMD_QUEUED on success, CMD_COMPLETE on a mapping
 * failure, or CMD_EAGAIN when the request queue is full.
 */
static int
isp_sbus_dmasetup(struct ispsoftc *isp, XS_T *xs, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispreq_t *qep;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	/* Queue slot the locally built request will be copied into. */
	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
	if (xs->datalen == 0) {
		/* No data phase; seg count of 1 with no segments filled. */
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
	    NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
	    BUS_DMA_STREAMING) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	if (XS_CDBLEN(xs) > 12) {
		/*
		 * CDBs longer than 12 bytes use the extended request form
		 * (see mbxsync below); the data segment then goes into a
		 * continuation entry of its own.
		 */
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		onxti = *nxtip;
		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, onxti);
		*nxtip = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (*nxtip == optr) {
			/* Queue full: undo the data mapping; caller retries. */
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(isp->isp_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		/* Swizzle/copy into the queue and flush it for the chip. */
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	} else {
		rq->req_seg_count = 1;
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
	}

mbxsync:
	/* Copy the locally built request into the shared queue slot. */
	if (XS_CDBLEN(xs) > 12) {
		isp_put_extended_request(isp,
		    (ispextreq_t *)rq, (ispextreq_t *) qep);
	} else {
		isp_put_request(isp, rq, qep);
	}
	return (CMD_QUEUED);
}
/*
 * Tear down the data DMA mapping for a completed command: sync the
 * buffer for the CPU and unload the per-command map.
 */
static void
isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t map;
	int ops;

	map = sbc->sbus_dmamap[isp_handle_index(handle)];
	if (map->dm_nsegs == 0) {
		panic("%s: dma map not already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	ops = (xs->xs_control & XS_CTL_DATA_IN) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
	bus_dmamap_sync(isp->isp_dmatag, map, 0, xs->datalen, ops);
	bus_dmamap_unload(isp->isp_dmatag, map);
}