/* $NetBSD: mpt_netbsd.c,v 1.12 2007/07/27 13:06:51 tron Exp $ */

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.12 2007/07/27 13:06:51 tron Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

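/*
 * mpt_scsipi_attach:
 *
 *	Attach the controller to the scsipi midlayer: size the adapter
 *	openings from the IOC's global credit count, describe the single
 *	channel it exports, and let scsibus probe the targets behind it.
 */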
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

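	/*
	 * Carve the single request area up into per-command slots: each
	 * request_t records the physical and virtual address of its
	 * MPT_REQUEST_AREA-sized slice, with the last MPT_SENSE_SIZE bytes
	 * of that slice reserved for autosense data, and gets its own DMA
	 * map for mapping the caller's data buffer.
	 */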
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, PAGE_SIZE);
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

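/*
 * mpt_intr:
 *
 *	Interrupt handler.  Drain the IOC's reply post FIFO, handing each
 *	completed reply to mpt_done(), and report whether any work was
 *	done so shared-interrupt dispatching works correctly.
 */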
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

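/*
 * mpt_timeout:
 *
 *	Callout fired when a command has been outstanding for too long.
 *	Give the interrupt handler one last chance to claim the request;
 *	if it is still pending, log the controller state, retire the
 *	request, and fail the transfer with XS_TIMEOUT.
 */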
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

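	/*
	 * The IOC posts two kinds of replies: a bare context reply, which
	 * is just the message context of a request that completed without
	 * error, and an address reply, which points into the reply area we
	 * allocated and carries full status.  Decode which one this is
	 * before looking anything up.
	 */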
	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

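		/*
		 * Build the scatter/gather list.  The request frame has
		 * room for MPT_NSGL_FIRST() simple elements inline; if the
		 * mapping needs more, the inline list ends in chain
		 * elements pointing at further SGE_SIMPLE32 runs laid out
		 * later in the same request buffer.  Otherwise all
		 * segments fit directly in the request frame.
		 */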
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32);
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = ntodo
						* sizeof(SGE_SIMPLE32);
				}
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
						BUS_DMASYNC_PREREAD
						      : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
						BUS_DMASYNC_PREREAD
						      : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (!mpt->is_scsi) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
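	/*
	 * The port's fastest supported sync period factor and offset are
	 * taken from bits 8-15 and 16-23 of Port Page 0 Capabilities and
	 * requested back verbatim, adding DT/QAS/IU for the faster
	 * (smaller) period factors.
	 */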
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

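	/*
	 * Events flagged AckRequired have to be acknowledged explicitly:
	 * build an EVENT_ACK message in a free request slot and hand it
	 * back to the IOC.
	 */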
	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}