NetBSD/sys/dev/ic/siop_common.c
bouyer fc92601259 Fixes related to QUEUE FULL status:
- move status handling to siop_scsicmd_end(); it's better there than in siop_intr()
- define 2 internal SIOP statuses, for "no status reported by device" and
  the reset condition
- add a list of "urgent" commands, to be executed before the list of commands
  queued the normal way; this is used for commands which were aborted
  by a QUEUE FULL and have to be requeued in order.
- Don't allow sending a Q_TAG message that doesn't immediately follow an IDENTIFY
2000-10-23 23:18:10 +00:00


/* $NetBSD: siop_common.c,v 1.11 2000/10/23 23:18:10 bouyer Exp $ */
/*
* Copyright (c) 2000 Manuel Bouyer.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Manuel Bouyer
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>
#include <machine/endian.h>
#include <machine/bus.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar.h>
#include <dev/ic/siopvar_common.h>
#undef DEBUG
#undef DEBUG_DR
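/*
 * Common chip reset and initialisation: software-reset the chip via
 * ISTAT_SRST, reprogram the SCSI and DMA control registers (arbitration,
 * interrupt enables, selection timeout, host SCSI ID), start the clock
 * doubler/quadrupler when the chip has one, and finally call the bus
 * front-end's sc_reset hook.
 */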
void
siop_common_reset(sc)
struct siop_softc *sc;
{
u_int32_t stest3;
/* reset the chip */
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
delay(1000);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
/* init registers */
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
0xff & ~(SIEN1_HTH | SIEN1_GEN));
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
(0xb << STIME0_SEL_SHIFT));
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
sc->sc_link.scsipi_scsi.adapter_target | SCID_RRE);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
1 << sc->sc_link.scsipi_scsi.adapter_target);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
(sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
/* enable clock doubler or quadrupler if appropriate */
if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
STEST1_DBLEN);
if (sc->features & SF_CHIP_QUAD) {
/* wait for PLL to lock */
while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
SIOP_STEST4) & STEST4_LOCK) == 0)
delay(10);
} else {
/* data sheet says 20us - more won't hurt */
delay(100);
}
/* halt scsi clock, select doubler/quad, restart clock */
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
stest3 | STEST3_HSC);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
STEST1_DBLEN | STEST1_DBLSEL);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
} else {
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
}
if (sc->features & SF_CHIP_FIFO)
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
CTEST5_DFS);
sc->sc_reset(sc);
}
/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
struct siop_cmd *siop_cmd;
{
int i;
struct siop_softc *sc = siop_cmd->siop_sc;
struct scsipi_xfer *xs = siop_cmd->xs;
int target = xs->sc_link->scsipi_scsi.target;
int lun = xs->sc_link->scsipi_scsi.lun;
siop_cmd->siop_tables.id = htole32(sc->targets[target]->id);
memset(siop_cmd->siop_tables.msg_out, 0, 8);
siop_cmd->siop_tables.msg_out[0] = MSG_IDENTIFY(lun, 1);
siop_cmd->siop_tables.t_msgout.count= htole32(1);
if (sc->targets[target]->status == TARST_ASYNC) {
if (sc->targets[target]->flags & TARF_WIDE) {
sc->targets[target]->status = TARST_WIDE_NEG;
siop_wdtr_msg(siop_cmd, 1, MSG_EXT_WDTR_BUS_16_BIT);
} else if (sc->targets[target]->flags & TARF_SYNC) {
sc->targets[target]->status = TARST_SYNC_NEG;
siop_sdtr_msg(siop_cmd, 1, sc->minsync, sc->maxoff);
} else {
sc->targets[target]->status = TARST_OK;
}
} else if (sc->targets[target]->status == TARST_OK &&
(sc->targets[target]->flags & TARF_TAG) &&
siop_cmd->status != CMDST_SENSE) {
siop_cmd->flags |= CMDFL_TAG;
}
siop_cmd->siop_tables.status =
htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */
siop_cmd->siop_tables.cmd.count =
htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
siop_cmd->siop_tables.cmd.addr =
htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ||
siop_cmd->status == CMDST_SENSE) {
for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
siop_cmd->siop_tables.data[i].count =
htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
siop_cmd->siop_tables.data[i].addr =
htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
}
}
siop_table_sync(siop_cmd, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
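/*
 * Handle a WDTR (wide data transfer request) message left in msg_in,
 * either as the answer to a negotiation we started or as a target-initiated
 * one. Updates the SCNTL3_EWS bit in the per-target id word and the chip
 * register, and returns SIOP_NEG_MSGOUT when a reply message was built in
 * msg_out, SIOP_NEG_ACK when the negotiation is complete.
 */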
int
siop_wdtr_neg(siop_cmd)
struct siop_cmd *siop_cmd;
{
struct siop_softc *sc = siop_cmd->siop_sc;
struct siop_target *siop_target = siop_cmd->siop_target;
int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;
if (siop_target->status == TARST_WIDE_NEG) {
/* we initiated wide negotiation */
switch (tables->msg_in[3]) {
case MSG_EXT_WDTR_BUS_8_BIT:
printf("%s: target %d using 8bit transfers\n",
sc->sc_dev.dv_xname, target);
siop_target->flags &= ~TARF_ISWIDE;
sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
break;
case MSG_EXT_WDTR_BUS_16_BIT:
if (siop_target->flags & TARF_WIDE) {
printf("%s: target %d using 16bit transfers\n",
sc->sc_dev.dv_xname, target);
siop_target->flags |= TARF_ISWIDE;
sc->targets[target]->id |= (SCNTL3_EWS << 24);
break;
}
/* FALLTHROUGH */
default:
/*
* hmm, we got more than we can handle; shouldn't
* happen. Reject, and stay async.
*/
siop_target->flags &= ~TARF_ISWIDE;
siop_target->status = TARST_OK;
printf("%s: rejecting invalid wide negotiation from "
"target %d (%d)\n", sc->sc_dev.dv_xname, target,
tables->msg_in[3]);
tables->t_msgout.count= htole32(1);
tables->msg_out[0] = MSG_MESSAGE_REJECT;
return SIOP_NEG_MSGOUT;
}
tables->id = htole32(sc->targets[target]->id);
bus_space_write_1(sc->sc_rt, sc->sc_rh,
SIOP_SCNTL3,
(sc->targets[target]->id >> 24) & 0xff);
/* we now need to do sync */
if (siop_target->flags & TARF_SYNC) {
siop_target->status = TARST_SYNC_NEG;
siop_sdtr_msg(siop_cmd, 0, sc->minsync, sc->maxoff);
return SIOP_NEG_MSGOUT;
} else {
siop_target->status = TARST_OK;
return SIOP_NEG_ACK;
}
} else {
/* target initiated wide negotiation */
if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
&& (siop_target->flags & TARF_WIDE)) {
printf("%s: target %d using 16bit transfers\n",
sc->sc_dev.dv_xname, target);
siop_target->flags |= TARF_ISWIDE;
sc->targets[target]->id |= SCNTL3_EWS << 24;
} else {
printf("%s: target %d using 8bit transfers\n",
sc->sc_dev.dv_xname, target);
siop_target->flags &= ~TARF_ISWIDE;
sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
}
tables->id = htole32(sc->targets[target]->id);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
(sc->targets[target]->id >> 24) & 0xff);
/*
* we reset the wide parameters, so fall back to async;
* don't schedule a sync negotiation, the target should initiate it
*/
siop_target->status = TARST_OK;
siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
return SIOP_NEG_MSGOUT;
}
}
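/*
 * Handle an SDTR (synchronous data transfer request) message from msg_in.
 * The requested period is looked up in scf_period[] for the current clock;
 * on a match the SCF/ULTRA bits and the offset are programmed into the
 * per-target id word and the SCNTL3/SXFER registers, otherwise the target
 * falls back to asynchronous transfers. Returns SIOP_NEG_MSGOUT when a
 * reply message was queued, SIOP_NEG_ACK otherwise.
 */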
int
siop_sdtr_neg(siop_cmd)
struct siop_cmd *siop_cmd;
{
struct siop_softc *sc = siop_cmd->siop_sc;
struct siop_target *siop_target = siop_cmd->siop_target;
int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
int sync, offset, i;
int send_msgout = 0;
struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;
sync = tables->msg_in[3];
offset = tables->msg_in[4];
if (siop_target->status == TARST_SYNC_NEG) {
/* we initiated sync negotiation */
siop_target->status = TARST_OK;
#ifdef DEBUG
printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
if (offset > sc->maxoff || sync < sc->minsync ||
sync > sc->maxsync)
goto reject;
for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
i++) {
if (sc->clock_period != scf_period[i].clock)
continue;
if (scf_period[i].period == sync) {
/* ok, found it. we are now synchronous. */
printf("%s: target %d now synchronous at "
"%sMhz, offset %d\n", sc->sc_dev.dv_xname,
target, scf_period[i].rate, offset);
sc->targets[target]->id &=
~(SCNTL3_SCF_MASK << 24);
sc->targets[target]->id |= scf_period[i].scf
<< (24 + SCNTL3_SCF_SHIFT);
if (sync < 25) /* Ultra */
sc->targets[target]->id |=
SCNTL3_ULTRA << 24;
else
sc->targets[target]->id &=
~(SCNTL3_ULTRA << 24);
sc->targets[target]->id &=
~(SXFER_MO_MASK << 8);
sc->targets[target]->id |=
(offset & SXFER_MO_MASK) << 8;
goto end;
}
}
/*
* we didn't find it in our table, do async and send reject
* msg
*/
reject:
send_msgout = 1;
tables->t_msgout.count= htole32(1);
tables->msg_out[0] = MSG_MESSAGE_REJECT;
printf("%s: target %d asynchronous\n", sc->sc_dev.dv_xname,
target);
sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
} else { /* target initiated sync neg */
#ifdef DEBUG
printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
if (offset == 0 || sync > sc->maxsync) { /* async */
goto async;
}
if (offset > sc->maxoff)
offset = sc->maxoff;
if (sync < sc->minsync)
sync = sc->minsync;
/* look for sync period */
for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
i++) {
if (sc->clock_period != scf_period[i].clock)
continue;
if (scf_period[i].period == sync) {
/* ok, found it. we are now synchronous. */
printf("%s: target %d now synchronous at "
"%sMhz, offset %d\n", sc->sc_dev.dv_xname,
target, scf_period[i].rate, offset);
sc->targets[target]->id &=
~(SCNTL3_SCF_MASK << 24);
sc->targets[target]->id |= scf_period[i].scf
<< (24 + SCNTL3_SCF_SHIFT);
if (sync < 25) /* Ultra */
sc->targets[target]->id |=
SCNTL3_ULTRA << 24;
else
sc->targets[target]->id &=
~(SCNTL3_ULTRA << 24);
sc->targets[target]->id &=
~(SXFER_MO_MASK << 8);
sc->targets[target]->id |=
(offset & SXFER_MO_MASK) << 8;
siop_sdtr_msg(siop_cmd, 0, sync, offset);
send_msgout = 1;
goto end;
}
}
async:
printf("%s: target %d asynchronous\n",
sc->sc_dev.dv_xname, target);
sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
siop_sdtr_msg(siop_cmd, 0, 0, 0);
send_msgout = 1;
}
end:
#ifdef DEBUG
printf("id now 0x%x\n", sc->targets[target]->id);
#endif
tables->id = htole32(sc->targets[target]->id);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
(sc->targets[target]->id >> 24) & 0xff);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
(sc->targets[target]->id >> 8) & 0xff);
if (send_msgout) {
return SIOP_NEG_MSGOUT;
} else {
return SIOP_NEG_ACK;
}
}
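/*
 * Build an extended SDTR message at the given byte offset of msg_out and
 * update t_msgout.count. For illustration only, assuming the standard
 * SCSI-2 message codes from <dev/scsipi/scsi_message.h>, a call such as
 * siop_sdtr_msg(siop_cmd, 1, 12, 8) placed right after the IDENTIFY byte
 * produces the bytes 0x01 (EXTENDED), 0x03 (length), 0x01 (SDTR),
 * 0x0c (period), 0x08 (offset). siop_wdtr_msg() below does the same for
 * WDTR, which carries a 2-byte body (code 0x03 plus the bus width).
 */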
void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
struct siop_cmd *siop_cmd;
int offset;
int ssync, soff;
{
siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_SDTR;
siop_cmd->siop_tables.msg_out[offset + 3] = ssync;
siop_cmd->siop_tables.msg_out[offset + 4] = soff;
siop_cmd->siop_tables.t_msgout.count =
htole32(offset + MSG_EXT_SDTR_LEN + 2);
}
void
siop_wdtr_msg(siop_cmd, offset, wide)
struct siop_cmd *siop_cmd;
int offset;
{
siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_WDTR;
siop_cmd->siop_tables.msg_out[offset + 3] = wide;
siop_cmd->siop_tables.t_msgout.count =
htole32(offset + MSG_EXT_WDTR_LEN + 2);
}
void
siop_minphys(bp)
struct buf *bp;
{
minphys(bp);
}
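/*
 * scsipi ioctl entry point. SCBUSACCEL records per-target capabilities
 * (tagged queuing, wide, sync) and flags the target for negotiation on
 * its next command; SCBUSIORESET pulses SCNTL1_RST to reset the SCSI bus.
 */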
int
siop_ioctl(link, cmd, arg, flag, p)
struct scsipi_link *link;
u_long cmd;
caddr_t arg;
int flag;
struct proc *p;
{
struct siop_softc *sc = link->adapter_softc;
u_int8_t scntl1;
int s;
switch (cmd) {
case SCBUSACCEL:
{
struct scbusaccel_args *sp = (struct scbusaccel_args *)arg;
s = splbio();
if (sp->sa_lun == 0) {
if (sp->sa_flags & SC_ACCEL_TAGS) {
sc->targets[sp->sa_target]->flags |= TARF_TAG;
printf("%s: target %d using tagged queuing\n",
sc->sc_dev.dv_xname, sp->sa_target);
}
if ((sp->sa_flags & SC_ACCEL_WIDE) &&
(sc->features & SF_BUS_WIDE))
sc->targets[sp->sa_target]->flags |= TARF_WIDE;
if (sp->sa_flags & SC_ACCEL_SYNC)
sc->targets[sp->sa_target]->flags |= TARF_SYNC;
if ((sp->sa_flags & (SC_ACCEL_SYNC | SC_ACCEL_WIDE)) ||
sc->targets[sp->sa_target]->status == TARST_PROBING)
sc->targets[sp->sa_target]->status =
TARST_ASYNC;
}
/* allocate a lun sw entry for this device */
siop_add_dev(sc, sp->sa_target, sp->sa_lun);
/*
* if we can do tagged queueing, inform the upper layer
* we can have SIOP_NTAG concurrent commands
*/
if (sc->targets[sp->sa_target]->flags & TARF_TAG)
link->openings = SIOP_NTAG;
splx(s);
return 0;
}
case SCBUSIORESET:
s = splbio();
scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
scntl1 | SCNTL1_RST);
/* minimum 25 us, more time won't hurt */
delay(100);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
splx(s);
return (0);
default:
return (ENOTTY);
}
}
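/*
 * Save the current data pointer: SCRATCHA+1 holds the index of the
 * scatter entry being transferred and DBC the bytes not yet moved;
 * for data-out phases, account for data still sitting in the DMA and
 * SCSI FIFOs, then rewrite the scatter entry so that the transfer can
 * later resume where it stopped.
 */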
void
siop_sdp(siop_cmd)
struct siop_cmd *siop_cmd;
{
/* save data pointer. Handle async only for now */
int offset, dbc, sstat;
struct siop_softc *sc = siop_cmd->siop_sc;
scr_table_t *table; /* table to patch */
if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
== 0)
return; /* no data pointers to save */
offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
if (offset >= SIOP_NSG) {
printf("%s: bad offset in siop_sdp (%d)\n",
sc->sc_dev.dv_xname, offset);
return;
}
table = &siop_cmd->siop_xfer->tables.data[offset];
#ifdef DEBUG_DR
printf("sdp: offset %d count=%d addr=0x%x ", offset,
table->count, table->addr);
#endif
dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
/* need to account for stale data in the FIFO */
int dfifo = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
if (sc->features & SF_CHIP_FIFO) {
dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
SIOP_CTEST5) & CTEST5_BOMASK) << 8;
dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
} else {
dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
}
sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
if (sstat & SSTAT0_OLF)
dbc++;
if (sstat & SSTAT0_ORF)
dbc++;
if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
SIOP_SSTAT2);
if (sstat & SSTAT2_OLF1)
dbc++;
if (sstat & SSTAT2_ORF1)
dbc++;
}
/* clear the FIFO */
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
CTEST3_CLF);
}
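/*
 * Patch the scatter entry. With illustrative numbers: a 0x1000-byte
 * entry at 0x00100000 with dbc == 0x0400 bytes left becomes
 * addr = 0x00100000 + 0x1000 - 0x0400 = 0x00100c00, count = 0x0400.
 */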
table->addr =
htole32(le32toh(table->addr) + le32toh(table->count) - dbc);
table->count = htole32(dbc);
#ifdef DEBUG_DR
printf("now count=%d addr=0x%x\n", table->count, table->addr);
#endif
}
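/*
 * Clear the DMA FIFO: set CTEST3_CLF and wait (up to about 1ms) for the
 * chip to clear the bit again once the FIFO has been flushed.
 */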
void
siop_clearfifo(sc)
struct siop_softc *sc;
{
int timeout = 0;
int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
#ifdef DEBUG_INTR
printf("DMA fifo not empty !\n");
#endif
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
ctest3 | CTEST3_CLF);
while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
CTEST3_CLF) != 0) {
delay(1);
if (++timeout > 1000) {
printf("clear fifo failed\n");
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
bus_space_read_1(sc->sc_rt, sc->sc_rh,
SIOP_CTEST3) & ~CTEST3_CLF);
return;
}
}
}
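/*
 * Handle a SCSI bus mode change (SBMC interrupt): wait for DIFFSENSE to
 * stabilise, then program STEST2_DIF and STEST0 for the single-ended,
 * differential or LVD mode reported by STEST4. Returns 1 on success,
 * 0 on an invalid mode or timeout.
 */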
int
siop_modechange(sc)
struct siop_softc *sc;
{
int retry;
int sist0, sist1, stest2, stest4;
for (retry = 0; retry < 5; retry++) {
/*
* datasheet says to wait 100ms and re-read SIST1,
* to check that DIFFSENSE is stable.
* We may delay() 5 times for 100ms at interrupt time;
* hopefully this will not happen often.
*/
delay(100000);
sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
if (sist1 & SIEN1_SBMC)
continue; /* we got an irq again */
stest4 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
STEST4_MODE_MASK;
stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
switch(stest4) {
case STEST4_MODE_DIF:
printf("%s: switching to differential mode\n",
sc->sc_dev.dv_xname);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
stest2 | STEST2_DIF);
break;
case STEST4_MODE_SE:
printf("%s: switching to single-ended mode\n",
sc->sc_dev.dv_xname);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
stest2 & ~STEST2_DIF);
break;
case STEST4_MODE_LVD:
printf("%s: switching to LVD mode\n",
sc->sc_dev.dv_xname);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
stest2 & ~STEST2_DIF);
break;
default:
printf("%s: invalid SCSI mode 0x%x\n",
sc->sc_dev.dv_xname, stest4);
return 0;
}
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST0,
stest4 >> 2);
return 1;
}
printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
sc->sc_dev.dv_xname);
return 0;
}
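/*
 * Reset the SCSI bus by asserting SCNTL1_RST for at least 25us.
 */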
void
siop_resetbus(sc)
struct siop_softc *sc;
{
int scntl1;
scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
scntl1 | SCNTL1_RST);
/* minimum 25 us, more time won't hurt */
delay(100);
bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}