Implement polled command mode, and use it for writing system dump

and for initial autoconf probes; the latter eliminates the need for
deferred configuration (and makes dmesg a bit nicer).

g/c EDF_IODONE flag - protect against interrupt by calling tsleep()
at splbio in worker thread

g/c unneeded stuff, improve some autoconf messages
This commit is contained in:
jdolecek 2001-05-04 12:58:34 +00:00
parent 63ee3f7941
commit 5b3932d176
4 changed files with 246 additions and 189 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: ed_mca.c,v 1.5 2001/04/23 06:10:08 jdolecek Exp $ */
/* $NetBSD: ed_mca.c,v 1.6 2001/05/04 12:58:34 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -102,7 +102,9 @@ static void __edstart __P((struct ed_softc*, struct buf *));
static void bad144intern __P((struct ed_softc *));
static void edworker __P((void *));
static void ed_spawn_worker __P((void *));
static void edmcadone __P((struct ed_softc *));
static void edmcadone __P((struct ed_softc *, struct buf *));
static void ed_bio __P((struct ed_softc *, int, int));
static void ed_bio_done __P((struct ed_softc *));
static struct dkdriver eddkdriver = { edmcastrategy };
@ -118,16 +120,17 @@ ed_mca_probe(parent, match, aux)
u_int16_t cmd_args[2];
struct edc_mca_softc *sc = (void *) parent;
struct ed_attach_args *eda = (void *) aux;
int found = 1;
/*
* Get Device Configuration (09).
*/
cmd_args[0] = 6; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0;
if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0))
return (0);
if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
found = 0;
return (1);
return (found);
}
static void
@ -162,13 +165,13 @@ ed_mca_attach(parent, self, aux)
ed->cyl, ed->heads, ed->sectors,
ed->sc_capacity);
printf("%s: %u spares/cyl, %s.%s.%s.%s.%s\n",
printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
ed->sc_dev.dv_xname, ed->spares,
(ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
(ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
(ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
(ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SeconOK"
(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
);
/* Create a DMA map for mapping individual transfer bufs */
@ -226,6 +229,8 @@ ed_mca_attach(parent, self, aux)
config_pending_incr();
kthread_create(ed_spawn_worker, (void *) ed);
ed->sc_flags |= EDF_INIT;
}
void
@ -322,9 +327,7 @@ done:
}
static void
__edstart(ed, bp)
struct ed_softc *ed;
struct buf *bp;
ed_bio(struct ed_softc *ed, int async, int poll)
{
u_int16_t cmd_args[4];
int error=0;
@ -333,23 +336,16 @@ __edstart(ed, bp)
u_int8_t head;
u_int8_t sector;
WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
(bp->b_flags & B_READ) ? "read" : "write",
bp->b_bcount, bp->b_resid, bp->b_rawblkno),
DEBUG_XFERS);
ed->sc_bp = bp;
/* Get physical bus mapping for buf. */
if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
bp->b_data, bp->b_bcount, NULL,
ed->sc_data, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {
/*
* Use our DMA safe memory to get data to/from device.
*/
if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
ed->sc_dmamkva, bp->b_bcount, NULL,
ed->sc_dmamkva, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
printf("%s: unable to load raw data for xfer, errno=%d\n",
ed->sc_dev.dv_xname, error);
@ -358,35 +354,33 @@ __edstart(ed, bp)
ed->sc_flags |= EDF_BOUNCEBUF;
/* If data write, copy the data to our bounce buffer. */
if ((bp->b_flags & B_READ) == 0)
memcpy(ed->sc_dmamkva, bp->b_data, bp->b_bcount);
if (!ed->sc_read)
memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
}
ed->sc_flags |= EDF_DMAMAP_LOADED;
track = bp->b_rawblkno / ed->sectors;
track = ed->sc_rawblkno / ed->sectors;
head = track % ed->heads;
cyl = track / ed->heads;
sector = bp->b_rawblkno % ed->sectors;
sector = ed->sc_rawblkno % ed->sectors;
WDCDEBUG_PRINT(("__edstart %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
cyl, sector, head),
DEBUG_XFERS);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
ed->sc_flags |= EDF_DK_BUSY;
mca_disk_busy();
/* Read or Write Data command */
cmd_args[0] = 2; /* Options 0000010 */
cmd_args[1] = bp->b_bcount / DEV_BSIZE;
cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
cmd_args[3] = ((cyl & 0x3E0) >> 5);
if (edc_run_cmd(ed->edc_softc,
(bp->b_flags & B_READ) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, 1)) {
(ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, async, poll)) {
printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
mca_disk_unbusy();
error = EIO;
}
@ -395,13 +389,53 @@ __edstart(ed, bp)
ed->sc_error = error;
}
/*
 * Start the I/O described by `bp': record the buf's transfer parameters
 * in the softc and kick off an asynchronous transfer via ed_bio().
 * Called from the worker thread, which then sleeps until the command
 * completes.
 */
static void
__edstart(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
(bp->b_flags & B_READ) ? "read" : "write",
bp->b_bcount, bp->b_resid, bp->b_rawblkno),
DEBUG_XFERS);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
ed->sc_flags |= EDF_DK_BUSY;
/* Copy the buf's transfer parameters into the softc for ed_bio(). */
ed->sc_data = bp->b_data;
ed->sc_rawblkno = bp->b_rawblkno;
ed->sc_bcount = bp->b_bcount;
ed->sc_read = bp->b_flags & B_READ;
/* async=1, poll=0: do not busy-wait; completion is interrupt-driven. */
ed_bio(ed, 1, 0);
}
static void
edmcadone(ed)
ed_bio_done(ed)
struct ed_softc *ed;
{
struct buf *bp = ed->sc_bp;
/*
* If read transfer finished without error and using a bounce
* buffer, copy the data to buf.
*/
if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
ed->sc_flags &= ~EDF_BOUNCEBUF;
/* Unload buf from DMA map */
if (ed->sc_flags & EDF_DMAMAP_LOADED) {
bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
ed->sc_flags &= ~EDF_DMAMAP_LOADED;
}
mca_disk_unbusy();
}
static void
edmcadone(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
DEBUG_XFERS);
@ -413,31 +447,14 @@ edmcadone(ed)
bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
}
/*
* If read transfer finished without error and using a bounce
* buffer, copy the data to buf.
*/
if ((bp->b_flags & B_ERROR) == 0 && (ed->sc_flags & EDF_BOUNCEBUF)
&& (bp->b_flags & B_READ)) {
memcpy(bp->b_data, ed->sc_dmamkva, bp->b_bcount);
}
ed->sc_flags &= ~EDF_BOUNCEBUF;
/* Unload buf from DMA map */
if (ed->sc_flags & EDF_DMAMAP_LOADED) {
bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
ed->sc_flags &= ~EDF_DMAMAP_LOADED;
}
ed_bio_done(ed);
/* If disk was busied, unbusy it now */
if (ed->sc_flags & EDF_DK_BUSY) {
disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
ed->sc_flags &= ~EDF_DK_BUSY;
mca_disk_unbusy();
}
ed->sc_flags &= ~EDF_IODONE;
#if NRND > 0
rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
@ -506,7 +523,7 @@ edmcaopen(dev, flag, fmt, p)
WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
wd = device_lookup(&ed_cd, DISKUNIT(dev));
if (wd == NULL)
if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
return (ENXIO);
if ((error = ed_lock(wd)) != 0)
@ -898,8 +915,9 @@ edmcasize(dev)
}
/* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
static int wddoingadump = 0;
static int wddumprecalibrated = 0;
static int eddoingadump = 0;
static int eddumprecalibrated = 0;
static int eddumpmulti = 1;
/*
* Dump core after a system crash.
@ -911,30 +929,28 @@ edmcadump(dev, blkno, va, size)
caddr_t va;
size_t size;
{
struct ed_softc *wd; /* disk unit to do the I/O */
struct ed_softc *ed; /* disk unit to do the I/O */
struct disklabel *lp; /* disk's disklabel */
int part; // , err;
int nblks; /* total number of sectors left to write */
/* Check if recursive dump; if so, punt. */
if (wddoingadump)
if (eddoingadump)
return EFAULT;
wddoingadump = 1;
eddoingadump = 1;
wd = device_lookup(&ed_cd, DISKUNIT(dev));
if (wd == NULL)
ed = device_lookup(&ed_cd, DISKUNIT(dev));
if (ed == NULL)
return (ENXIO);
part = DISKPART(dev);
#if 0
/* Make sure it was initialized. */
if (wd->drvp->state < READY)
if ((ed->sc_flags & EDF_INIT) == 0)
return ENXIO;
#endif
/* Convert to disk sectors. Request must be a multiple of size. */
lp = wd->sc_dk.dk_label;
lp = ed->sc_dk.dk_label;
if ((size % lp->d_secsize) != 0)
return EFAULT;
nblks = size / lp->d_secsize;
@ -948,50 +964,34 @@ edmcadump(dev, blkno, va, size)
blkno += lp->d_partitions[part].p_offset;
/* Recalibrate, if first dump transfer. */
if (wddumprecalibrated == 0) {
wddumprecalibrated = 1;
if (eddumprecalibrated == 0) {
eddumprecalibrated = 1;
eddumpmulti = 8;
#if 0
wd->drvp->state = RESET;
#endif
}
while (nblks > 0) {
#if 0
wd->sc_wdc_bio.blkno = blkno;
wd->sc_wdc_bio.flags = ATA_POLL;
wd->sc_wdc_bio.bcount = lp->d_secsize;
wd->sc_wdc_bio.databuf = va;
#ifndef WD_DUMP_NOT_TRUSTED
switch (wdc_ata_bio(wd->drvp, &wd->sc_wdc_bio)) {
case WDC_TRY_AGAIN:
panic("wddump: try again");
break;
case WDC_QUEUED:
panic("wddump: polled command has been queued");
break;
case WDC_COMPLETE:
break;
}
if (err != 0) {
printf("\n");
return err;
}
#else /* WD_DUMP_NOT_TRUSTED */
/* Let's just talk about this first... */
printf("ed%d: dump addr 0x%x, cylin %d, head %d, sector %d\n",
unit, va, cylin, head, sector);
delay(500 * 1000); /* half a second */
#endif
#endif /* 0 */
ed->sc_data = va;
ed->sc_rawblkno = blkno;
ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
ed->sc_read = 0;
ed_bio(ed, 0, 1);
if (ed->sc_error)
return (ed->sc_error);
ed_bio_done(ed);
/* update block count */
nblks -= 1;
blkno += 1;
va += lp->d_secsize;
nblks -= min(nblks, eddumpmulti);
blkno += min(nblks, eddumpmulti);
va += min(nblks, eddumpmulti) * lp->d_secsize;
}
wddoingadump = 0;
return (ESPIPE);
eddoingadump = 0;
return (0);
}
#ifdef HAS_BAD144_HANDLING
@ -1032,7 +1032,8 @@ ed_get_params(ed)
*/
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0;
if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno, cmd_args, 2, 0))
if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
cmd_args, 2, 0, 1))
return (1);
ed->spares = ed->sc_status_block[1] >> 8;
@ -1109,21 +1110,16 @@ edworker(arg)
ed->sc_error = 0;
s = splbio();
__edstart(ed, bp);
splx(s);
/*
* Wait until the command executes; edc_intr() wakes
* us up.
*/
if (ed->sc_error == 0
&& (ed->sc_flags & EDF_IODONE) == 0) {
if (ed->sc_error == 0)
(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
edc_cmd_wait(ed->edc_softc, ed->sc_devno, 5);
}
/* Handle i/o results */
s = splbio();
edmcadone(ed);
edmcadone(ed, bp);
splx(s);
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: edc_mca.c,v 1.8 2001/04/25 02:33:09 simonb Exp $ */
/* $NetBSD: edc_mca.c,v 1.9 2001/05/04 12:58:34 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -41,7 +41,6 @@
* for MCA rev. 2.2 in hands, thanks to Scott Telford <st@epcc.ed.ac.uk>.
*
* TODO:
* - finish support for kernel memory crash dump
* - move the MCA DMA controller (edc_setup_dma()) goo to device driver
* independant location
* - improve error recovery
@ -82,7 +81,8 @@
#include <dev/mca/edvar.h>
#include <dev/mca/edcvar.h>
#define DASD_MAXDEVS 7
#define EDC_ATTN_MAXTRIES 10000 /* How many times check for unbusy */
struct edc_mca_softc {
struct device sc_dev;
@ -99,10 +99,9 @@ struct edc_mca_softc {
int sc_flags;
#define DASD_QUIET 0x01 /* don't dump cmd error info */
#define DASD_MAXDEVS 8
struct ed_softc *sc_ed[DASD_MAXDEVS];
int sc_maxdevs; /* maximum # of devs supported by
* controller */
struct ed_softc sc_controller;
};
int edc_mca_probe __P((struct device *, struct cfdata *, void *));
@ -116,11 +115,11 @@ struct cfattach edc_mca_ca = {
#define DMA_EXEC 0x1A
static int edc_intr __P((void *));
static void edc_attach_ed __P((struct device *));
static void edc_dump_status_block __P((struct edc_mca_softc *, int, int));
static int edc_setup_dma __P((struct edc_mca_softc *, struct buf *,
static int edc_setup_dma __P((struct edc_mca_softc *, int,
bus_addr_t, bus_size_t));
static int edc_do_attn __P((struct edc_mca_softc *, int, int, int));
static int edc_cmd_wait __P((struct edc_mca_softc *, int, int, int));
int
edc_mca_probe(parent, match, aux)
@ -149,6 +148,9 @@ edc_mca_attach(parent, self, aux)
int pos2, pos3, pos4;
int irq, drq, iobase;
const char *typestr;
struct ed_softc *ed;
struct ed_attach_args eda;
int devno, maxdevs;
pos2 = mca_conf_read(ma->ma_mc, ma->ma_slot, 2);
pos3 = mca_conf_read(ma->ma_mc, ma->ma_slot, 3);
@ -260,26 +262,22 @@ edc_mca_attach(parent, self, aux)
* controllers support two disks.
*/
if (ma->ma_id == MCA_PRODUCT_IBM_ESDIC_IG)
sc->sc_maxdevs = 1;
maxdevs = 1;
else
sc->sc_maxdevs = 2;
/* Defer probe for individual disks until interrupts are enabled. */
config_interrupts(self, edc_attach_ed);
}
maxdevs = 2;
/*
* Try to attach ed* at edc? if any configured and installed now
* that interrupts are enabled.
* Initialize the controller ed softc. We could do without this,
* but absence of checks for controller devno simplifies code logic
* somewhat.
*/
sc->sc_ed[DASD_DEVNO_CONTROLLER] = &sc->sc_controller;
strcpy(sc->sc_controller.sc_dev.dv_xname, sc->sc_dev.dv_xname);/*safe*/
/*
* Reset controller and attach individual disks. ed attach routine
* uses polling so that this works with interrupts disabled.
*/
static void
edc_attach_ed(self)
struct device *self;
{
struct edc_mca_softc *sc = (void *) self;
struct ed_softc *ed;
struct ed_attach_args eda;
int devno;
/* Do a reset to ensure sane state after warm boot. */
if (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_BUSY) {
@ -293,9 +291,17 @@ edc_attach_ed(self)
edc_do_attn(sc, ATN_RESET_ATTACHMENT, DASD_DEVNO_CONTROLLER,0);
}
/* Wait until the reset completes. */
while(bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_BUSY)
delay(1);
/*
* Since interrupts are disabled ATM, it's necessary
* to detect the interrupt request and call edc_intr()
* explicitly. See also edc_run_cmd().
*/
while(bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_BUSY) {
if (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_INTR)
edc_intr(sc);
delay(100);
}
/*
* Get dummy ed_softc to be used during probe. Once a disk is
@ -309,11 +315,10 @@ edc_attach_ed(self)
sc->sc_flags |= DASD_QUIET;
/* check for attached disks */
for(devno=0; devno < sc->sc_maxdevs; devno++) {
for(devno=0; devno < maxdevs; devno++) {
eda.sc_devno = devno;
eda.sc_dmat = sc->sc_dmat;
sc->sc_ed[devno] = ed;
sc->sc_ed[devno] =
(void *) config_found_sm(self, &eda, NULL, NULL);
}
@ -323,7 +328,19 @@ edc_attach_ed(self)
/* cleanup */
FREE(ed, M_TEMP);
/* XXX disestablish interrupt if no disks found ? */
/*
* Check if there are any disks attached. If not, disestablish
* the interrupt.
*/
for(devno=0; devno < maxdevs; devno++) {
if (sc->sc_ed[devno] && (sc->sc_ed[devno]->sc_flags & EDF_INIT))
break;
}
if (devno == maxdevs) {
printf("%s: disabling controller (no drives attached)\n",
sc->sc_dev.dv_xname);
mca_intr_disestablish(ma->ma_mc, sc->sc_ih);
}
}
void
@ -376,19 +393,18 @@ edc_intr(arg)
* Check the status block length against our supported maximum length
* and fetch the data.
*/
if (bus_space_read_1(sc->sc_iot, sc->sc_ioh,BSR) & BSR_SIFR_FULL
&& intr_id != ISR_RESET_COMPLETED) {
if (bus_space_read_1(sc->sc_iot, sc->sc_ioh,BSR) & BSR_SIFR_FULL) {
size_t len;
int i;
sifr = le16toh(bus_space_read_2(sc->sc_iot, sc->sc_ioh, SIFR));
len = (sifr & 0xff00) >> 8;
if (len > DASD_MAX_CMD_RES_LEN) {
printf("%s: maximum Status Length exceeded: %d > %d\n",
#ifdef DEBUG
if (len > DASD_MAX_CMD_RES_LEN)
panic("%s: maximum Status Length exceeded: %d > %d",
sc->sc_dev.dv_xname,
len, DASD_MAX_CMD_RES_LEN);
goto attn_eoi;
}
#endif
/* Get command code */
cmd = sifr & SIFR_CMD_MASK;
@ -413,7 +429,7 @@ edc_intr(arg)
* controller to do the transfer.
*/
ed = sc->sc_ed[devno];
if (!edc_setup_dma(sc, ed->sc_bp,
if (!edc_setup_dma(sc, ed->sc_read,
ed->dmamap_xfer->dm_segs[0].ds_addr,
ed->dmamap_xfer->dm_segs[0].ds_len)) {
/* XXX bail out? */
@ -445,7 +461,6 @@ edc_intr(arg)
break;
}
attn_eoi:
/*
* Unless the interrupt is for Data Transfer Ready or
* Attention Error, finish by assertion EOI. This makes
@ -459,7 +474,6 @@ edc_intr(arg)
if (intr_id != ISR_DATA_TRANSFER_RDY
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) {
sc->sc_ed[devno]->sc_error = bioerror;
sc->sc_ed[devno]->sc_flags |= EDF_IODONE;
wakeup_one(&sc->sc_ed[devno]->edc_softc);
}
@ -480,20 +494,26 @@ edc_do_attn(sc, attn_type, devno, intr_id)
/* 1. Disable interrupts in BCR. */
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR, 0);
/* 2. Assure NOT BUSY and NO INTERRUPT PENDING, unless acknowledging
* a RESET COMPLETED interrupt. */
/*
* 2. Assure NOT BUSY and NO INTERRUPT PENDING, unless acknowledging
* a RESET COMPLETED interrupt.
*/
if (intr_id != ISR_RESET_COMPLETED) {
for(tries=0; tries < 1000; tries++) {
for(tries=1; tries < EDC_ATTN_MAXTRIES; tries++) {
if ((bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR)
& (BSR_INT_PENDING|BSR_BUSY)) == 0)
& BSR_BUSY) == 0) {
#ifdef DEBUG
if ((bus_space_read_1(sc->sc_iot, sc->sc_ioh,
BSR) & BSR_INT_PENDING) && intr_id)
panic("foobar");
#endif
break;
}
}
if (tries == 1000) {
if (tries == EDC_ATTN_MAXTRIES) {
printf("%s: edc_do_attn: timeout waiting for attachment to become available\n",
(devno == DASD_DEVNO_CONTROLLER)
? sc->sc_dev.dv_xname
: sc->sc_ed[devno]->sc_dev.dv_xname);
sc->sc_ed[devno]->sc_dev.dv_xname);
return (EAGAIN);
}
}
@ -517,48 +537,62 @@ edc_do_attn(sc, attn_type, devno, intr_id)
* We use mono_time, since we don't need actual RTC, just time
* interval.
*/
int
edc_cmd_wait(sc, devno, secs)
static int
edc_cmd_wait(sc, devno, secs, poll)
struct edc_mca_softc *sc;
int devno, secs;
int devno, secs, poll;
{
struct timeval start, now;
int s;
int val, delayed;
s = splclock();
start = mono_time;
splx(s);
delayed = 0;
do {
val = bus_space_read_1(sc->sc_iot,sc->sc_ioh, BSR);
if ((val & BSR_CMD_INPROGRESS) == 0)
break;
while((bus_space_read_1(sc->sc_iot,sc->sc_ioh,BSR)&BSR_CMD_INPROGRESS)){
s = splclock();
now = mono_time;
splx(s);
if (now.tv_sec - start.tv_sec > secs)
if (poll && (val & BSR_INTR))
goto out;
if (secs == 0)
break;
delay(1);
}
if (now.tv_sec - start.tv_sec >= secs &&
/*
* This is not as accurate as checking mono_time, but
* it works with hardclock interrupts disabled too.
*/
delayed++;
if (delayed == 1000000) {
delayed = 0;
secs--;
}
#if 0
if (delayed % 1000)
printf("looping ...");
#endif
} while(1);
if (secs == 0 &&
bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CMD_INPROGRESS){
printf("%s: timed out waiting for previous cmd to finish\n",
sc->sc_ed[devno]->sc_dev.dv_xname);
return (EAGAIN);
}
out:
return (0);
}
int
edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async)
edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
struct edc_mca_softc *sc;
int cmd;
int devno;
u_int16_t cmd_args[];
int cmd_len;
int async;
int cmd_len, async, poll;
{
int i, error;
int i, error, tries;
u_int16_t cmd0;
/*
@ -567,7 +601,7 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async)
*/
if (sc->sc_cmd_async) {
/* Wait maximum 15s */
if (edc_cmd_wait(sc, devno, 15))
if (edc_cmd_wait(sc, devno, 15, 0))
return (EAGAIN); /* Busy */
sc->sc_cmd_async = 0;
@ -604,9 +638,14 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async)
bus_space_write_2(sc->sc_iot, sc->sc_ioh, CIFR,
htole16(cmd_args[i]));
/* wait until CMD IN is cleared */
/*
* Wait until CMD IN is cleared. The 1ms delay for polling
* case is necessary, otherwise e.g. system dump gets stuck
* soon. Quirky hw ?
*/
tries = 0;
while(bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CIFR_FULL)
delay(1);
delay(poll ? 1000 : 1);
}
/*
@ -619,9 +658,28 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async)
}
/* Wait for command to complete, but maximum 15 seconds. */
if (edc_cmd_wait(sc, devno, 15))
if (edc_cmd_wait(sc, devno, 15, poll))
return (EAGAIN);
/* If polling, call edc_intr() explicitly */
if (poll) {
edc_intr(sc);
/*
* If got attention id DATA TRANSFER READY, wait for
* the transfer to finish.
*/
if (sc->sc_ed[devno]->sc_error == 0
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) {
if (edc_cmd_wait(sc, devno, 15, 1))
return (EAGAIN);
edc_intr(sc);
}
if (edc_cmd_wait(sc, devno, 15, 0))
return (EAGAIN);
}
/* Check if the command completed successfully; if not, return error */
switch(SB_GET_CMD_STATUS(sc->sc_ed[devno]->sc_status_block)) {
case ISR_COMPLETED:
@ -635,9 +693,9 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async)
}
static int
edc_setup_dma(sc, bp, phys, cnt)
edc_setup_dma(sc, isread, phys, cnt)
struct edc_mca_softc *sc;
struct buf *bp;
int isread;
bus_addr_t phys;
bus_size_t cnt;
{
@ -667,7 +725,7 @@ edc_setup_dma(sc, bp, phys, cnt)
0x70 + sc->sc_drq);
/* Set the transfer mode */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(bp->b_flags & B_READ) ? 0x4C : 0x44);
(isread) ? 0x4C : 0x44);
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0xA0 + sc->sc_drq);
/* Enable access to dma channel */

View File

@ -1,4 +1,4 @@
/* $NetBSD: edcvar.h,v 1.2 2001/04/22 11:32:49 jdolecek Exp $ */
/* $NetBSD: edcvar.h,v 1.3 2001/05/04 12:58:34 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -39,6 +39,5 @@ struct ed_attach_args {
};
int edc_run_cmd __P((struct edc_mca_softc *, int,
int, u_int16_t [], int, int));
int, u_int16_t [], int, int, int));
void edc_add_disk __P((struct edc_mca_softc *, struct ed_softc *, int));
int edc_cmd_wait __P((struct edc_mca_softc *, int, int));

View File

@ -1,4 +1,4 @@
/* $NetBSD: edvar.h,v 1.3 2001/04/22 11:32:49 jdolecek Exp $ */
/* $NetBSD: edvar.h,v 1.4 2001/05/04 12:58:34 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -45,10 +45,14 @@ struct ed_softc {
struct simplelock sc_q_lock;
struct callout sc_edstart;
struct buf *sc_bp; /* buf being transfered */
void *sc_data; /* pointer to data for transfer */
long sc_bcount; /* bytes available in buffer */
daddr_t sc_rawblkno; /* starting blkno of transfer */
int sc_read; /* Read Transfer ? */
struct edc_mca_softc *edc_softc; /* pointer to our parent */
volatile int sc_flags;
int sc_flags;
#define WDF_WLABEL 0x001 /* label is writable */
#define WDF_LABELLING 0x002 /* writing label */
#define WDF_LOADED 0x004 /* parameters loaded */
@ -57,7 +61,7 @@ struct ed_softc {
#define EDF_DMAMAP_LOADED 0x020 /* dmamap_xfer loaded */
#define EDF_PROCESS_QUEUE 0x040
#define EDF_DK_BUSY 0x080 /* disk_busy() called */
#define EDF_IODONE 0x100
#define EDF_INIT 0x100 /* disk initialized */
int sc_capacity;
struct lock sc_lock; /* drive lock */