Convert over to use the new MCA DMA facilities, which encapsulate the MCA DMA controller access. This also means we gain >16MB RAM support.

While here, overhaul the driver into a saner state, in particular:
* simplify and clean up the attach code considerably, and support the 'drive'
  locator (see the example config fragment after this list)
* pass a proper dev_t to readdisklabel() - formerly the passed value was
  completely broken (it used an incorrect major and the wrong minor), but
  happened to work by accident
* the worker thread is now spawned per controller rather than per drive;
  i/o cannot be done concurrently by different drives anyway - only one
  i/o can be pending at any time
* simplify the command queue code: just sleep appropriately in the !poll
  case, garbage-collect the 'async' code that is no longer needed, and move
  the bio code from ed_mca.c to edc_mca.c:edc_bio()
* put all string arrays used by edc_dump_status_block() inside #ifdef EDC_DEBUG
  and print plain numbers when it is not defined; this cuts the object size by 5KB
* other misc cleanups
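
The 'drive' locator means an ed unit can now be hardwired to a particular drive number on its controller in the kernel config, while a wildcard entry keeps the previous attach-anything behaviour. A hypothetical config fragment (the attachment lines are illustrative; only the locator name 'drive' comes from this change):

	ed0	at edc0 drive 0		# hardwire ed0 to drive 0 of edc0
	ed*	at edc? drive ?		# or wildcard: attach any drive found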
jdolecek 2001-11-23 22:53:09 +00:00
parent 90fac765da
commit 07312f0fbb
2 changed files with 395 additions and 668 deletions

ed_mca.c

@ -1,4 +1,4 @@
/* $NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $ */ /* $NetBSD: ed_mca.c,v 1.10 2001/11/23 22:53:10 jdolecek Exp $ */
/* /*
* Copyright (c) 2001 The NetBSD Foundation, Inc. * Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -34,13 +34,14 @@
*/ */
/* /*
* Disk goo for MCA ESDI controller driver. * Disk drive goo for MCA ESDI controller driver.
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $"); __KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.10 2001/11/23 22:53:10 jdolecek Exp $");
#include "rnd.h" #include "rnd.h"
#include "locators.h"
#include <sys/param.h> #include <sys/param.h>
#include <sys/systm.h> #include <sys/systm.h>
@ -58,7 +59,6 @@ __KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $");
#include <sys/syslog.h> #include <sys/syslog.h>
#include <sys/proc.h> #include <sys/proc.h>
#include <sys/vnode.h> #include <sys/vnode.h>
#include <sys/kthread.h>
#if NRND > 0 #if NRND > 0
#include <sys/rnd.h> #include <sys/rnd.h>
#endif #endif
@ -95,19 +95,11 @@ struct cfattach ed_mca_ca = {
extern struct cfdriver ed_cd; extern struct cfdriver ed_cd;
static int ed_get_params __P((struct ed_softc *)); static int ed_get_params __P((struct ed_softc *, int *));
static int ed_lock __P((struct ed_softc *)); static int ed_lock __P((struct ed_softc *));
static void ed_unlock __P((struct ed_softc *)); static void ed_unlock __P((struct ed_softc *));
static void edgetdisklabel __P((struct ed_softc *)); static void edgetdisklabel __P((dev_t, struct ed_softc *));
static void edgetdefaultlabel __P((struct ed_softc *, struct disklabel *)); static void edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
static void ed_shutdown __P((void*));
static void __edstart __P((struct ed_softc*, struct buf *));
static void bad144intern __P((struct ed_softc *));
static void edworker __P((void *));
static void ed_spawn_worker __P((void *));
static void edmcadone __P((struct ed_softc *, struct buf *));
static void ed_bio __P((struct ed_softc *, int, int));
static void ed_bio_done __P((struct ed_softc *));
static struct dkdriver eddkdriver = { edmcastrategy }; static struct dkdriver eddkdriver = { edmcastrategy };
@ -115,22 +107,29 @@ static struct dkdriver eddkdriver = { edmcastrategy };
* Just check if it's possible to identify the disk. * Just check if it's possible to identify the disk.
*/ */
static int static int
ed_mca_probe(parent, match, aux) ed_mca_probe(parent, cf, aux)
struct device *parent; struct device *parent;
struct cfdata *match; struct cfdata *cf;
void *aux; void *aux;
{ {
u_int16_t cmd_args[2]; u_int16_t cmd_args[2];
struct edc_mca_softc *sc = (void *) parent; struct edc_mca_softc *sc = (void *) parent;
struct ed_attach_args *eda = (void *) aux; struct ed_attach_args *eda = (struct ed_attach_args *) aux;
int found = 1; int found = 1;
/*
* Check we match hardwired config.
*/
if (cf->edccf_unit != EDCCF_DRIVE_DEFAULT &&
cf->edccf_unit != eda->edc_drive)
return (0);
/* /*
* Get Device Configuration (09). * Get Device Configuration (09).
*/ */
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */ cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0; cmd_args[1] = 0;
if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1)) if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->edc_drive, cmd_args, 2, 1))
found = 0; found = 0;
return (found); return (found);
@ -143,20 +142,20 @@ ed_mca_attach(parent, self, aux)
{ {
struct ed_softc *ed = (void *) self; struct ed_softc *ed = (void *) self;
struct edc_mca_softc *sc = (void *) parent; struct edc_mca_softc *sc = (void *) parent;
struct ed_attach_args *eda = (void *) aux; struct ed_attach_args *eda = (struct ed_attach_args *) aux;
char pbuf[8]; char pbuf[8], lckname[10];
int error, nsegs; int drv_flags;
ed->edc_softc = sc; ed->edc_softc = sc;
ed->sc_dmat = eda->sc_dmat; ed->sc_devno = eda->edc_drive;
ed->sc_devno = eda->sc_devno; edc_add_disk(sc, ed);
edc_add_disk(sc, ed, eda->sc_devno);
BUFQ_INIT(&ed->sc_q); BUFQ_INIT(&ed->sc_q);
simple_lock_init(&ed->sc_q_lock); simple_lock_init(&ed->sc_q_lock);
lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0); snprintf(lckname, sizeof(lckname), "%slck", ed->sc_dev.dv_xname);
lockinit(&ed->sc_lock, PRIBIO | PCATCH, lckname, 0, 0);
if (ed_get_params(ed)) { if (ed_get_params(ed, &drv_flags)) {
printf(": IDENTIFY failed, no disk found\n"); printf(": IDENTIFY failed, no disk found\n");
return; return;
} }
@ -170,88 +169,27 @@ ed_mca_attach(parent, self, aux)
printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n", printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
ed->sc_dev.dv_xname, ed->spares, ed->sc_dev.dv_xname, ed->spares,
(ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries", (drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
(ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed", (drv_flags & (1 << 1)) ? "Removable" : "Fixed",
(ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew", (drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
(ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects", (drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK" (drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
); );
/* Create a DMA map for mapping individual transfer bufs */
if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
&ed->dmamap_xfer)) != 0) {
printf("%s: unable to create xfer DMA map, error=%d\n",
ed->sc_dev.dv_xname, error);
return;
}
/*
* Allocate DMA memory used in case where passed buf isn't
* physically contiguous.
*/
ed->sc_dmam_sz = MAXPHYS;
if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
ed->sc_dev.dv_xname, error);
bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
return;
}
/*
* Map the memory.
*/
if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
printf("%s: unable to map DMA memory, error=%d\n",
ed->sc_dev.dv_xname, error);
bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
return;
}
/* /*
* Initialize and attach the disk structure. * Initialize and attach the disk structure.
*/ */
ed->sc_dk.dk_driver = &eddkdriver; ed->sc_dk.dk_driver = &eddkdriver;
ed->sc_dk.dk_name = ed->sc_dev.dv_xname; ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
disk_attach(&ed->sc_dk); disk_attach(&ed->sc_dk);
#if 0
wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
if (ed->sc_sdhook == NULL)
printf("%s: WARNING: unable to establish shutdown hook\n",
ed->sc_dev.dv_xname);
#if NRND > 0 #if NRND > 0
rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname, rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
RND_TYPE_DISK, 0); RND_TYPE_DISK, 0);
#endif #endif
config_pending_incr();
kthread_create(ed_spawn_worker, (void *) ed);
ed->sc_flags |= EDF_INIT; ed->sc_flags |= EDF_INIT;
} }
void
ed_spawn_worker(arg)
void *arg;
{
struct ed_softc *ed = (struct ed_softc *) arg;
int error;
/* Now, everything is ready, start a kthread */
if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
"%s", ed->sc_dev.dv_xname))) {
printf("%s: cannot spawn worker thread: errno=%d\n",
ed->sc_dev.dv_xname, error);
panic("ed_spawn_worker");
}
}
/* /*
* Read/write routine for a buffer. Validates the arguments and schedules the * Read/write routine for a buffer. Validates the arguments and schedules the
* transfer. Does not wait for the transfer to complete. * transfer. Does not wait for the transfer to complete.
@ -260,12 +198,12 @@ void
edmcastrategy(bp) edmcastrategy(bp)
struct buf *bp; struct buf *bp;
{ {
struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(bp->b_dev)); struct ed_softc *ed = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
struct disklabel *lp = wd->sc_dk.dk_label; struct disklabel *lp = ed->sc_dk.dk_label;
daddr_t blkno; daddr_t blkno;
int s; int s;
WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname), WDCDEBUG_PRINT(("edmcastrategy (%s)\n", ed->sc_dev.dv_xname),
DEBUG_XFERS); DEBUG_XFERS);
/* Valid request? */ /* Valid request? */
@ -277,7 +215,7 @@ edmcastrategy(bp)
} }
/* If device invalidated (e.g. media change, door open), error. */ /* If device invalidated (e.g. media change, door open), error. */
if ((wd->sc_flags & WDF_LOADED) == 0) { if ((ed->sc_flags & WDF_LOADED) == 0) {
bp->b_error = EIO; bp->b_error = EIO;
goto bad; goto bad;
} }
@ -291,8 +229,8 @@ edmcastrategy(bp)
* If end of partition, just return. * If end of partition, just return.
*/ */
if (DISKPART(bp->b_dev) != RAW_PART && if (DISKPART(bp->b_dev) != RAW_PART &&
bounds_check_with_label(bp, wd->sc_dk.dk_label, bounds_check_with_label(bp, ed->sc_dk.dk_label,
(wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0) (ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
goto done; goto done;
/* /*
@ -311,13 +249,12 @@ edmcastrategy(bp)
/* Queue transfer on drive, activate drive and controller if idle. */ /* Queue transfer on drive, activate drive and controller if idle. */
s = splbio(); s = splbio();
simple_lock(&wd->sc_q_lock); simple_lock(&ed->sc_q_lock);
disksort_blkno(&wd->sc_q, bp); disksort_blkno(&ed->sc_q, bp);
simple_unlock(&wd->sc_q_lock); simple_unlock(&ed->sc_q_lock);
/* Ring the worker thread */ /* Ring the worker thread */
wd->sc_flags |= EDF_PROCESS_QUEUE; wakeup_one(ed->edc_softc);
wakeup_one(&wd->sc_q);
splx(s); splx(s);
return; return;
@ -329,141 +266,6 @@ done:
biodone(bp); biodone(bp);
} }
static void
ed_bio(struct ed_softc *ed, int async, int poll)
{
u_int16_t cmd_args[4];
int error=0;
u_int16_t track;
u_int16_t cyl;
u_int8_t head;
u_int8_t sector;
/* Get physical bus mapping for buf. */
if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
ed->sc_data, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {
/*
* Use our DMA safe memory to get data to/from device.
*/
if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
ed->sc_dmamkva, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
printf("%s: unable to load raw data for xfer, errno=%d\n",
ed->sc_dev.dv_xname, error);
goto out;
}
ed->sc_flags |= EDF_BOUNCEBUF;
/* If data write, copy the data to our bounce buffer. */
if (!ed->sc_read)
memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
}
ed->sc_flags |= EDF_DMAMAP_LOADED;
track = ed->sc_rawblkno / ed->sectors;
head = track % ed->heads;
cyl = track / ed->heads;
sector = ed->sc_rawblkno % ed->sectors;
WDCDEBUG_PRINT(("__edstart %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
cyl, sector, head),
DEBUG_XFERS);
mca_disk_busy();
/* Read or Write Data command */
cmd_args[0] = 2; /* Options 0000010 */
cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
cmd_args[3] = ((cyl & 0x3E0) >> 5);
if (edc_run_cmd(ed->edc_softc,
(ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, async, poll)) {
printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
mca_disk_unbusy();
error = EIO;
}
out:
if (error)
ed->sc_error = error;
}
static void
__edstart(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
(bp->b_flags & B_READ) ? "read" : "write",
bp->b_bcount, bp->b_resid, bp->b_rawblkno),
DEBUG_XFERS);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
ed->sc_flags |= EDF_DK_BUSY;
ed->sc_data = bp->b_data;
ed->sc_rawblkno = bp->b_rawblkno;
ed->sc_bcount = bp->b_bcount;
ed->sc_read = bp->b_flags & B_READ;
ed_bio(ed, 1, 0);
}
static void
ed_bio_done(ed)
struct ed_softc *ed;
{
/*
* If read transfer finished without error and using a bounce
* buffer, copy the data to buf.
*/
if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
ed->sc_flags &= ~EDF_BOUNCEBUF;
/* Unload buf from DMA map */
if (ed->sc_flags & EDF_DMAMAP_LOADED) {
bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
ed->sc_flags &= ~EDF_DMAMAP_LOADED;
}
mca_disk_unbusy();
}
static void
edmcadone(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
DEBUG_XFERS);
if (ed->sc_error) {
bp->b_error = ed->sc_error;
bp->b_flags |= B_ERROR;
} else {
/* Set resid, most commonly to zero. */
bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
}
ed_bio_done(ed);
/* If disk was busied, unbusy it now */
if (ed->sc_flags & EDF_DK_BUSY) {
disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
ed->sc_flags &= ~EDF_DK_BUSY;
}
#if NRND > 0
rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
biodone(bp);
}
int int
edmcaread(dev, uio, flags) edmcaread(dev, uio, flags)
dev_t dev; dev_t dev;
@ -543,13 +345,17 @@ edmcaopen(dev, flag, fmt, p)
} }
} else { } else {
if ((wd->sc_flags & WDF_LOADED) == 0) { if ((wd->sc_flags & WDF_LOADED) == 0) {
int s;
wd->sc_flags |= WDF_LOADED; wd->sc_flags |= WDF_LOADED;
/* Load the physical device parameters. */ /* Load the physical device parameters. */
ed_get_params(wd); s = splbio();
ed_get_params(wd, NULL);
splx(s);
/* Load the partition info if not already loaded. */ /* Load the partition info if not already loaded. */
edgetdisklabel(wd); edgetdisklabel(dev, wd);
} }
} }
@ -629,24 +435,24 @@ edmcaclose(dev, flag, fmt, p)
} }
static void static void
edgetdefaultlabel(wd, lp) edgetdefaultlabel(ed, lp)
struct ed_softc *wd; struct ed_softc *ed;
struct disklabel *lp; struct disklabel *lp;
{ {
WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS); WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
memset(lp, 0, sizeof(struct disklabel)); memset(lp, 0, sizeof(struct disklabel));
lp->d_secsize = DEV_BSIZE; lp->d_secsize = DEV_BSIZE;
lp->d_ntracks = wd->heads; lp->d_ntracks = ed->heads;
lp->d_nsectors = wd->sectors; lp->d_nsectors = ed->sectors;
lp->d_ncylinders = wd->cyl; lp->d_ncylinders = ed->cyl;
lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors; lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
lp->d_type = DTYPE_ESDI; lp->d_type = DTYPE_ESDI;
strncpy(lp->d_typename, "ESDI", 16); strncpy(lp->d_typename, "ESDI", 16);
strncpy(lp->d_packname, "fictitious", 16); strncpy(lp->d_packname, "fictitious", 16);
lp->d_secperunit = wd->sc_capacity; lp->d_secperunit = ed->sc_capacity;
lp->d_rpm = 3600; lp->d_rpm = 3600;
lp->d_interleave = 1; lp->d_interleave = 1;
lp->d_flags = 0; lp->d_flags = 0;
@ -666,26 +472,21 @@ edgetdefaultlabel(wd, lp)
* Fabricate a default disk label, and try to read the correct one. * Fabricate a default disk label, and try to read the correct one.
*/ */
static void static void
edgetdisklabel(wd) edgetdisklabel(dev, ed)
struct ed_softc *wd; dev_t dev;
struct ed_softc *ed;
{ {
struct disklabel *lp = wd->sc_dk.dk_label; struct disklabel *lp = ed->sc_dk.dk_label;
char *errstring; char *errstring;
WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS); WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);
memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel)); memset(ed->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
edgetdefaultlabel(wd, lp); edgetdefaultlabel(ed, lp);
#if 0 errstring = readdisklabel(
wd->sc_badsect[0] = -1; EDLABELDEV(dev), edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, RAW_PART),
edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
if (errstring) { if (errstring) {
/* /*
* This probably happened because the drive's default * This probably happened because the drive's default
@ -697,22 +498,13 @@ edgetdisklabel(wd)
if (wd->drvp->state > RECAL) if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET; wd->drvp->drive_flags |= DRIVE_RESET;
#endif #endif
errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, errstring = readdisklabel(EDLABELDEV(dev),
RAW_PART), edmcastrategy, lp, wd->sc_dk.dk_cpulabel); edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
} }
if (errstring) { if (errstring) {
printf("%s: %s\n", wd->sc_dev.dv_xname, errstring); printf("%s: %s\n", ed->sc_dev.dv_xname, errstring);
return; return;
} }
#if 0
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
if ((lp->d_flags & D_BADSECT) != 0)
bad144intern(wd);
#endif
} }
int int
@ -723,123 +515,79 @@ edmcaioctl(dev, xfer, addr, flag, p)
int flag; int flag;
struct proc *p; struct proc *p;
{ {
struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev)); struct ed_softc *ed = device_lookup(&ed_cd, DISKUNIT(dev));
int error; int error;
#ifdef __HAVE_OLD_DISKLABEL
struct disklabel newlabel;
#endif
WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS); WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);
if ((wd->sc_flags & WDF_LOADED) == 0) if ((ed->sc_flags & WDF_LOADED) == 0)
return EIO; return EIO;
switch (xfer) { switch (xfer) {
#ifdef HAS_BAD144_HANDLING
case DIOCSBAD:
if ((flag & FWRITE) == 0)
return EBADF;
wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
wd->sc_dk.dk_label->d_flags |= D_BADSECT;
bad144intern(wd);
return 0;
#endif
case DIOCGDINFO: case DIOCGDINFO:
*(struct disklabel *)addr = *(wd->sc_dk.dk_label); *(struct disklabel *)addr = *(ed->sc_dk.dk_label);
return 0; return 0;
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCGDINFO:
newlabel = *(wd->sc_dk.dk_label);
if (newlabel.d_npartitions > OLDMAXPARTITIONS)
return ENOTTY;
memcpy(addr, &newlabel, sizeof (struct olddisklabel));
return 0;
#endif
case DIOCGPART: case DIOCGPART:
((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label; ((struct partinfo *)addr)->disklab = ed->sc_dk.dk_label;
((struct partinfo *)addr)->part = ((struct partinfo *)addr)->part =
&wd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; &ed->sc_dk.dk_label->d_partitions[DISKPART(dev)];
return 0; return 0;
case DIOCWDINFO: case DIOCWDINFO:
case DIOCSDINFO: case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCWDINFO:
case ODIOCSDINFO:
#endif
{ {
struct disklabel *lp; struct disklabel *lp;
#ifdef __HAVE_OLD_DISKLABEL
if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
memset(&newlabel, 0, sizeof newlabel);
memcpy(&newlabel, addr, sizeof (struct olddisklabel));
lp = &newlabel;
} else
#endif
lp = (struct disklabel *)addr; lp = (struct disklabel *)addr;
if ((flag & FWRITE) == 0) if ((flag & FWRITE) == 0)
return EBADF; return EBADF;
if ((error = ed_lock(wd)) != 0) if ((error = ed_lock(ed)) != 0)
return error; return error;
wd->sc_flags |= WDF_LABELLING; ed->sc_flags |= WDF_LABELLING;
error = setdisklabel(wd->sc_dk.dk_label, error = setdisklabel(ed->sc_dk.dk_label,
lp, /*wd->sc_dk.dk_openmask : */0, lp, /*wd->sc_dk.dk_openmask : */0,
wd->sc_dk.dk_cpulabel); ed->sc_dk.dk_cpulabel);
if (error == 0) { if (error == 0) {
#if 0 #if 0
if (wd->drvp->state > RECAL) if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET; wd->drvp->drive_flags |= DRIVE_RESET;
#endif #endif
if (xfer == DIOCWDINFO if (xfer == DIOCWDINFO)
#ifdef __HAVE_OLD_DISKLABEL
|| xfer == ODIOCWDINFO
#endif
)
error = writedisklabel(EDLABELDEV(dev), error = writedisklabel(EDLABELDEV(dev),
edmcastrategy, wd->sc_dk.dk_label, edmcastrategy, ed->sc_dk.dk_label,
wd->sc_dk.dk_cpulabel); ed->sc_dk.dk_cpulabel);
} }
wd->sc_flags &= ~WDF_LABELLING; ed->sc_flags &= ~WDF_LABELLING;
ed_unlock(wd); ed_unlock(ed);
return error; return (error);
} }
case DIOCKLABEL: case DIOCKLABEL:
if (*(int *)addr) if (*(int *)addr)
wd->sc_flags |= WDF_KLABEL; ed->sc_flags |= WDF_KLABEL;
else else
wd->sc_flags &= ~WDF_KLABEL; ed->sc_flags &= ~WDF_KLABEL;
return 0; return 0;
case DIOCWLABEL: case DIOCWLABEL:
if ((flag & FWRITE) == 0) if ((flag & FWRITE) == 0)
return EBADF; return EBADF;
if (*(int *)addr) if (*(int *)addr)
wd->sc_flags |= WDF_WLABEL; ed->sc_flags |= WDF_WLABEL;
else else
wd->sc_flags &= ~WDF_WLABEL; ed->sc_flags &= ~WDF_WLABEL;
return 0; return 0;
case DIOCGDEFLABEL: case DIOCGDEFLABEL:
edgetdefaultlabel(wd, (struct disklabel *)addr); edgetdefaultlabel(ed, (struct disklabel *)addr);
return 0; return 0;
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCGDEFLABEL:
edgetdefaultlabel(wd, &newlabel);
if (newlabel.d_npartitions > OLDMAXPARTITIONS)
return ENOTTY;
memcpy(addr, &newlabel, sizeof (struct olddisklabel));
return 0;
#endif
#ifdef notyet #if 0
case DIOCWFORMAT: case DIOCWFORMAT:
if ((flag & FWRITE) == 0) if ((flag & FWRITE) == 0)
return EBADF; return EBADF;
@ -876,18 +624,6 @@ edmcaioctl(dev, xfer, addr, flag, p)
#endif #endif
} }
#if 0
#ifdef B_FORMAT
int
edmcaformat(struct buf *bp)
{
bp->b_flags |= B_FORMAT;
return edmcastrategy(bp);
}
#endif
#endif
int int
edmcasize(dev) edmcasize(dev)
dev_t dev; dev_t dev;
@ -936,6 +672,7 @@ edmcadump(dev, blkno, va, size)
struct disklabel *lp; /* disk's disklabel */ struct disklabel *lp; /* disk's disklabel */
int part; int part;
int nblks; /* total number of sectors left to write */ int nblks; /* total number of sectors left to write */
int error;
/* Check if recursive dump; if so, punt. */ /* Check if recursive dump; if so, punt. */
if (eddoingadump) if (eddoingadump)
@ -976,16 +713,10 @@ edmcadump(dev, blkno, va, size)
} }
while (nblks > 0) { while (nblks > 0) {
ed->sc_data = va; error = edc_bio(ed->edc_softc, ed, va, blkno,
ed->sc_rawblkno = blkno; min(nblks, eddumpmulti) * lp->d_secsize, 0, 1);
ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize; if (error)
ed->sc_read = 0; return (error);
ed_bio(ed, 0, 1);
if (ed->sc_error)
return (ed->sc_error);
ed_bio_done(ed);
/* update block count */ /* update block count */
nblks -= min(nblks, eddumpmulti); nblks -= min(nblks, eddumpmulti);
@ -997,36 +728,10 @@ edmcadump(dev, blkno, va, size)
return (0); return (0);
} }
#ifdef HAS_BAD144_HANDLING
/*
* Internalize the bad sector table.
*/
static void
bad144intern(wd)
struct ed_softc *wd;
{
struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
struct disklabel *lp = wd->sc_dk.dk_label;
int i = 0;
WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
for (; i < NBT_BAD; i++) {
if (bt->bt_bad[i].bt_cyl == 0xffff)
break;
wd->sc_badsect[i] =
bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
(bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
(bt->bt_bad[i].bt_trksec & 0xff);
}
for (; i < NBT_BAD+1; i++)
wd->sc_badsect[i] = -1;
}
#endif
static int static int
ed_get_params(ed) ed_get_params(ed, drv_flags)
struct ed_softc *ed; struct ed_softc *ed;
int *drv_flags;
{ {
u_int16_t cmd_args[2]; u_int16_t cmd_args[2];
@ -1036,17 +741,17 @@ ed_get_params(ed)
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */ cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0; cmd_args[1] = 0;
if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno, if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
cmd_args, 2, 0, 1)) cmd_args, 2, 1))
return (1); return (1);
ed->spares = ed->sc_status_block[1] >> 8; ed->spares = ed->sense_data[1] >> 8;
ed->drv_flags = ed->sc_status_block[1] & 0x1f; if (drv_flags)
ed->rba = ed->sc_status_block[2] | *drv_flags = ed->sense_data[1] & 0x1f;
(ed->sc_status_block[3] << 16); ed->rba = ed->sense_data[2] | (ed->sense_data[3] << 16);
/* Instead of using: /* Instead of using:
ed->cyl = ed->sc_status_block[4]; ed->cyl = ed->sense_data[4];
ed->heads = ed->sc_status_block[5] & 0xff; ed->heads = ed->sense_data[5] & 0xff;
ed->sectors = ed->sc_status_block[5] >> 8; ed->sectors = ed->sense_data[5] >> 8;
* we fabricate the numbers from RBA count, so that * we fabricate the numbers from RBA count, so that
* number of sectors is 32 and heads 64. This seems * number of sectors is 32 and heads 64. This seems
* to be necessary for integrated ESDI controller. * to be necessary for integrated ESDI controller.
@ -1058,72 +763,3 @@ ed_get_params(ed)
return (0); return (0);
} }
/*
* Our shutdown hook. We attempt to park disk's head only.
*/
void
ed_shutdown(arg)
void *arg;
{
#if 0
struct ed_softc *ed = arg;
u_int16_t cmd_args[2];
/* Issue Park Head command */
cmd_args[0] = 6; /* Options: 000110 */
cmd_args[1] = 0;
(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
cmd_args, 2, 0);
#endif
}
/*
* Main worker thread function.
*/
void
edworker(arg)
void *arg;
{
struct ed_softc *ed = (struct ed_softc *) arg;
struct buf *bp;
int s;
config_pending_decr();
for(;;) {
/* Wait until awakened */
(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);
if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
panic("edworker: expecting process queue");
ed->sc_flags &= ~EDF_PROCESS_QUEUE;
for(;;) {
/* Is there a buf for us ? */
simple_lock(&ed->sc_q_lock);
if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
simple_unlock(&ed->sc_q_lock);
break;
}
BUFQ_REMOVE(&ed->sc_q, bp);
simple_unlock(&ed->sc_q_lock);
/* Schedule i/o operation */
ed->sc_error = 0;
s = splbio();
__edstart(ed, bp);
/*
* Wait until the command executes; edc_intr() wakes
* us up.
*/
if (ed->sc_error == 0)
(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
/* Handle i/o results */
edmcadone(ed, bp);
splx(s);
}
}
}

edc_mca.c

@ -1,4 +1,4 @@
/* $NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $ */ /* $NetBSD: edc_mca.c,v 1.11 2001/11/23 22:53:09 jdolecek Exp $ */
/* /*
* Copyright (c) 2001 The NetBSD Foundation, Inc. * Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -41,19 +41,16 @@
* for MCA rev. 2.2 in hands, thanks to Scott Telford <st@epcc.ed.ac.uk>. * for MCA rev. 2.2 in hands, thanks to Scott Telford <st@epcc.ed.ac.uk>.
* *
* TODO: * TODO:
* - move the MCA DMA controller (edc_setup_dma()) goo to device driver
* independant location
* - improve error recovery * - improve error recovery
* add any soft resets when anything gets stuck? * Issue soft reset on error or timeout?
* - test with > 1 disk (this is supported by some controllers), eliminate * - test with > 1 disk (this is supported by some controllers)
* any remaining devno=0 assumptions if there are any still
* - test with > 1 ESDI controller in machine; shared interrupts * - test with > 1 ESDI controller in machine; shared interrupts
* necessary for this to work should be supported - edc_intr() specifically * necessary for this to work should be supported - edc_intr() specifically
* checks if the interrupt is for this controller * checks if the interrupt is for this controller
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $"); __KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.11 2001/11/23 22:53:09 jdolecek Exp $");
#include "rnd.h" #include "rnd.h"
@ -69,6 +66,7 @@ __KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $");
#include <sys/proc.h> #include <sys/proc.h>
#include <sys/vnode.h> #include <sys/vnode.h>
#include <sys/kernel.h> #include <sys/kernel.h>
#include <sys/kthread.h>
#if NRND > 0 #if NRND > 0
#include <sys/rnd.h> #include <sys/rnd.h>
#endif #endif
@ -85,6 +83,7 @@ __KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $");
#include <dev/mca/edcvar.h> #include <dev/mca/edcvar.h>
#define EDC_ATTN_MAXTRIES 10000 /* How many times check for unbusy */ #define EDC_ATTN_MAXTRIES 10000 /* How many times check for unbusy */
#define EDC_MAX_CMD_RES_LEN 8
struct edc_mca_softc { struct edc_mca_softc {
struct device sc_dev; struct device sc_dev;
@ -92,19 +91,23 @@ struct edc_mca_softc {
bus_space_tag_t sc_iot; bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh; bus_space_handle_t sc_ioh;
/* DMA related stuff */
bus_dma_tag_t sc_dmat; /* DMA tag as passed by parent */ bus_dma_tag_t sc_dmat; /* DMA tag as passed by parent */
bus_space_handle_t sc_dmaextcmdh; bus_dmamap_t sc_dmamap_xfer; /* transfer dma map */
bus_space_handle_t sc_dmaexech;
void *sc_ih; /* interrupt handle */ void *sc_ih; /* interrupt handle */
int sc_drq; /* DRQ number */
int sc_cmd_async; /* asynchronous cmd pending */
int sc_flags; int sc_flags;
#define DASD_QUIET 0x01 /* don't dump cmd error info */ #define DASD_QUIET 0x01 /* don't dump cmd error info */
#define DASD_MAXDEVS 8 #define DASD_MAXDEVS 8
struct ed_softc *sc_ed[DASD_MAXDEVS]; struct ed_softc *sc_ed[DASD_MAXDEVS];
struct ed_softc sc_controller; int sc_maxdevs; /* max number of disks attached to this
* controller */
/* I/O results variables */
volatile int sc_error;
volatile int sc_resblk; /* residual block count */
}; };
int edc_mca_probe __P((struct device *, struct cfdata *, void *)); int edc_mca_probe __P((struct device *, struct cfdata *, void *));
@ -114,15 +117,13 @@ struct cfattach edc_mca_ca = {
sizeof(struct edc_mca_softc), edc_mca_probe, edc_mca_attach sizeof(struct edc_mca_softc), edc_mca_probe, edc_mca_attach
}; };
#define DMA_EXTCMD 0x18
#define DMA_EXEC 0x1A
static int edc_intr __P((void *)); static int edc_intr __P((void *));
static void edc_dump_status_block __P((struct edc_mca_softc *, int, int)); static void edc_dump_status_block __P((struct edc_mca_softc *,
static int edc_setup_dma __P((struct edc_mca_softc *, int, u_int16_t *, int));
bus_addr_t, bus_size_t));
static int edc_do_attn __P((struct edc_mca_softc *, int, int, int)); static int edc_do_attn __P((struct edc_mca_softc *, int, int, int));
static int edc_cmd_wait __P((struct edc_mca_softc *, int, int, int)); static int edc_cmd_wait __P((struct edc_mca_softc *, int, int));
static void edcworker __P((void *));
static void edc_spawn_worker __P((void *));
int int
edc_mca_probe(parent, match, aux) edc_mca_probe(parent, match, aux)
@ -148,12 +149,11 @@ edc_mca_attach(parent, self, aux)
{ {
struct edc_mca_softc *sc = (void *) self; struct edc_mca_softc *sc = (void *) self;
struct mca_attach_args *ma = aux; struct mca_attach_args *ma = aux;
struct ed_attach_args eda;
int pos2, pos3, pos4; int pos2, pos3, pos4;
int irq, drq, iobase; int irq, drq, iobase;
const char *typestr; const char *typestr;
struct ed_softc *ed; int devno, error;
struct ed_attach_args eda;
int devno, maxdevs;
pos2 = mca_conf_read(ma->ma_mc, ma->ma_slot, 2); pos2 = mca_conf_read(ma->ma_mc, ma->ma_slot, 2);
pos3 = mca_conf_read(ma->ma_mc, ma->ma_slot, 3); pos3 = mca_conf_read(ma->ma_mc, ma->ma_slot, 3);
@ -231,7 +231,6 @@ edc_mca_attach(parent, self, aux)
printf("DMA pacing control disabled\n"); printf("DMA pacing control disabled\n");
sc->sc_iot = ma->ma_iot; sc->sc_iot = ma->ma_iot;
sc->sc_drq = drq;
if (bus_space_map(sc->sc_iot, iobase, if (bus_space_map(sc->sc_iot, iobase,
ESDIC_REG_NPORTS, 0, &sc->sc_ioh)) { ESDIC_REG_NPORTS, 0, &sc->sc_ioh)) {
@ -240,19 +239,6 @@ edc_mca_attach(parent, self, aux)
return; return;
} }
if (bus_space_map(sc->sc_iot, DMA_EXTCMD, 1, 0, &sc->sc_dmaextcmdh)) {
printf("%s: couldn't map registers\n",
sc->sc_dev.dv_xname);
return;
}
if (bus_space_map(sc->sc_iot, DMA_EXEC, 1, 0, &sc->sc_dmaexech)) {
printf("%s: couldn't map registers\n",
sc->sc_dev.dv_xname);
return;
}
sc->sc_dmat = ma->ma_dmat;
sc->sc_ih = mca_intr_establish(ma->ma_mc, irq, IPL_BIO, edc_intr, sc); sc->sc_ih = mca_intr_establish(ma->ma_mc, irq, IPL_BIO, edc_intr, sc);
if (sc->sc_ih == NULL) { if (sc->sc_ih == NULL) {
printf("%s: couldn't establish interrupt handler\n", printf("%s: couldn't establish interrupt handler\n",
@ -260,22 +246,23 @@ edc_mca_attach(parent, self, aux)
return; return;
} }
/* Create a MCA DMA map, used for data transfer */
sc->sc_dmat = ma->ma_dmat;
if ((error = mca_dmamap_create(sc->sc_dmat, MAXPHYS,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmamap_xfer, drq)) != 0){
printf("%s: couldn't create DMA map - error %d\n",
sc->sc_dev.dv_xname, error);
return;
}
/* /*
* Integrated ESDI controller supports only one disk, other * Integrated ESDI controller supports only one disk, other
* controllers support two disks. * controllers support two disks.
*/ */
if (ma->ma_id == MCA_PRODUCT_IBM_ESDIC_IG) if (ma->ma_id == MCA_PRODUCT_IBM_ESDIC_IG)
maxdevs = 1; sc->sc_maxdevs = 1;
else else
maxdevs = 2; sc->sc_maxdevs = 2;
/*
* Initialize the controller ed softc. We could do without this,
* but absence of checks for controller devno simplifies code logic
* somewhat.
*/
sc->sc_ed[DASD_DEVNO_CONTROLLER] = &sc->sc_controller;
strcpy(sc->sc_controller.sc_dev.dv_xname, sc->sc_dev.dv_xname);/*safe*/
/* /*
* Reset controller and attach individual disks. ed attach routine * Reset controller and attach individual disks. ed attach routine
@ -306,53 +293,53 @@ edc_mca_attach(parent, self, aux)
delay(100); delay(100);
} }
/* /* be quiet during probes */
* Get dummy ed_softc to be used during probe. Once a disk is
* found, ed_mca_attach() calls edc_add_disk() to insert the
* right pointer into sc->sc_ed[] array.
*/
MALLOC(ed, struct ed_softc *, sizeof(struct ed_softc),
M_TEMP, M_WAITOK);
/* be quiet duting probes */
sc->sc_flags |= DASD_QUIET; sc->sc_flags |= DASD_QUIET;
/* check for attached disks */ /* check for attached disks */
for(devno=0; devno < maxdevs; devno++) { for(devno=0; devno < sc->sc_maxdevs; devno++) {
eda.sc_devno = devno; eda.edc_drive = devno;
eda.sc_dmat = sc->sc_dmat; sc->sc_ed[devno] =
sc->sc_ed[devno] = ed; (void *) config_found_sm(self, &eda, NULL, NULL);
(void *) config_found_sm(self, &eda, NULL, NULL);
/* If initialization did not succeed, NULL the pointer. */
if (sc->sc_ed[devno]
&& (sc->sc_ed[devno]->sc_flags & EDF_INIT) == 0)
sc->sc_ed[devno] = NULL;
} }
/* enable full error dumps again */ /* enable full error dumps again */
sc->sc_flags &= ~DASD_QUIET; sc->sc_flags &= ~DASD_QUIET;
/* cleanup */
FREE(ed, M_TEMP);
/* /*
* Check if there are any disks attached. If not, disestablish * Check if there are any disks attached. If not, disestablish
* the interrupt. * the interrupt.
*/ */
for(devno=0; devno < maxdevs; devno++) { for(devno=0; devno < sc->sc_maxdevs; devno++) {
if (sc->sc_ed[devno] && (sc->sc_ed[devno]->sc_flags & EDF_INIT)) if (sc->sc_ed[devno])
break; break;
} }
if (devno == maxdevs) {
if (devno == sc->sc_maxdevs) {
printf("%s: disabling controller (no drives attached)\n", printf("%s: disabling controller (no drives attached)\n",
sc->sc_dev.dv_xname); sc->sc_dev.dv_xname);
mca_intr_disestablish(ma->ma_mc, sc->sc_ih); mca_intr_disestablish(ma->ma_mc, sc->sc_ih);
return;
} }
/*
* Run the worker thread.
*/
config_pending_incr();
kthread_create(edc_spawn_worker, (void *) sc);
} }
void void
edc_add_disk(sc, ed, devno) edc_add_disk(sc, ed)
struct edc_mca_softc *sc; struct edc_mca_softc *sc;
struct ed_softc *ed; struct ed_softc *ed;
int devno;
{ {
sc->sc_ed[devno] = ed; sc->sc_ed[ed->sc_devno] = ed;
} }
static int static int
@ -362,8 +349,8 @@ edc_intr(arg)
struct edc_mca_softc *sc = arg; struct edc_mca_softc *sc = arg;
u_int8_t isr, intr_id; u_int8_t isr, intr_id;
u_int16_t sifr; u_int16_t sifr;
int cmd=-1, devno, bioerror=0; int cmd=-1, devno, error=0;
struct ed_softc *ed=NULL; u_int16_t status_block[EDC_MAX_CMD_RES_LEN]; /* CMD status block */
/* /*
* Check if the interrupt was for us. * Check if the interrupt was for us.
@ -413,14 +400,14 @@ edc_intr(arg)
cmd = sifr & SIFR_CMD_MASK; cmd = sifr & SIFR_CMD_MASK;
/* Read whole status block */ /* Read whole status block */
ed = sc->sc_ed[devno]; memset(status_block, 0, sizeof(status_block)); /* zero first */
ed->sc_status_block[0] = sifr; status_block[0] = sifr;
for(i=1; i < len; i++) { for(i=1; i < len; i++) {
while((bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) while((bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR)
& BSR_SIFR_FULL) == 0) & BSR_SIFR_FULL) == 0)
delay(1); delay(1);
ed->sc_status_block[i] = le16toh( status_block[i] = le16toh(
bus_space_read_2(sc->sc_iot, sc->sc_ioh, SIFR)); bus_space_read_2(sc->sc_iot, sc->sc_ioh, SIFR));
} }
} }
@ -428,29 +415,27 @@ edc_intr(arg)
switch (intr_id) { switch (intr_id) {
case ISR_DATA_TRANSFER_RDY: case ISR_DATA_TRANSFER_RDY:
/* /*
* Ready to do DMA, setup DMA controller and kick DASD * Ready to do DMA. The DMA controller has already been
* controller to do the transfer. * setup, now just kick disk controller to do the transfer.
*/ */
ed = sc->sc_ed[devno]; bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
if (!edc_setup_dma(sc, ed->sc_read, BCR_INT_ENABLE|BCR_DMA_ENABLE);
ed->dmamap_xfer->dm_segs[0].ds_addr,
ed->dmamap_xfer->dm_segs[0].ds_len)) {
/* XXX bail out? */
printf("%s: edc_setup_dma() failed\n",
ed->sc_dev.dv_xname);
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
BCR_INT_ENABLE);
} else {
/* OK, proceed with DMA */
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
BCR_INT_ENABLE|BCR_DMA_ENABLE);
}
break; break;
case ISR_COMPLETED: case ISR_COMPLETED:
case ISR_COMPLETED_WITH_ECC: case ISR_COMPLETED_WITH_ECC:
case ISR_COMPLETED_RETRIES: case ISR_COMPLETED_RETRIES:
case ISR_COMPLETED_WARNING: case ISR_COMPLETED_WARNING:
bioerror = 0; error = 0;
/*
* Copy device config data if appropriate. sc->sc_ed[]
* entry might be NULL during probe.
*/
if (cmd == CMD_GET_DEV_CONF && sc->sc_ed[devno]) {
memcpy(sc->sc_ed[devno]->sense_data, status_block,
sizeof(sc->sc_ed[devno]->sense_data));
}
break; break;
case ISR_RESET_COMPLETED: case ISR_RESET_COMPLETED:
case ISR_ABORT_COMPLETED: case ISR_ABORT_COMPLETED:
@ -458,9 +443,9 @@ edc_intr(arg)
break; break;
default: default:
if ((sc->sc_flags & DASD_QUIET) == 0) if ((sc->sc_flags & DASD_QUIET) == 0)
edc_dump_status_block(sc, devno, intr_id); edc_dump_status_block(sc, status_block, intr_id);
bioerror = EIO; error = EIO;
break; break;
} }
@ -476,8 +461,9 @@ edc_intr(arg)
/* If Read or Write Data, wakeup worker thread to finish it */ /* If Read or Write Data, wakeup worker thread to finish it */
if (intr_id != ISR_DATA_TRANSFER_RDY if (intr_id != ISR_DATA_TRANSFER_RDY
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) { && (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) {
sc->sc_ed[devno]->sc_error = bioerror; if ((sc->sc_error = error) == 0)
wakeup_one(&sc->sc_ed[devno]->edc_softc); sc->sc_resblk = status_block[SB_RESBLKCNT_IDX];
wakeup_one(sc);
} }
return (1); return (1);
@ -541,22 +527,32 @@ edc_do_attn(sc, attn_type, devno, intr_id)
* interval. * interval.
*/ */
static int static int
edc_cmd_wait(sc, devno, secs, poll) edc_cmd_wait(sc, secs, poll)
struct edc_mca_softc *sc; struct edc_mca_softc *sc;
int devno, secs, poll; int secs, poll;
{ {
int val, delayed; int val, delayed;
if (!poll) {
int error;
/* Not polling, can sleep. Sleep until we are awakened,
* but maximum secs seconds.
*/
error = tsleep(sc, PRIBIO, "edcwcmd", secs * hz);
if (error)
goto err;
return (0);
}
/* Poll the controller until command finishes */
delayed = 0; delayed = 0;
do { do {
val = bus_space_read_1(sc->sc_iot,sc->sc_ioh, BSR); val = bus_space_read_1(sc->sc_iot,sc->sc_ioh, BSR);
if ((val & BSR_CMD_INPROGRESS) == 0) if ((val & BSR_CMD_INPROGRESS) == 0)
break; break;
if (poll && (val & BSR_INTR)) if (val & BSR_INTR)
goto out;
if (secs == 0)
break; break;
delay(1); delay(1);
@ -570,44 +566,33 @@ edc_cmd_wait(sc, devno, secs, poll)
delayed = 0; delayed = 0;
secs--; secs--;
} }
#if 0 } while(secs > 0);
if (delayed % 1000)
printf("looping ...");
#endif
} while(1);
if (secs == 0 && if (secs == 0 &&
bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CMD_INPROGRESS){ bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CMD_INPROGRESS){
printf("%s: timed out waiting for previous cmd to finish\n", err:
sc->sc_ed[devno]->sc_dev.dv_xname); printf("%s: timed out waiting for cmd to finish\n",
sc->sc_dev.dv_xname);
return (EAGAIN); return (EAGAIN);
} }
out:
return (0); return (0);
} }
int int
edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll) edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, poll)
struct edc_mca_softc *sc; struct edc_mca_softc *sc;
int cmd; int cmd;
int devno; int devno;
u_int16_t cmd_args[]; u_int16_t cmd_args[];
int cmd_len, async, poll; int cmd_len, poll;
{ {
int i, error, tries; int i, error, tries;
u_int16_t cmd0; u_int16_t cmd0;
/* if (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_BUSY) {
* If there has been an asynchronous command executed, first wait for it printf("%s: device busy?\n", sc->sc_dev.dv_xname);
* to finish. return (EAGAIN);
*/
if (sc->sc_cmd_async) {
/* Wait maximum 15s */
if (edc_cmd_wait(sc, devno, 15, 0))
return (EAGAIN); /* Busy */
sc->sc_cmd_async = 0;
} }
/* Do Attention Request for Command Request. */ /* Do Attention Request for Command Request. */
@ -635,7 +620,8 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
/* /*
* Write word of CMD to the CIFR. This sets "Command * Write word of CMD to the CIFR. This sets "Command
* Interface Register Full (CMD IN)" in BSR. Once the attachment * Interface Register Full (CMD IN)" in BSR. Once the attachment
* detects it, it reads the word and clears CMD IN. * detects it, it reads the word and clears CMD IN. This all should
* be quite fast, so don't bother with sleeps for !poll case.
*/ */
for(i=0; i < cmd_len; i++) { for(i=0; i < cmd_len; i++) {
bus_space_write_2(sc->sc_iot, sc->sc_ioh, CIFR, bus_space_write_2(sc->sc_iot, sc->sc_ioh, CIFR,
@ -647,22 +633,20 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
* soon. Quirky hw ? * soon. Quirky hw ?
*/ */
tries = 0; tries = 0;
while(bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CIFR_FULL) for(; (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR)
& BSR_CIFR_FULL) && tries < 1000 ; tries++)
delay(poll ? 1000 : 1); delay(poll ? 1000 : 1);
}
/* if (tries == 10000) {
* Attachment is now executing the command. Unless we are executing printf("%s: device too slow to accept command %d\n",
* command asynchronously, wait until it finishes. sc->sc_dev.dv_xname, cmd);
*/ return (EAGAIN);
if (async) { }
sc->sc_cmd_async = 1;
return (0);
} }
/* Wait for command to complete, but maximum 15 seconds. */ /* Wait for command to complete, but maximum 15 seconds. */
if (edc_cmd_wait(sc, devno, 15, poll)) if ((error = edc_cmd_wait(sc, 15, poll)))
return (EAGAIN); return (error);
/* If polling, call edc_intr() explicitly */ /* If polling, call edc_intr() explicitly */
if (poll) { if (poll) {
@ -672,70 +656,21 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
* If got attention id DATA TRANSFER READY, wait for * If got attention id DATA TRANSFER READY, wait for
* the transfer to finish. * the transfer to finish.
*/ */
if (sc->sc_ed[devno]->sc_error == 0 if ((cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) { && sc->sc_error == 0) {
if (edc_cmd_wait(sc, devno, 15, 1)) if ((error = edc_cmd_wait(sc, 15, poll)))
return (EAGAIN); return (error);
edc_intr(sc); edc_intr(sc);
} }
if (edc_cmd_wait(sc, devno, 15, 0)) if ((error = edc_cmd_wait(sc, 15, poll)))
return (EAGAIN); return (error);
} }
/* Check if the command completed successfully; if not, return error */ return (sc->sc_error);
switch(SB_GET_CMD_STATUS(sc->sc_ed[devno]->sc_status_block)) {
case ISR_COMPLETED:
case ISR_COMPLETED_WITH_ECC:
case ISR_COMPLETED_RETRIES:
case ISR_COMPLETED_WARNING:
return (0);
default:
return (EIO);
}
}
static int
edc_setup_dma(sc, isread, phys, cnt)
struct edc_mca_softc *sc;
int isread;
bus_addr_t phys;
bus_size_t cnt;
{
/* XXX magic constants, should be moved to device-independant location*/
/* The exact sequence to setup MCA DMA controller is taken from Minix */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x90 + sc->sc_drq);
/* Disable access to dma channel */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x20 + sc->sc_drq);
/* Clear the address byte pointer */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 0) & 0xff); /* address bits 0..7 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 8) & 0xff); /* address bits 8..15 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 16) & 0xff); /* address bits 16..23 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x40 + sc->sc_drq);
/* Clear the count byte pointer */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
((cnt - 1) >> 0) & 0xff); /* count bits 0..7 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
((cnt - 1) >> 8) & 0xff); /* count bits 8..15 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x70 + sc->sc_drq);
/* Set the transfer mode */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(isread) ? 0x4C : 0x44);
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0xA0 + sc->sc_drq);
/* Enable access to dma channel */
return (1);
} }
#ifdef EDC_DEBUG
static const char * const edc_commands[] = { static const char * const edc_commands[] = {
"Invalid Command", "Invalid Command",
"Read Data", "Read Data",
@ -832,36 +767,51 @@ static const char * const edc_dev_errors[] = {
"No ID found on track (ID search)", "No ID found on track (ID search)",
/* 0x19 - 0xff reserved */ /* 0x19 - 0xff reserved */
}; };
#endif /* EDC_DEBUG */
static void static void
edc_dump_status_block(sc, devno, intr_id) edc_dump_status_block(sc, status_block, intr_id)
struct edc_mca_softc *sc; struct edc_mca_softc *sc;
int devno, intr_id; u_int16_t *status_block;
int intr_id;
{ {
struct ed_softc *ed = sc->sc_ed[devno]; #ifdef EDC_DEBUG
printf("%s: Command: %s, Status: %s\n", printf("%s: Command: %s, Status: %s\n",
ed->sc_dev.dv_xname, sc->sc_dev.dv_xname,
edc_commands[ed->sc_status_block[0] & 0x1f], edc_commands[status_block[0] & 0x1f],
edc_cmd_status[SB_GET_CMD_STATUS(ed->sc_status_block)] edc_cmd_status[SB_GET_CMD_STATUS(status_block)]
); );
#else
printf("%s: Command: %d, Status: %d\n",
sc->sc_dev.dv_xname,
status_block[0] & 0x1f,
SB_GET_CMD_STATUS(status_block));
#endif
printf("%s: # left blocks: %u, last processed RBA: %u\n", printf("%s: # left blocks: %u, last processed RBA: %u\n",
ed->sc_dev.dv_xname, sc->sc_dev.dv_xname,
ed->sc_status_block[SB_RESBLKCNT_IDX], status_block[SB_RESBLKCNT_IDX],
(ed->sc_status_block[5] << 16) | ed->sc_status_block[4]); (status_block[5] << 16) | status_block[4]);
if (intr_id == ISR_COMPLETED_WARNING) { if (intr_id == ISR_COMPLETED_WARNING) {
#ifdef EDC_DEBUG
printf("%s: Command Error Code: %s\n", printf("%s: Command Error Code: %s\n",
ed->sc_dev.dv_xname, sc->sc_dev.dv_xname,
edc_cmd_error[ed->sc_status_block[1] & 0xff]); edc_cmd_error[status_block[1] & 0xff]);
#else
printf("%s: Command Error Code: %d\n",
sc->sc_dev.dv_xname,
status_block[1] & 0xff);
#endif
} }
if (intr_id == ISR_CMD_FAILED) { if (intr_id == ISR_CMD_FAILED) {
#ifdef EDC_DEBUG
char buf[100]; char buf[100];
printf("%s: Device Error Code: %s\n", printf("%s: Device Error Code: %s\n",
ed->sc_dev.dv_xname, sc->sc_dev.dv_xname,
edc_dev_errors[ed->sc_status_block[2] & 0xff]); edc_dev_errors[status_block[2] & 0xff]);
bitmask_snprintf((ed->sc_status_block[2] & 0xff00) >> 8, bitmask_snprintf((status_block[2] & 0xff00) >> 8,
"\20" "\20"
"\01SeekOrCmdComplete" "\01SeekOrCmdComplete"
"\02Track0Flag" "\02Track0Flag"
@ -873,6 +823,147 @@ edc_dump_status_block(sc, devno, intr_id)
"\010Reserved0", "\010Reserved0",
buf, sizeof(buf)); buf, sizeof(buf));
printf("%s: Device Status: %s\n", printf("%s: Device Status: %s\n",
ed->sc_dev.dv_xname, buf); sc->sc_dev.dv_xname, buf);
#else
printf("%s: Device Error Code: %d, Device Status: %d\n",
sc->sc_dev.dv_xname,
status_block[2] & 0xff,
(status_block[2] & 0xff00) >> 8);
#endif
} }
} }
static void
edc_spawn_worker(arg)
void *arg;
{
struct edc_mca_softc *sc = (struct edc_mca_softc *) arg;
int error;
struct proc *wrk;
/* Now, everything is ready, start a kthread */
if ((error = kthread_create1(edcworker, sc, &wrk,
"%s", sc->sc_dev.dv_xname))) {
printf("%s: cannot spawn worker thread: errno=%d\n",
sc->sc_dev.dv_xname, error);
panic("edc_spawn_worker");
}
}
/*
* Main worker thread function.
*/
void
edcworker(arg)
void *arg;
{
struct edc_mca_softc *sc = (struct edc_mca_softc *) arg;
struct ed_softc *ed;
struct buf *bp;
int s, i, error;
config_pending_decr();
s = splbio();
for(;;) {
/* Wait until awakened */
(void) tsleep(sc, PRIBIO, "edcidle", 0);
for(i=0; i<sc->sc_maxdevs; ) {
if ((ed = sc->sc_ed[i]) == NULL) {
i++;
continue;
}
/* Is there a buf for us ? */
simple_lock(&ed->sc_q_lock);
if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
simple_unlock(&ed->sc_q_lock);
i++;
continue;
}
BUFQ_REMOVE(&ed->sc_q, bp);
simple_unlock(&ed->sc_q_lock);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
error = edc_bio(sc, ed, bp->b_data, bp->b_bcount,
bp->b_rawblkno, (bp->b_flags & B_READ), 0);
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
} else {
/* Set resid, most commonly to zero. */
bp->b_resid = sc->sc_resblk * DEV_BSIZE;
}
disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
#if NRND > 0
rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
biodone(bp);
}
}
splx(s);
}
int
edc_bio(struct edc_mca_softc *sc, struct ed_softc *ed, void *data,
size_t bcount, daddr_t rawblkno, int isread, int poll)
{
u_int16_t cmd_args[4];
int error=0, fl;
u_int16_t track;
u_int16_t cyl;
u_int8_t head;
u_int8_t sector;
mca_disk_busy();
/* set WAIT and R/W flag appropriately for the DMA transfer */
fl = ((poll) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)
| ((isread) ? BUS_DMA_READ : BUS_DMA_WRITE);
/* Load the buffer for DMA transfer. */
if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_xfer, data,
bcount, NULL, BUS_DMA_STREAMING|fl))) {
printf("%s: ed_bio: unable to load DMA buffer - error %d\n",
ed->sc_dev.dv_xname, error);
goto out;
}
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_xfer, 0,
bcount, (isread) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
track = rawblkno / ed->sectors;
head = track % ed->heads;
cyl = track / ed->heads;
sector = rawblkno % ed->sectors;
/* Read or Write Data command */
cmd_args[0] = 2; /* Options 0000010 */
cmd_args[1] = bcount / DEV_BSIZE;
cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
cmd_args[3] = ((cyl & 0x3E0) >> 5);
error = edc_run_cmd(sc,
(isread) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, poll);
/* Sync the DMA memory */
if (!error) {
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_xfer, 0, bcount,
(isread)? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}
/* We are done, unload buffer from DMA map */
bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_xfer);
out:
mca_disk_unbusy();
return (error);
}
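
The CHS packing that edc_bio() builds for the Read/Write Data command is easy to check in isolation. Below is a minimal standalone sketch (not part of the commit) that reuses the same formulas together with the 32-sector/64-head geometry fabricated by ed_get_params(); the block number 123456 is an arbitrary example:

	#include <stdio.h>

	int
	main(void)
	{
		/* Geometry fabricated by ed_get_params(): 32 sectors, 64 heads. */
		const unsigned sectors = 32, heads = 64;
		unsigned rawblkno = 123456;	/* arbitrary sample block number */
		unsigned track, cyl, head, sector, arg2, arg3;

		/* Same arithmetic as edc_bio() above. */
		track  = rawblkno / sectors;
		head   = track % heads;
		cyl    = track / heads;
		sector = rawblkno % sectors;

		/* Low 5 cylinder bits, head and sector go into one word... */
		arg2 = ((cyl & 0x1f) << 11) | (head << 5) | sector;
		/* ...the upper cylinder bits into the next one. */
		arg3 = (cyl & 0x3e0) >> 5;

		printf("blk %u -> cyl %u head %u sector %u, "
		    "cmd_args[2]=0x%04x cmd_args[3]=0x%04x\n",
		    rawblkno, cyl, head, sector, arg2, arg3);
		return (0);
	}

Compiled and run, this prints "blk 123456 -> cyl 60 head 18 sector 0, cmd_args[2]=0xe240 cmd_args[3]=0x0001", matching what the driver would hand to edc_run_cmd() for that block.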