Convert over to use the new MCA DMA facilities, which encapsulate the MCA DMA controller access. This also means we gain >16MB RAM support.

While here, overhaul the driver to a saner state, especially:
* simplify and clean the attach code a lot, and support the 'drive'
  locator
* pass proper dev_t to readdisklabel() - formerly, the passed value was
  completely broken (used incorrect major and wrong minor), but worked
  by accident
* worker thread is now spawned per controller, rather than per-drive;
  i/o cannot be done concurrently for different drives, only one
  i/o can be pending at any time (see the sketch after this list)
* simplify the command queue code, just sleep appropriately in the
  !poll case, g/c the 'async' code formerly needed, and move the bio
  code from ed_mca.c to edc_mca.c:edc_bio()
* put all string arrays used by edc_dump_status_block() within #ifdef EDC_DEBUG,
  and print plain numbers instead when it's not defined; this cuts object size by 5KB
* other misc cleanups
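
For illustration, below is a minimal user-space model of the per-controller worker scheme described above: a single worker thread serves all of a controller's drive queues, so only one i/o is ever in flight per controller. This is only a sketch, not the committed kernel code; it substitutes pthreads for the kernel's kthread_create1()/tsleep()/wakeup_one() primitives, uses a plain LIFO list instead of disksort_blkno(), and all model_* names are invented for the example.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MODEL_MAXDEVS	2		/* analogue of sc_maxdevs */

struct model_buf {
	int blkno;
	struct model_buf *next;
};

struct model_drive {
	struct model_buf *queue;	/* per-drive buffer queue */
};

/* Per-controller state: one lock, one wakeup channel, one worker thread. */
struct model_edc {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	struct model_drive drives[MODEL_MAXDEVS];
};

/* Rough analogue of edcworker(): serve all drives, one transfer at a time. */
static void *
model_worker(void *arg)
{
	struct model_edc *sc = arg;
	struct model_buf *bp;
	int i, idle;

	pthread_mutex_lock(&sc->lock);
	for (;;) {
		idle = 1;
		for (i = 0; i < MODEL_MAXDEVS; i++) {
			while ((bp = sc->drives[i].queue) != NULL) {
				sc->drives[i].queue = bp->next;
				idle = 0;
				/* Only one i/o is ever pending here. */
				pthread_mutex_unlock(&sc->lock);
				printf("drive %d: i/o on block %d\n",
				    i, bp->blkno);
				free(bp);
				pthread_mutex_lock(&sc->lock);
			}
		}
		if (idle)
			/* Sleep until model_strategy() queues a buffer. */
			pthread_cond_wait(&sc->wake, &sc->lock);
	}
	/* NOTREACHED */
	return NULL;
}

/* Rough analogue of edmcastrategy(): queue a buffer, ring the worker. */
static void
model_strategy(struct model_edc *sc, int drive, int blkno)
{
	struct model_buf *bp = malloc(sizeof(*bp));

	bp->blkno = blkno;
	pthread_mutex_lock(&sc->lock);
	bp->next = sc->drives[drive].queue;
	sc->drives[drive].queue = bp;
	pthread_cond_signal(&sc->wake);
	pthread_mutex_unlock(&sc->lock);
}

int
main(void)
{
	static struct model_edc sc = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
	};
	pthread_t worker;

	pthread_create(&worker, NULL, model_worker, &sc);
	model_strategy(&sc, 0, 16);
	model_strategy(&sc, 1, 512);
	sleep(1);		/* give the worker time to drain the queues */
	return 0;
}

In the driver itself the same serialization falls out of edcworker() calling edc_bio() synchronously for each dequeued buffer, as the edc_mca.c hunks below show.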
jdolecek 2001-11-23 22:53:09 +00:00
parent 90fac765da
commit 07312f0fbb
2 changed files with 395 additions and 668 deletions

ed_mca.c

@ -1,4 +1,4 @@
/* $NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $ */
/* $NetBSD: ed_mca.c,v 1.10 2001/11/23 22:53:10 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -34,13 +34,14 @@
*/
/*
* Disk goo for MCA ESDI controller driver.
* Disk drive goo for MCA ESDI controller driver.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.10 2001/11/23 22:53:10 jdolecek Exp $");
#include "rnd.h"
#include "locators.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -58,7 +59,6 @@ __KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $");
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kthread.h>
#if NRND > 0
#include <sys/rnd.h>
#endif
@ -95,19 +95,11 @@ struct cfattach ed_mca_ca = {
extern struct cfdriver ed_cd;
static int ed_get_params __P((struct ed_softc *));
static int ed_get_params __P((struct ed_softc *, int *));
static int ed_lock __P((struct ed_softc *));
static void ed_unlock __P((struct ed_softc *));
static void edgetdisklabel __P((struct ed_softc *));
static void edgetdisklabel __P((dev_t, struct ed_softc *));
static void edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
static void ed_shutdown __P((void*));
static void __edstart __P((struct ed_softc*, struct buf *));
static void bad144intern __P((struct ed_softc *));
static void edworker __P((void *));
static void ed_spawn_worker __P((void *));
static void edmcadone __P((struct ed_softc *, struct buf *));
static void ed_bio __P((struct ed_softc *, int, int));
static void ed_bio_done __P((struct ed_softc *));
static struct dkdriver eddkdriver = { edmcastrategy };
@ -115,22 +107,29 @@ static struct dkdriver eddkdriver = { edmcastrategy };
* Just check if it's possible to identify the disk.
*/
static int
ed_mca_probe(parent, match, aux)
ed_mca_probe(parent, cf, aux)
struct device *parent;
struct cfdata *match;
struct cfdata *cf;
void *aux;
{
u_int16_t cmd_args[2];
struct edc_mca_softc *sc = (void *) parent;
struct ed_attach_args *eda = (void *) aux;
struct ed_attach_args *eda = (struct ed_attach_args *) aux;
int found = 1;
/*
* Check we match hardwired config.
*/
if (cf->edccf_unit != EDCCF_DRIVE_DEFAULT &&
cf->edccf_unit != eda->edc_drive)
return (0);
/*
* Get Device Configuration (09).
*/
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0;
if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->edc_drive, cmd_args, 2, 1))
found = 0;
return (found);
@ -143,20 +142,20 @@ ed_mca_attach(parent, self, aux)
{
struct ed_softc *ed = (void *) self;
struct edc_mca_softc *sc = (void *) parent;
struct ed_attach_args *eda = (void *) aux;
char pbuf[8];
int error, nsegs;
struct ed_attach_args *eda = (struct ed_attach_args *) aux;
char pbuf[8], lckname[10];
int drv_flags;
ed->edc_softc = sc;
ed->sc_dmat = eda->sc_dmat;
ed->sc_devno = eda->sc_devno;
edc_add_disk(sc, ed, eda->sc_devno);
ed->sc_devno = eda->edc_drive;
edc_add_disk(sc, ed);
BUFQ_INIT(&ed->sc_q);
simple_lock_init(&ed->sc_q_lock);
lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);
snprintf(lckname, sizeof(lckname), "%slck", ed->sc_dev.dv_xname);
lockinit(&ed->sc_lock, PRIBIO | PCATCH, lckname, 0, 0);
if (ed_get_params(ed)) {
if (ed_get_params(ed, &drv_flags)) {
printf(": IDENTIFY failed, no disk found\n");
return;
}
@ -170,88 +169,27 @@ ed_mca_attach(parent, self, aux)
printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
ed->sc_dev.dv_xname, ed->spares,
(ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
(ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
(ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
(ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
(drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
(drv_flags & (1 << 1)) ? "Removable" : "Fixed",
(drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
(drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
(drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
);
/* Create a DMA map for mapping individual transfer bufs */
if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
&ed->dmamap_xfer)) != 0) {
printf("%s: unable to create xfer DMA map, error=%d\n",
ed->sc_dev.dv_xname, error);
return;
}
/*
* Allocate DMA memory used in case where passed buf isn't
* physically contiguous.
*/
ed->sc_dmam_sz = MAXPHYS;
if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
ed->sc_dev.dv_xname, error);
bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
return;
}
/*
* Map the memory.
*/
if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
printf("%s: unable to map DMA memory, error=%d\n",
ed->sc_dev.dv_xname, error);
bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
return;
}
/*
* Initialize and attach the disk structure.
*/
ed->sc_dk.dk_driver = &eddkdriver;
ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
disk_attach(&ed->sc_dk);
#if 0
wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
if (ed->sc_sdhook == NULL)
printf("%s: WARNING: unable to establish shutdown hook\n",
ed->sc_dev.dv_xname);
#if NRND > 0
rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
RND_TYPE_DISK, 0);
#endif
config_pending_incr();
kthread_create(ed_spawn_worker, (void *) ed);
ed->sc_flags |= EDF_INIT;
}
void
ed_spawn_worker(arg)
void *arg;
{
struct ed_softc *ed = (struct ed_softc *) arg;
int error;
/* Now, everything is ready, start a kthread */
if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
"%s", ed->sc_dev.dv_xname))) {
printf("%s: cannot spawn worker thread: errno=%d\n",
ed->sc_dev.dv_xname, error);
panic("ed_spawn_worker");
}
}
/*
* Read/write routine for a buffer. Validates the arguments and schedules the
* transfer. Does not wait for the transfer to complete.
@ -260,12 +198,12 @@ void
edmcastrategy(bp)
struct buf *bp;
{
struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
struct disklabel *lp = wd->sc_dk.dk_label;
struct ed_softc *ed = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
struct disklabel *lp = ed->sc_dk.dk_label;
daddr_t blkno;
int s;
WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
WDCDEBUG_PRINT(("edmcastrategy (%s)\n", ed->sc_dev.dv_xname),
DEBUG_XFERS);
/* Valid request? */
@ -277,7 +215,7 @@ edmcastrategy(bp)
}
/* If device invalidated (e.g. media change, door open), error. */
if ((wd->sc_flags & WDF_LOADED) == 0) {
if ((ed->sc_flags & WDF_LOADED) == 0) {
bp->b_error = EIO;
goto bad;
}
@ -291,8 +229,8 @@ edmcastrategy(bp)
* If end of partition, just return.
*/
if (DISKPART(bp->b_dev) != RAW_PART &&
bounds_check_with_label(bp, wd->sc_dk.dk_label,
(wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
bounds_check_with_label(bp, ed->sc_dk.dk_label,
(ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
goto done;
/*
@ -311,13 +249,12 @@ edmcastrategy(bp)
/* Queue transfer on drive, activate drive and controller if idle. */
s = splbio();
simple_lock(&wd->sc_q_lock);
disksort_blkno(&wd->sc_q, bp);
simple_unlock(&wd->sc_q_lock);
simple_lock(&ed->sc_q_lock);
disksort_blkno(&ed->sc_q, bp);
simple_unlock(&ed->sc_q_lock);
/* Ring the worker thread */
wd->sc_flags |= EDF_PROCESS_QUEUE;
wakeup_one(&wd->sc_q);
wakeup_one(ed->edc_softc);
splx(s);
return;
@ -329,141 +266,6 @@ done:
biodone(bp);
}
static void
ed_bio(struct ed_softc *ed, int async, int poll)
{
u_int16_t cmd_args[4];
int error=0;
u_int16_t track;
u_int16_t cyl;
u_int8_t head;
u_int8_t sector;
/* Get physical bus mapping for buf. */
if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
ed->sc_data, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {
/*
* Use our DMA safe memory to get data to/from device.
*/
if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
ed->sc_dmamkva, ed->sc_bcount, NULL,
BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
printf("%s: unable to load raw data for xfer, errno=%d\n",
ed->sc_dev.dv_xname, error);
goto out;
}
ed->sc_flags |= EDF_BOUNCEBUF;
/* If data write, copy the data to our bounce buffer. */
if (!ed->sc_read)
memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
}
ed->sc_flags |= EDF_DMAMAP_LOADED;
track = ed->sc_rawblkno / ed->sectors;
head = track % ed->heads;
cyl = track / ed->heads;
sector = ed->sc_rawblkno % ed->sectors;
WDCDEBUG_PRINT(("__edstart %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
cyl, sector, head),
DEBUG_XFERS);
mca_disk_busy();
/* Read or Write Data command */
cmd_args[0] = 2; /* Options 0000010 */
cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
cmd_args[3] = ((cyl & 0x3E0) >> 5);
if (edc_run_cmd(ed->edc_softc,
(ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, async, poll)) {
printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
mca_disk_unbusy();
error = EIO;
}
out:
if (error)
ed->sc_error = error;
}
static void
__edstart(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
(bp->b_flags & B_READ) ? "read" : "write",
bp->b_bcount, bp->b_resid, bp->b_rawblkno),
DEBUG_XFERS);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
ed->sc_flags |= EDF_DK_BUSY;
ed->sc_data = bp->b_data;
ed->sc_rawblkno = bp->b_rawblkno;
ed->sc_bcount = bp->b_bcount;
ed->sc_read = bp->b_flags & B_READ;
ed_bio(ed, 1, 0);
}
static void
ed_bio_done(ed)
struct ed_softc *ed;
{
/*
* If read transfer finished without error and using a bounce
* buffer, copy the data to buf.
*/
if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
ed->sc_flags &= ~EDF_BOUNCEBUF;
/* Unload buf from DMA map */
if (ed->sc_flags & EDF_DMAMAP_LOADED) {
bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
ed->sc_flags &= ~EDF_DMAMAP_LOADED;
}
mca_disk_unbusy();
}
static void
edmcadone(ed, bp)
struct ed_softc *ed;
struct buf *bp;
{
WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
DEBUG_XFERS);
if (ed->sc_error) {
bp->b_error = ed->sc_error;
bp->b_flags |= B_ERROR;
} else {
/* Set resid, most commonly to zero. */
bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
}
ed_bio_done(ed);
/* If disk was busied, unbusy it now */
if (ed->sc_flags & EDF_DK_BUSY) {
disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
ed->sc_flags &= ~EDF_DK_BUSY;
}
#if NRND > 0
rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
biodone(bp);
}
int
edmcaread(dev, uio, flags)
dev_t dev;
@ -543,13 +345,17 @@ edmcaopen(dev, flag, fmt, p)
}
} else {
if ((wd->sc_flags & WDF_LOADED) == 0) {
int s;
wd->sc_flags |= WDF_LOADED;
/* Load the physical device parameters. */
ed_get_params(wd);
s = splbio();
ed_get_params(wd, NULL);
splx(s);
/* Load the partition info if not already loaded. */
edgetdisklabel(wd);
edgetdisklabel(dev, wd);
}
}
@ -629,24 +435,24 @@ edmcaclose(dev, flag, fmt, p)
}
static void
edgetdefaultlabel(wd, lp)
struct ed_softc *wd;
edgetdefaultlabel(ed, lp)
struct ed_softc *ed;
struct disklabel *lp;
{
WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
memset(lp, 0, sizeof(struct disklabel));
lp->d_secsize = DEV_BSIZE;
lp->d_ntracks = wd->heads;
lp->d_nsectors = wd->sectors;
lp->d_ncylinders = wd->cyl;
lp->d_ntracks = ed->heads;
lp->d_nsectors = ed->sectors;
lp->d_ncylinders = ed->cyl;
lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
lp->d_type = DTYPE_ESDI;
strncpy(lp->d_typename, "ESDI", 16);
strncpy(lp->d_packname, "fictitious", 16);
lp->d_secperunit = wd->sc_capacity;
lp->d_secperunit = ed->sc_capacity;
lp->d_rpm = 3600;
lp->d_interleave = 1;
lp->d_flags = 0;
@ -666,26 +472,21 @@ edgetdefaultlabel(wd, lp)
* Fabricate a default disk label, and try to read the correct one.
*/
static void
edgetdisklabel(wd)
struct ed_softc *wd;
edgetdisklabel(dev, ed)
dev_t dev;
struct ed_softc *ed;
{
struct disklabel *lp = wd->sc_dk.dk_label;
struct disklabel *lp = ed->sc_dk.dk_label;
char *errstring;
WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);
memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
memset(ed->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
edgetdefaultlabel(wd, lp);
edgetdefaultlabel(ed, lp);
#if 0
wd->sc_badsect[0] = -1;
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, RAW_PART),
edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
errstring = readdisklabel(
EDLABELDEV(dev), edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
if (errstring) {
/*
* This probably happened because the drive's default
@ -697,22 +498,13 @@ edgetdisklabel(wd)
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit,
RAW_PART), edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
errstring = readdisklabel(EDLABELDEV(dev),
edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
}
if (errstring) {
printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
printf("%s: %s\n", ed->sc_dev.dv_xname, errstring);
return;
}
#if 0
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
if ((lp->d_flags & D_BADSECT) != 0)
bad144intern(wd);
#endif
}
int
@ -723,123 +515,79 @@ edmcaioctl(dev, xfer, addr, flag, p)
int flag;
struct proc *p;
{
struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
struct ed_softc *ed = device_lookup(&ed_cd, DISKUNIT(dev));
int error;
#ifdef __HAVE_OLD_DISKLABEL
struct disklabel newlabel;
#endif
WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);
if ((wd->sc_flags & WDF_LOADED) == 0)
if ((ed->sc_flags & WDF_LOADED) == 0)
return EIO;
switch (xfer) {
#ifdef HAS_BAD144_HANDLING
case DIOCSBAD:
if ((flag & FWRITE) == 0)
return EBADF;
wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
wd->sc_dk.dk_label->d_flags |= D_BADSECT;
bad144intern(wd);
return 0;
#endif
case DIOCGDINFO:
*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
*(struct disklabel *)addr = *(ed->sc_dk.dk_label);
return 0;
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCGDINFO:
newlabel = *(wd->sc_dk.dk_label);
if (newlabel.d_npartitions > OLDMAXPARTITIONS)
return ENOTTY;
memcpy(addr, &newlabel, sizeof (struct olddisklabel));
return 0;
#endif
case DIOCGPART:
((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
((struct partinfo *)addr)->disklab = ed->sc_dk.dk_label;
((struct partinfo *)addr)->part =
&wd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
&ed->sc_dk.dk_label->d_partitions[DISKPART(dev)];
return 0;
case DIOCWDINFO:
case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCWDINFO:
case ODIOCSDINFO:
#endif
{
struct disklabel *lp;
#ifdef __HAVE_OLD_DISKLABEL
if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
memset(&newlabel, 0, sizeof newlabel);
memcpy(&newlabel, addr, sizeof (struct olddisklabel));
lp = &newlabel;
} else
#endif
lp = (struct disklabel *)addr;
if ((flag & FWRITE) == 0)
return EBADF;
if ((error = ed_lock(wd)) != 0)
if ((error = ed_lock(ed)) != 0)
return error;
wd->sc_flags |= WDF_LABELLING;
ed->sc_flags |= WDF_LABELLING;
error = setdisklabel(wd->sc_dk.dk_label,
error = setdisklabel(ed->sc_dk.dk_label,
lp, /*wd->sc_dk.dk_openmask : */0,
wd->sc_dk.dk_cpulabel);
ed->sc_dk.dk_cpulabel);
if (error == 0) {
#if 0
if (wd->drvp->state > RECAL)
wd->drvp->drive_flags |= DRIVE_RESET;
#endif
if (xfer == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
|| xfer == ODIOCWDINFO
#endif
)
if (xfer == DIOCWDINFO)
error = writedisklabel(EDLABELDEV(dev),
edmcastrategy, wd->sc_dk.dk_label,
wd->sc_dk.dk_cpulabel);
edmcastrategy, ed->sc_dk.dk_label,
ed->sc_dk.dk_cpulabel);
}
wd->sc_flags &= ~WDF_LABELLING;
ed_unlock(wd);
return error;
ed->sc_flags &= ~WDF_LABELLING;
ed_unlock(ed);
return (error);
}
case DIOCKLABEL:
if (*(int *)addr)
wd->sc_flags |= WDF_KLABEL;
ed->sc_flags |= WDF_KLABEL;
else
wd->sc_flags &= ~WDF_KLABEL;
ed->sc_flags &= ~WDF_KLABEL;
return 0;
case DIOCWLABEL:
if ((flag & FWRITE) == 0)
return EBADF;
if (*(int *)addr)
wd->sc_flags |= WDF_WLABEL;
ed->sc_flags |= WDF_WLABEL;
else
wd->sc_flags &= ~WDF_WLABEL;
ed->sc_flags &= ~WDF_WLABEL;
return 0;
case DIOCGDEFLABEL:
edgetdefaultlabel(wd, (struct disklabel *)addr);
edgetdefaultlabel(ed, (struct disklabel *)addr);
return 0;
#ifdef __HAVE_OLD_DISKLABEL
case ODIOCGDEFLABEL:
edgetdefaultlabel(wd, &newlabel);
if (newlabel.d_npartitions > OLDMAXPARTITIONS)
return ENOTTY;
memcpy(addr, &newlabel, sizeof (struct olddisklabel));
return 0;
#endif
#ifdef notyet
#if 0
case DIOCWFORMAT:
if ((flag & FWRITE) == 0)
return EBADF;
@ -876,18 +624,6 @@ edmcaioctl(dev, xfer, addr, flag, p)
#endif
}
#if 0
#ifdef B_FORMAT
int
edmcaformat(struct buf *bp)
{
bp->b_flags |= B_FORMAT;
return edmcastrategy(bp);
}
#endif
#endif
int
edmcasize(dev)
dev_t dev;
@ -936,6 +672,7 @@ edmcadump(dev, blkno, va, size)
struct disklabel *lp; /* disk's disklabel */
int part;
int nblks; /* total number of sectors left to write */
int error;
/* Check if recursive dump; if so, punt. */
if (eddoingadump)
@ -976,16 +713,10 @@ edmcadump(dev, blkno, va, size)
}
while (nblks > 0) {
ed->sc_data = va;
ed->sc_rawblkno = blkno;
ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
ed->sc_read = 0;
ed_bio(ed, 0, 1);
if (ed->sc_error)
return (ed->sc_error);
ed_bio_done(ed);
error = edc_bio(ed->edc_softc, ed, va, blkno,
min(nblks, eddumpmulti) * lp->d_secsize, 0, 1);
if (error)
return (error);
/* update block count */
nblks -= min(nblks, eddumpmulti);
@ -997,36 +728,10 @@ edmcadump(dev, blkno, va, size)
return (0);
}
#ifdef HAS_BAD144_HANDLING
/*
* Internalize the bad sector table.
*/
static void
bad144intern(wd)
struct ed_softc *wd;
{
struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
struct disklabel *lp = wd->sc_dk.dk_label;
int i = 0;
WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
for (; i < NBT_BAD; i++) {
if (bt->bt_bad[i].bt_cyl == 0xffff)
break;
wd->sc_badsect[i] =
bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
(bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
(bt->bt_bad[i].bt_trksec & 0xff);
}
for (; i < NBT_BAD+1; i++)
wd->sc_badsect[i] = -1;
}
#endif
static int
ed_get_params(ed)
ed_get_params(ed, drv_flags)
struct ed_softc *ed;
int *drv_flags;
{
u_int16_t cmd_args[2];
@ -1036,17 +741,17 @@ ed_get_params(ed)
cmd_args[0] = 14; /* Options: 00s110, s: 0=Physical 1=Pseudo */
cmd_args[1] = 0;
if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
cmd_args, 2, 0, 1))
cmd_args, 2, 1))
return (1);
ed->spares = ed->sc_status_block[1] >> 8;
ed->drv_flags = ed->sc_status_block[1] & 0x1f;
ed->rba = ed->sc_status_block[2] |
(ed->sc_status_block[3] << 16);
ed->spares = ed->sense_data[1] >> 8;
if (drv_flags)
*drv_flags = ed->sense_data[1] & 0x1f;
ed->rba = ed->sense_data[2] | (ed->sense_data[3] << 16);
/* Instead of using:
ed->cyl = ed->sc_status_block[4];
ed->heads = ed->sc_status_block[5] & 0xff;
ed->sectors = ed->sc_status_block[5] >> 8;
ed->cyl = ed->sense_data[4];
ed->heads = ed->sense_data[5] & 0xff;
ed->sectors = ed->sense_data[5] >> 8;
* we fabricate the numbers from RBA count, so that
* number of sectors is 32 and heads 64. This seems
* to be necessary for integrated ESDI controller.
@ -1058,72 +763,3 @@ ed_get_params(ed)
return (0);
}
/*
* Our shutdown hook. We attempt to park disk's head only.
*/
void
ed_shutdown(arg)
void *arg;
{
#if 0
struct ed_softc *ed = arg;
u_int16_t cmd_args[2];
/* Issue Park Head command */
cmd_args[0] = 6; /* Options: 000110 */
cmd_args[1] = 0;
(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
cmd_args, 2, 0);
#endif
}
/*
* Main worker thread function.
*/
void
edworker(arg)
void *arg;
{
struct ed_softc *ed = (struct ed_softc *) arg;
struct buf *bp;
int s;
config_pending_decr();
for(;;) {
/* Wait until awakened */
(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);
if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
panic("edworker: expecting process queue");
ed->sc_flags &= ~EDF_PROCESS_QUEUE;
for(;;) {
/* Is there a buf for us ? */
simple_lock(&ed->sc_q_lock);
if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
simple_unlock(&ed->sc_q_lock);
break;
}
BUFQ_REMOVE(&ed->sc_q, bp);
simple_unlock(&ed->sc_q_lock);
/* Schedule i/o operation */
ed->sc_error = 0;
s = splbio();
__edstart(ed, bp);
/*
* Wait until the command executes; edc_intr() wakes
* us up.
*/
if (ed->sc_error == 0)
(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
/* Handle i/o results */
edmcadone(ed, bp);
splx(s);
}
}
}

edc_mca.c

@ -1,4 +1,4 @@
/* $NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $ */
/* $NetBSD: edc_mca.c,v 1.11 2001/11/23 22:53:09 jdolecek Exp $ */
/*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -41,19 +41,16 @@
* for MCA rev. 2.2 in hands, thanks to Scott Telford <st@epcc.ed.ac.uk>.
*
* TODO:
* - move the MCA DMA controller (edc_setup_dma()) goo to device driver
* independant location
* - improve error recovery
* add any soft resets when anything gets stuck?
* - test with > 1 disk (this is supported by some controllers), eliminate
* any remaining devno=0 assumptions if there are any still
* Issue soft reset on error or timeout?
* - test with > 1 disk (this is supported by some controllers)
* - test with > 1 ESDI controller in machine; shared interrupts
* necessary for this to work should be supported - edc_intr() specifically
* checks if the interrupt is for this controller
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.11 2001/11/23 22:53:09 jdolecek Exp $");
#include "rnd.h"
@ -69,6 +66,7 @@ __KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $");
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#if NRND > 0
#include <sys/rnd.h>
#endif
@ -85,6 +83,7 @@ __KERNEL_RCSID(0, "$NetBSD: edc_mca.c,v 1.10 2001/11/13 07:46:25 lukem Exp $");
#include <dev/mca/edcvar.h>
#define EDC_ATTN_MAXTRIES 10000 /* How many times check for unbusy */
#define EDC_MAX_CMD_RES_LEN 8
struct edc_mca_softc {
struct device sc_dev;
@ -92,19 +91,23 @@ struct edc_mca_softc {
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh;
/* DMA related stuff */
bus_dma_tag_t sc_dmat; /* DMA tag as passed by parent */
bus_space_handle_t sc_dmaextcmdh;
bus_space_handle_t sc_dmaexech;
bus_dmamap_t sc_dmamap_xfer; /* transfer dma map */
void *sc_ih; /* interrupt handle */
int sc_drq; /* DRQ number */
int sc_cmd_async; /* asynchronous cmd pending */
int sc_flags;
#define DASD_QUIET 0x01 /* don't dump cmd error info */
#define DASD_MAXDEVS 8
struct ed_softc *sc_ed[DASD_MAXDEVS];
struct ed_softc sc_controller;
int sc_maxdevs; /* max number of disks attached to this
* controller */
/* I/O results variables */
volatile int sc_error;
volatile int sc_resblk; /* residual block count */
};
int edc_mca_probe __P((struct device *, struct cfdata *, void *));
@ -114,15 +117,13 @@ struct cfattach edc_mca_ca = {
sizeof(struct edc_mca_softc), edc_mca_probe, edc_mca_attach
};
#define DMA_EXTCMD 0x18
#define DMA_EXEC 0x1A
static int edc_intr __P((void *));
static void edc_dump_status_block __P((struct edc_mca_softc *, int, int));
static int edc_setup_dma __P((struct edc_mca_softc *, int,
bus_addr_t, bus_size_t));
static void edc_dump_status_block __P((struct edc_mca_softc *,
u_int16_t *, int));
static int edc_do_attn __P((struct edc_mca_softc *, int, int, int));
static int edc_cmd_wait __P((struct edc_mca_softc *, int, int, int));
static int edc_cmd_wait __P((struct edc_mca_softc *, int, int));
static void edcworker __P((void *));
static void edc_spawn_worker __P((void *));
int
edc_mca_probe(parent, match, aux)
@ -148,12 +149,11 @@ edc_mca_attach(parent, self, aux)
{
struct edc_mca_softc *sc = (void *) self;
struct mca_attach_args *ma = aux;
struct ed_attach_args eda;
int pos2, pos3, pos4;
int irq, drq, iobase;
const char *typestr;
struct ed_softc *ed;
struct ed_attach_args eda;
int devno, maxdevs;
int devno, error;
pos2 = mca_conf_read(ma->ma_mc, ma->ma_slot, 2);
pos3 = mca_conf_read(ma->ma_mc, ma->ma_slot, 3);
@ -231,7 +231,6 @@ edc_mca_attach(parent, self, aux)
printf("DMA pacing control disabled\n");
sc->sc_iot = ma->ma_iot;
sc->sc_drq = drq;
if (bus_space_map(sc->sc_iot, iobase,
ESDIC_REG_NPORTS, 0, &sc->sc_ioh)) {
@ -240,19 +239,6 @@ edc_mca_attach(parent, self, aux)
return;
}
if (bus_space_map(sc->sc_iot, DMA_EXTCMD, 1, 0, &sc->sc_dmaextcmdh)) {
printf("%s: couldn't map registers\n",
sc->sc_dev.dv_xname);
return;
}
if (bus_space_map(sc->sc_iot, DMA_EXEC, 1, 0, &sc->sc_dmaexech)) {
printf("%s: couldn't map registers\n",
sc->sc_dev.dv_xname);
return;
}
sc->sc_dmat = ma->ma_dmat;
sc->sc_ih = mca_intr_establish(ma->ma_mc, irq, IPL_BIO, edc_intr, sc);
if (sc->sc_ih == NULL) {
printf("%s: couldn't establish interrupt handler\n",
@ -260,22 +246,23 @@ edc_mca_attach(parent, self, aux)
return;
}
/* Create a MCA DMA map, used for data transfer */
sc->sc_dmat = ma->ma_dmat;
if ((error = mca_dmamap_create(sc->sc_dmat, MAXPHYS,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmamap_xfer, drq)) != 0){
printf("%s: couldn't create DMA map - error %d\n",
sc->sc_dev.dv_xname, error);
return;
}
/*
* Integrated ESDI controller supports only one disk, other
* controllers support two disks.
*/
if (ma->ma_id == MCA_PRODUCT_IBM_ESDIC_IG)
maxdevs = 1;
sc->sc_maxdevs = 1;
else
maxdevs = 2;
/*
* Initialize the controller ed softc. We could do without this,
* but absence of checks for controller devno simplifies code logic
* somewhat.
*/
sc->sc_ed[DASD_DEVNO_CONTROLLER] = &sc->sc_controller;
strcpy(sc->sc_controller.sc_dev.dv_xname, sc->sc_dev.dv_xname);/*safe*/
sc->sc_maxdevs = 2;
/*
* Reset controller and attach individual disks. ed attach routine
@ -306,53 +293,53 @@ edc_mca_attach(parent, self, aux)
delay(100);
}
/*
* Get dummy ed_softc to be used during probe. Once a disk is
* found, ed_mca_attach() calls edc_add_disk() to insert the
* right pointer into sc->sc_ed[] array.
*/
MALLOC(ed, struct ed_softc *, sizeof(struct ed_softc),
M_TEMP, M_WAITOK);
/* be quiet duting probes */
/* be quiet during probes */
sc->sc_flags |= DASD_QUIET;
/* check for attached disks */
for(devno=0; devno < maxdevs; devno++) {
eda.sc_devno = devno;
eda.sc_dmat = sc->sc_dmat;
sc->sc_ed[devno] = ed;
(void *) config_found_sm(self, &eda, NULL, NULL);
for(devno=0; devno < sc->sc_maxdevs; devno++) {
eda.edc_drive = devno;
sc->sc_ed[devno] =
(void *) config_found_sm(self, &eda, NULL, NULL);
/* If initialization did not succeed, NULL the pointer. */
if (sc->sc_ed[devno]
&& (sc->sc_ed[devno]->sc_flags & EDF_INIT) == 0)
sc->sc_ed[devno] = NULL;
}
/* enable full error dumps again */
sc->sc_flags &= ~DASD_QUIET;
/* cleanup */
FREE(ed, M_TEMP);
/*
* Check if there are any disks attached. If not, disestablish
* the interrupt.
*/
for(devno=0; devno < maxdevs; devno++) {
if (sc->sc_ed[devno] && (sc->sc_ed[devno]->sc_flags & EDF_INIT))
for(devno=0; devno < sc->sc_maxdevs; devno++) {
if (sc->sc_ed[devno])
break;
}
if (devno == maxdevs) {
if (devno == sc->sc_maxdevs) {
printf("%s: disabling controller (no drives attached)\n",
sc->sc_dev.dv_xname);
mca_intr_disestablish(ma->ma_mc, sc->sc_ih);
return;
}
/*
* Run the worker thread.
*/
config_pending_incr();
kthread_create(edc_spawn_worker, (void *) sc);
}
void
edc_add_disk(sc, ed, devno)
edc_add_disk(sc, ed)
struct edc_mca_softc *sc;
struct ed_softc *ed;
int devno;
{
sc->sc_ed[devno] = ed;
sc->sc_ed[ed->sc_devno] = ed;
}
static int
@ -362,8 +349,8 @@ edc_intr(arg)
struct edc_mca_softc *sc = arg;
u_int8_t isr, intr_id;
u_int16_t sifr;
int cmd=-1, devno, bioerror=0;
struct ed_softc *ed=NULL;
int cmd=-1, devno, error=0;
u_int16_t status_block[EDC_MAX_CMD_RES_LEN]; /* CMD status block */
/*
* Check if the interrupt was for us.
@ -413,14 +400,14 @@ edc_intr(arg)
cmd = sifr & SIFR_CMD_MASK;
/* Read whole status block */
ed = sc->sc_ed[devno];
ed->sc_status_block[0] = sifr;
memset(status_block, 0, sizeof(status_block)); /* zero first */
status_block[0] = sifr;
for(i=1; i < len; i++) {
while((bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR)
& BSR_SIFR_FULL) == 0)
delay(1);
ed->sc_status_block[i] = le16toh(
status_block[i] = le16toh(
bus_space_read_2(sc->sc_iot, sc->sc_ioh, SIFR));
}
}
@ -428,29 +415,27 @@ edc_intr(arg)
switch (intr_id) {
case ISR_DATA_TRANSFER_RDY:
/*
* Ready to do DMA, setup DMA controller and kick DASD
* controller to do the transfer.
* Ready to do DMA. The DMA controller has already been
* setup, now just kick disk controller to do the transfer.
*/
ed = sc->sc_ed[devno];
if (!edc_setup_dma(sc, ed->sc_read,
ed->dmamap_xfer->dm_segs[0].ds_addr,
ed->dmamap_xfer->dm_segs[0].ds_len)) {
/* XXX bail out? */
printf("%s: edc_setup_dma() failed\n",
ed->sc_dev.dv_xname);
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
BCR_INT_ENABLE);
} else {
/* OK, proceed with DMA */
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
BCR_INT_ENABLE|BCR_DMA_ENABLE);
}
bus_space_write_1(sc->sc_iot, sc->sc_ioh, BCR,
BCR_INT_ENABLE|BCR_DMA_ENABLE);
break;
case ISR_COMPLETED:
case ISR_COMPLETED_WITH_ECC:
case ISR_COMPLETED_RETRIES:
case ISR_COMPLETED_WARNING:
bioerror = 0;
error = 0;
/*
* Copy device config data if appropriate. sc->sc_ed[]
* entry might be NULL during probe.
*/
if (cmd == CMD_GET_DEV_CONF && sc->sc_ed[devno]) {
memcpy(sc->sc_ed[devno]->sense_data, status_block,
sizeof(sc->sc_ed[devno]->sense_data));
}
break;
case ISR_RESET_COMPLETED:
case ISR_ABORT_COMPLETED:
@ -458,9 +443,9 @@ edc_intr(arg)
break;
default:
if ((sc->sc_flags & DASD_QUIET) == 0)
edc_dump_status_block(sc, devno, intr_id);
edc_dump_status_block(sc, status_block, intr_id);
bioerror = EIO;
error = EIO;
break;
}
@ -476,8 +461,9 @@ edc_intr(arg)
/* If Read or Write Data, wakeup worker thread to finish it */
if (intr_id != ISR_DATA_TRANSFER_RDY
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) {
sc->sc_ed[devno]->sc_error = bioerror;
wakeup_one(&sc->sc_ed[devno]->edc_softc);
if ((sc->sc_error = error) == 0)
sc->sc_resblk = status_block[SB_RESBLKCNT_IDX];
wakeup_one(sc);
}
return (1);
@ -541,22 +527,32 @@ edc_do_attn(sc, attn_type, devno, intr_id)
* interval.
*/
static int
edc_cmd_wait(sc, devno, secs, poll)
edc_cmd_wait(sc, secs, poll)
struct edc_mca_softc *sc;
int devno, secs, poll;
int secs, poll;
{
int val, delayed;
if (!poll) {
int error;
/* Not polling, can sleep. Sleep until we are awakened,
* but maximum secs seconds.
*/
error = tsleep(sc, PRIBIO, "edcwcmd", secs * hz);
if (error)
goto err;
return (0);
}
/* Poll the controller until command finishes */
delayed = 0;
do {
val = bus_space_read_1(sc->sc_iot,sc->sc_ioh, BSR);
if ((val & BSR_CMD_INPROGRESS) == 0)
break;
if (poll && (val & BSR_INTR))
goto out;
if (secs == 0)
if (val & BSR_INTR)
break;
delay(1);
@ -570,44 +566,33 @@ edc_cmd_wait(sc, devno, secs, poll)
delayed = 0;
secs--;
}
#if 0
if (delayed % 1000)
printf("looping ...");
#endif
} while(1);
} while(secs > 0);
if (secs == 0 &&
bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CMD_INPROGRESS){
printf("%s: timed out waiting for previous cmd to finish\n",
sc->sc_ed[devno]->sc_dev.dv_xname);
err:
printf("%s: timed out waiting for cmd to finish\n",
sc->sc_dev.dv_xname);
return (EAGAIN);
}
out:
return (0);
}
int
edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, poll)
struct edc_mca_softc *sc;
int cmd;
int devno;
u_int16_t cmd_args[];
int cmd_len, async, poll;
int cmd_len, poll;
{
int i, error, tries;
u_int16_t cmd0;
/*
* If there has been an asynchronous command executed, first wait for it
* to finish.
*/
if (sc->sc_cmd_async) {
/* Wait maximum 15s */
if (edc_cmd_wait(sc, devno, 15, 0))
return (EAGAIN); /* Busy */
sc->sc_cmd_async = 0;
if (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_BUSY) {
printf("%s: device busy?\n", sc->sc_dev.dv_xname);
return (EAGAIN);
}
/* Do Attention Request for Command Request. */
@ -635,7 +620,8 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
/*
* Write word of CMD to the CIFR. This sets "Command
* Interface Register Full (CMD IN)" in BSR. Once the attachment
* detects it, it reads the word and clears CMD IN.
* detects it, it reads the word and clears CMD IN. This all should
* be quite fast, so don't bother with sleeps for !poll case.
*/
for(i=0; i < cmd_len; i++) {
bus_space_write_2(sc->sc_iot, sc->sc_ioh, CIFR,
@ -647,22 +633,20 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
* soon. Quirky hw ?
*/
tries = 0;
while(bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR) & BSR_CIFR_FULL)
for(; (bus_space_read_1(sc->sc_iot, sc->sc_ioh, BSR)
& BSR_CIFR_FULL) && tries < 1000 ; tries++)
delay(poll ? 1000 : 1);
}
/*
* Attachment is now executing the command. Unless we are executing
* command asynchronously, wait until it finishes.
*/
if (async) {
sc->sc_cmd_async = 1;
return (0);
if (tries == 10000) {
printf("%s: device too slow to accept command %d\n",
sc->sc_dev.dv_xname, cmd);
return (EAGAIN);
}
}
/* Wait for command to complete, but maximum 15 seconds. */
if (edc_cmd_wait(sc, devno, 15, poll))
return (EAGAIN);
if ((error = edc_cmd_wait(sc, 15, poll)))
return (error);
/* If polling, call edc_intr() explicitly */
if (poll) {
@ -672,70 +656,21 @@ edc_run_cmd(sc, cmd, devno, cmd_args, cmd_len, async, poll)
* If got attention id DATA TRANSFER READY, wait for
* the transfer to finish.
*/
if (sc->sc_ed[devno]->sc_error == 0
&& (cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)) {
if (edc_cmd_wait(sc, devno, 15, 1))
return (EAGAIN);
if ((cmd == CMD_READ_DATA || cmd == CMD_WRITE_DATA)
&& sc->sc_error == 0) {
if ((error = edc_cmd_wait(sc, 15, poll)))
return (error);
edc_intr(sc);
}
if (edc_cmd_wait(sc, devno, 15, 0))
return (EAGAIN);
if ((error = edc_cmd_wait(sc, 15, poll)))
return (error);
}
/* Check if the command completed successfully; if not, return error */
switch(SB_GET_CMD_STATUS(sc->sc_ed[devno]->sc_status_block)) {
case ISR_COMPLETED:
case ISR_COMPLETED_WITH_ECC:
case ISR_COMPLETED_RETRIES:
case ISR_COMPLETED_WARNING:
return (0);
default:
return (EIO);
}
}
static int
edc_setup_dma(sc, isread, phys, cnt)
struct edc_mca_softc *sc;
int isread;
bus_addr_t phys;
bus_size_t cnt;
{
/* XXX magic constants, should be moved to device-independant location*/
/* The exact sequence to setup MCA DMA controller is taken from Minix */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x90 + sc->sc_drq);
/* Disable access to dma channel */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x20 + sc->sc_drq);
/* Clear the address byte pointer */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 0) & 0xff); /* address bits 0..7 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 8) & 0xff); /* address bits 8..15 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(phys >> 16) & 0xff); /* address bits 16..23 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x40 + sc->sc_drq);
/* Clear the count byte pointer */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
((cnt - 1) >> 0) & 0xff); /* count bits 0..7 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
((cnt - 1) >> 8) & 0xff); /* count bits 8..15 */
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0x70 + sc->sc_drq);
/* Set the transfer mode */
bus_space_write_1(sc->sc_iot, sc->sc_dmaexech, 0,
(isread) ? 0x4C : 0x44);
bus_space_write_1(sc->sc_iot, sc->sc_dmaextcmdh, 0,
0xA0 + sc->sc_drq);
/* Enable access to dma channel */
return (1);
return (sc->sc_error);
}
#ifdef EDC_DEBUG
static const char * const edc_commands[] = {
"Invalid Command",
"Read Data",
@ -832,36 +767,51 @@ static const char * const edc_dev_errors[] = {
"No ID found on track (ID search)",
/* 0x19 - 0xff reserved */
};
#endif /* EDC_DEBUG */
static void
edc_dump_status_block(sc, devno, intr_id)
edc_dump_status_block(sc, status_block, intr_id)
struct edc_mca_softc *sc;
int devno, intr_id;
u_int16_t *status_block;
int intr_id;
{
struct ed_softc *ed = sc->sc_ed[devno];
#ifdef EDC_DEBUG
printf("%s: Command: %s, Status: %s\n",
ed->sc_dev.dv_xname,
edc_commands[ed->sc_status_block[0] & 0x1f],
edc_cmd_status[SB_GET_CMD_STATUS(ed->sc_status_block)]
sc->sc_dev.dv_xname,
edc_commands[status_block[0] & 0x1f],
edc_cmd_status[SB_GET_CMD_STATUS(status_block)]
);
#else
printf("%s: Command: %d, Status: %d\n",
sc->sc_dev.dv_xname,
status_block[0] & 0x1f,
SB_GET_CMD_STATUS(status_block));
#endif
printf("%s: # left blocks: %u, last processed RBA: %u\n",
ed->sc_dev.dv_xname,
ed->sc_status_block[SB_RESBLKCNT_IDX],
(ed->sc_status_block[5] << 16) | ed->sc_status_block[4]);
sc->sc_dev.dv_xname,
status_block[SB_RESBLKCNT_IDX],
(status_block[5] << 16) | status_block[4]);
if (intr_id == ISR_COMPLETED_WARNING) {
#ifdef EDC_DEBUG
printf("%s: Command Error Code: %s\n",
ed->sc_dev.dv_xname,
edc_cmd_error[ed->sc_status_block[1] & 0xff]);
sc->sc_dev.dv_xname,
edc_cmd_error[status_block[1] & 0xff]);
#else
printf("%s: Command Error Code: %d\n",
sc->sc_dev.dv_xname,
status_block[1] & 0xff);
#endif
}
if (intr_id == ISR_CMD_FAILED) {
#ifdef EDC_DEBUG
char buf[100];
printf("%s: Device Error Code: %s\n",
ed->sc_dev.dv_xname,
edc_dev_errors[ed->sc_status_block[2] & 0xff]);
bitmask_snprintf((ed->sc_status_block[2] & 0xff00) >> 8,
sc->sc_dev.dv_xname,
edc_dev_errors[status_block[2] & 0xff]);
bitmask_snprintf((status_block[2] & 0xff00) >> 8,
"\20"
"\01SeekOrCmdComplete"
"\02Track0Flag"
@ -873,6 +823,147 @@ edc_dump_status_block(sc, devno, intr_id)
"\010Reserved0",
buf, sizeof(buf));
printf("%s: Device Status: %s\n",
ed->sc_dev.dv_xname, buf);
sc->sc_dev.dv_xname, buf);
#else
printf("%s: Device Error Code: %d, Device Status: %d\n",
sc->sc_dev.dv_xname,
status_block[2] & 0xff,
(status_block[2] & 0xff00) >> 8);
#endif
}
}
static void
edc_spawn_worker(arg)
void *arg;
{
struct edc_mca_softc *sc = (struct edc_mca_softc *) arg;
int error;
struct proc *wrk;
/* Now, everything is ready, start a kthread */
if ((error = kthread_create1(edcworker, sc, &wrk,
"%s", sc->sc_dev.dv_xname))) {
printf("%s: cannot spawn worker thread: errno=%d\n",
sc->sc_dev.dv_xname, error);
panic("edc_spawn_worker");
}
}
/*
* Main worker thread function.
*/
void
edcworker(arg)
void *arg;
{
struct edc_mca_softc *sc = (struct edc_mca_softc *) arg;
struct ed_softc *ed;
struct buf *bp;
int s, i, error;
config_pending_decr();
s = splbio();
for(;;) {
/* Wait until awakened */
(void) tsleep(sc, PRIBIO, "edcidle", 0);
for(i=0; i<sc->sc_maxdevs; ) {
if ((ed = sc->sc_ed[i]) == NULL) {
i++;
continue;
}
/* Is there a buf for us ? */
simple_lock(&ed->sc_q_lock);
if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
simple_unlock(&ed->sc_q_lock);
i++;
continue;
}
BUFQ_REMOVE(&ed->sc_q, bp);
simple_unlock(&ed->sc_q_lock);
/* Instrumentation. */
disk_busy(&ed->sc_dk);
error = edc_bio(sc, ed, bp->b_data, bp->b_bcount,
bp->b_rawblkno, (bp->b_flags & B_READ), 0);
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
} else {
/* Set resid, most commonly to zero. */
bp->b_resid = sc->sc_resblk * DEV_BSIZE;
}
disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
#if NRND > 0
rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
biodone(bp);
}
}
splx(s);
}
int
edc_bio(struct edc_mca_softc *sc, struct ed_softc *ed, void *data,
size_t bcount, daddr_t rawblkno, int isread, int poll)
{
u_int16_t cmd_args[4];
int error=0, fl;
u_int16_t track;
u_int16_t cyl;
u_int8_t head;
u_int8_t sector;
mca_disk_busy();
/* set WAIT and R/W flag appropriately for the DMA transfer */
fl = ((poll) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)
| ((isread) ? BUS_DMA_READ : BUS_DMA_WRITE);
/* Load the buffer for DMA transfer. */
if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_xfer, data,
bcount, NULL, BUS_DMA_STREAMING|fl))) {
printf("%s: ed_bio: unable to load DMA buffer - error %d\n",
ed->sc_dev.dv_xname, error);
goto out;
}
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_xfer, 0,
bcount, (isread) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
track = rawblkno / ed->sectors;
head = track % ed->heads;
cyl = track / ed->heads;
sector = rawblkno % ed->sectors;
/* Read or Write Data command */
cmd_args[0] = 2; /* Options 0000010 */
cmd_args[1] = bcount / DEV_BSIZE;
cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
cmd_args[3] = ((cyl & 0x3E0) >> 5);
error = edc_run_cmd(sc,
(isread) ? CMD_READ_DATA : CMD_WRITE_DATA,
ed->sc_devno, cmd_args, 4, poll);
/* Sync the DMA memory */
if (!error) {
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_xfer, 0, bcount,
(isread)? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}
/* We are done, unload buffer from DMA map */
bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_xfer);
out:
mca_disk_unbusy();
return (error);
}