Fix the HPC1 transmit logic, which was previously very broken.

HPC1 does not mark transmitted descriptors the way HPC3 does, so
we must query the HPC1 chip to determine which descriptor it
expects to process next, reclaim the ones already used, and
restart DMA if necessary. Each revision's logic now lives in its
own sq_txring_hpc{1,3} function.
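
As a rough sketch, the new HPC1 reclaim path boils down to the
following (condensed from the sq_txring_hpc1 added below;
per-descriptor cleanup, tracing, the watchdog reset and the
full-ring corner case are elided, and all names are the driver's
own):

/*
 * Condensed sketch of sq_txring_hpc1() -- see the full version
 * in the diff below.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	u_int32_t reclaimto, status;
	int i = sc->sc_prevtx;

	/* If transmit DMA is still running we cannot reclaim safely. */
	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE)
		return;

	/* NDBP points at the next descriptor the chip would process. */
	reclaimto = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    HPC1_ENETX_NDBP);

	/* Everything in front of NDBP has been sent; reclaim it. */
	while (sc->sc_nfreetx < SQ_NTXDESC &&
	    SQ_CDTXADDR(sc, i) != reclaimto) {
		/* ... sync, unload and free descriptor i ... */
		sc->sc_nfreetx++;
		i = SQ_NEXTTX(i);
	}

	/* If packets are still queued, restart DMA at NDBP. */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC1_ENETX_CFXBP, reclaimto);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC1_ENETX_CBP, reclaimto);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
	}

	sc->sc_prevtx = i;
}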

HPC1's transmit interrupt conditions also differ from HPC3's, so
clear the INTR bits from descriptors when appending new packets
to the end of the chain, in order to avoid unwanted interrupts.
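
Concretely, when new packets are tacked onto the end of an
existing chain, the previous tail descriptor has its EOCHAIN bit
cleared, and on HPC1 its INTR bit as well (this mirrors the
sq_start hunk below):

/* Link the new packets onto the previous tail; on HPC1 also
 * clear INTR so the old tail no longer generates an interrupt. */
sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;

if (sc->hpc_regs->revision != 3)
	sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl &=
	    ~HPC1_HDD_CTL_INTR;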

Also, be extra careful when restarting the transmit ring. Since
transmit interrupts seem to be relatively slow on HPC1, sq_start
may be called while the DMA engine is quiescent, before a
transmit interrupt has been asserted. We cannot behave as we do
for HPC3, which begins transmission from the first packet pulled
off the IFQ whenever the DMA engine is quiescent, since that
would skip already-enqueued packets. It appears that sq_start is
never called before HPC3 asserts an interrupt, which restarts
the transmit queue at the appropriate place. With HPC1, however,
this happens often, so we cannot assume that DMA being inactive
in sq_start means that all previously queued packets have fled
the coop.
XXX Is there a similar race possible with HPC3?
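
In sq_start this becomes a two-way choice on the HPC1 path when
transmit DMA is found inactive (condensed from the hunk below):
kick off from the first new descriptor only if the ring was
completely empty on entry, otherwise defer to sq_txring_hpc1 to
find the correct restart point.

/* HPC1 path in sq_start, transmit DMA inactive. */
if (ofree == SQ_NTXDESC) {
	/* Ring was empty before this call: start at the new packets. */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    HPC1_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, firsttx));
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    HPC1_ENETX_CBP, SQ_CDTXADDR(sc, firsttx));
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
} else {
	/* Older packets may still be pending; reclaim and restart
	 * wherever the chip says it stopped. */
	sq_txring_hpc1(sc);
}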

HPC3 logic should remain functionally unchanged, and HPC1 should
finally work properly.
rumble 2004-12-29 06:57:52 +00:00
parent 38cddbe010
commit d4734bb3d4
1 changed file with 183 additions and 73 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: if_sq.c,v 1.22 2004/12/29 02:11:31 rumble Exp $ */
/* $NetBSD: if_sq.c,v 1.23 2004/12/29 06:57:52 rumble Exp $ */
/*
* Copyright (c) 2001 Rafal K. Boni
@@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.22 2004/12/29 02:11:31 rumble Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.23 2004/12/29 06:57:52 rumble Exp $");
#include "bpfilter.h"
@@ -113,6 +113,8 @@ static void sq_set_filter(struct sq_softc *);
static int sq_intr(void *);
static int sq_rxintr(struct sq_softc *);
static int sq_txintr(struct sq_softc *);
static void sq_txring_hpc1(struct sq_softc *);
static void sq_txring_hpc3(struct sq_softc *);
static void sq_reset(struct sq_softc *);
static int sq_add_rxbuf(struct sq_softc *, int);
static void sq_dump_buffer(u_int32_t addr, u_int32_t len);
@@ -372,9 +374,9 @@ sq_init(struct ifnet *ifp)
/* Set up HPC ethernet DMA config */
if (sc->hpc_regs->revision == 3) {
reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetr_dmacfg);
HPC_ENETR_DMACFG);
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetr_dmacfg,
HPC_ENETR_DMACFG,
reg | ENETR_DMACFG_FIX_RXDC |
ENETR_DMACFG_FIX_INTR |
ENETR_DMACFG_FIX_EOP);
@@ -388,6 +390,14 @@ sq_init(struct ifnet *ifp)
bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl,
sc->hpc_regs->enetr_ctl_active);
/*
* Turn off delayed receive interrupts on HPC1.
* (see Hollywood HPC Specification 2.1.4.3)
*/
if (sc->hpc_regs->revision != 3)
bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENET_INTDELAY,
HPC1_ENET_INTDELAYVAL);
ifp->if_flags |= IFF_RUNNING;
ifp->if_flags &= ~IFF_OACTIVE;
@@ -651,7 +661,6 @@ sq_start(struct ifnet *ifp)
/* Advance the tx pointer. */
sc->sc_nfreetx -= dmamap->dm_nsegs;
sc->sc_nexttx = nexttx;
}
/* All transmit descriptors used up, let upper layers know */
@@ -668,8 +677,9 @@ sq_start(struct ifnet *ifp)
* last packet we enqueued, mark it as the last
* descriptor.
*
* HDD_CTL_EOPACKET && HDD_CTL_INTR cause an
* interrupt.
* HDD_CTL_INTR will generate an interrupt on
* HPC1 by itself. HPC3 will not interrupt unless
* HDD_CTL_EOPACKET is set as well.
*/
KASSERT(lasttx != -1);
if (sc->hpc_regs->revision == 3) {
@@ -706,25 +716,46 @@ sq_start(struct ifnet *ifp)
sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
~HDD_CTL_EOCHAIN;
if (sc->hpc_regs->revision != 3)
sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
&= ~HPC1_HDD_CTL_INTR;
SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
} else {
} else if (sc->hpc_regs->revision == 3) {
SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetx_ndbp, SQ_CDTXADDR(sc, firsttx));
if (sc->hpc_regs->revision != 3) {
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, firsttx));
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CBP, SQ_CDTXADDR(sc, firsttx));
}
HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));
/* Kick DMA channel into life */
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetx_ctl,
sc->hpc_regs->enetx_ctl_active);
HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
} else {
/*
* In the HPC1 case where transmit DMA is
* inactive, we can either kick off if
* the ring was previously empty, or call
* our transmit interrupt handler to
* figure out if the ring stopped short
* and restart at the right place.
*/
if (ofree == SQ_NTXDESC) {
SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_NDBP,
SQ_CDTXADDR(sc, firsttx));
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, firsttx));
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CBP, SQ_CDTXADDR(sc, firsttx));
/* Kick DMA channel into life */
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
} else
sq_txring_hpc1(sc);
}
/* Set a watchdog timer in case the chip flakes out. */
@@ -914,6 +945,8 @@ sq_rxintr(struct sq_softc *sc)
sc->sc_rxmap[i]->dm_mapsize,
BUS_DMASYNC_PREREAD);
SQ_INIT_RXDESC(sc, i);
SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
sc->sc_dev.dv_xname, i));
continue;
}
@@ -923,6 +956,8 @@ sq_rxintr(struct sq_softc *sc)
sc->sc_rxmap[i]->dm_mapsize,
BUS_DMASYNC_PREREAD);
SQ_INIT_RXDESC(sc, i);
SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
"failed\n", sc->sc_dev.dv_xname, i));
continue;
}
@@ -981,11 +1016,8 @@ sq_rxintr(struct sq_softc *sc)
static int
sq_txintr(struct sq_softc *sc)
{
int i;
int shift = 0;
u_int32_t status;
u_int32_t hpc1_ready = 0;
u_int32_t hpc3_not_ready = 1;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
if (sc->hpc_regs->revision != 3)
@@ -997,7 +1029,6 @@ sq_txintr(struct sq_softc *sc)
SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);
if ((status & ( (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD)) == 0) {
/* XXX */ printf("txstat: %x\n", status);
if (status & TXSTAT_COLL)
ifp->if_collisions++;
@@ -1007,12 +1038,133 @@ sq_txintr(struct sq_softc *sc)
}
if (status & TXSTAT_16COLL) {
printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
printf("%s: max collisions reached\n",
sc->sc_dev.dv_xname);
ifp->if_oerrors++;
ifp->if_collisions += 16;
}
}
/* prevtx now points to next xmit packet not yet finished */
if (sc->hpc_regs->revision == 3)
sq_txring_hpc3(sc);
else
sq_txring_hpc1(sc);
/* If we have buffers free, let upper layers know */
if (sc->sc_nfreetx > 0)
ifp->if_flags &= ~IFF_OACTIVE;
/* If all packets have left the coop, cancel watchdog */
if (sc->sc_nfreetx == SQ_NTXDESC)
ifp->if_timer = 0;
SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
sq_start(ifp);
return 1;
}
/*
* Reclaim used transmit descriptors and restart the transmit DMA
* engine if necessary.
*/
static void
sq_txring_hpc1(struct sq_softc *sc)
{
/*
* HPC1 doesn't tag transmitted descriptors, however,
* the NDBP register points to the next descriptor that
* has not yet been processed. If DMA is not in progress,
* we can safely reclaim all descriptors up to NDBP, and,
* if necessary, restart DMA at NDBP. Otherwise, if DMA
* is active, we can only safely reclaim up to CBP.
*
* For now, we'll only reclaim on inactive DMA and assume
* that a sufficiently large ring keeps us out of trouble.
*/
u_int32_t reclaimto, status;
int reclaimall, i = sc->sc_prevtx;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CTL);
if (status & HPC1_ENETX_CTL_ACTIVE) {
SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
return;
} else {
reclaimto = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_NDBP);
}
if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
reclaimall = 1;
else
reclaimall = 0;
while (sc->sc_nfreetx < SQ_NTXDESC) {
if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
break;
SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
/* Sync the packet data, unload DMA map, free mbuf */
bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
sc->sc_txmap[i]->dm_mapsize,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
m_freem(sc->sc_txmbuf[i]);
sc->sc_txmbuf[i] = NULL;
ifp->if_opackets++;
sc->sc_nfreetx++;
SQ_TRACE(SQ_DONE_DMA, sc, i, status);
i = SQ_NEXTTX(i);
}
if (sc->sc_nfreetx < SQ_NTXDESC) {
SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
KASSERT(reclaimto == SQ_CDTXADDR(sc, i));
bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CFXBP,
reclaimto);
bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CBP,
reclaimto);
/* Kick DMA channel into life */
bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CTL,
HPC1_ENETX_CTL_ACTIVE);
/*
* Set a watchdog timer in case the chip
* flakes out.
*/
ifp->if_timer = 5;
}
sc->sc_prevtx = i;
}
/*
* Reclaim used transmit descriptors and restart the transmit DMA
* engine if necessary.
*/
static void
sq_txring_hpc3(struct sq_softc *sc)
{
/*
* HPC3 tags descriptors with a bit once they've been
* transmitted. We need only free each XMITDONE'd
* descriptor, and restart the DMA engine if any
* descriptors are left over.
*/
int i;
u_int32_t status = 0;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
i = sc->sc_prevtx;
while (sc->sc_nfreetx < SQ_NTXDESC) {
/*
@@ -1021,56 +1173,30 @@ sq_txintr(struct sq_softc *sc)
* has gone idle.
*/
status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetx_ctl) >> shift;
HPC_ENETX_CTL);
SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
/*
* If not yet transmitted, try and start DMA engine again.
* HPC3 tags transmitted descriptors with XMITDONE whereas
* HPC1 will not halt before sending through EOCHAIN.
*/
if (sc->hpc_regs->revision == 3) {
hpc3_not_ready =
sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE;
} else {
if (hpc1_ready)
hpc1_ready++;
else {
if (sc->sc_txdesc[i].hpc1_hdd_ctl &
HPC1_HDD_CTL_EOPACKET)
hpc1_ready = 1;
}
}
if (hpc3_not_ready == 0 || hpc1_ready == 2) {
if ((status & (sc->hpc_regs->enetx_ctl_active >> shift)) == 0) { // XXX
/* Check for used descriptor and restart DMA chain if needed */
if ((sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE) == 0) {
if ((status & ENETX_CTL_ACTIVE) == 0) {
SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetx_ndbp, SQ_CDTXADDR(sc, i));
if (sc->hpc_regs->revision != 3) {
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, i));
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
HPC1_ENETX_CBP, SQ_CDTXADDR(sc, i));
}
HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));
/* Kick DMA channel into life */
bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
sc->hpc_regs->enetx_ctl,
sc->hpc_regs->enetx_ctl_active);
HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
/*
* Set a watchdog timer in case the chip
* flakes out.
*/
ifp->if_timer = 5;
} else {
} else
SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
}
break;
}
@@ -1089,24 +1215,9 @@ sq_txintr(struct sq_softc *sc)
i = SQ_NEXTTX(i);
}
/* prevtx now points to next xmit packet not yet finished */
sc->sc_prevtx = i;
/* If we have buffers free, let upper layers know */
if (sc->sc_nfreetx > 0)
ifp->if_flags &= ~IFF_OACTIVE;
/* If all packets have left the coop, cancel watchdog */
if (sc->sc_nfreetx == SQ_NTXDESC)
ifp->if_timer = 0;
SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
sq_start(ifp);
return 1;
sc->sc_prevtx = i;
}
void
sq_reset(struct sq_softc *sc)
{
@@ -1177,7 +1288,6 @@ sq_dump_buffer(u_int32_t addr, u_int32_t len)
printf("\n");
}
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{