Various improvements to mvgbe(4), most notably improved multicast filtering.

jakllsch 2011-02-01 23:40:12 +00:00
parent bb246152ed
commit e43028272f
2 changed files with 182 additions and 130 deletions

if_mvgbe.c

@@ -1,4 +1,4 @@
/* $NetBSD: if_mvgbe.c,v 1.4 2011/01/29 01:53:18 jakllsch Exp $ */
/* $NetBSD: if_mvgbe.c,v 1.5 2011/02/01 23:40:12 jakllsch Exp $ */
/*
* Copyright (c) 2007, 2008 KIYOHARA Takashi
* All rights reserved.
@@ -25,7 +25,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.4 2011/01/29 01:53:18 jakllsch Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.5 2011/02/01 23:40:12 jakllsch Exp $");
#include "rnd.h"
@@ -75,10 +75,10 @@ int mvgbe_debug = MVGBE_DEBUG;
bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVGBE_WRITE(sc, reg, val) \
bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVGBE_READ_FILTER(sc, reg) \
bus_space_read_4((sc)->sc_iot, (sc)->sc_dafh, (reg))
#define MVGBE_READ_FILTER(sc, reg, val, c) \
bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_WRITE_FILTER(sc, reg, val, c) \
bus_space_set_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_TX_RING_CNT 256
#define MVGBE_TX_RING_MSK (MVGBE_TX_RING_CNT - 1)
@@ -93,7 +93,7 @@ CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) ==
(MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT);
#define MVGBE_JSLOTS 384 /* XXXX */
#define MVGBE_JLEN (MVGBE_MRU + MVGBE_BUF_ALIGN)
#define MVGBE_JLEN ((MVGBE_MRU + MVGBE_RXBUF_ALIGN)&~MVGBE_RXBUF_MASK)
#define MVGBE_NTXSEG 30
#define MVGBE_JPAGESZ PAGE_SIZE
#define MVGBE_RESID \
@@ -249,9 +249,10 @@ static int mvgbe_init(struct ifnet *);
static void mvgbe_stop(struct ifnet *, int);
static void mvgbe_watchdog(struct ifnet *);
/* MII functions */
static int mvgbe_ifmedia_upd(struct ifnet *);
static void mvgbe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int mvgbe_ifflags_cb(struct ethercom *);
static int mvgbe_mediachange(struct ifnet *);
static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *);
static int mvgbe_init_rx_ring(struct mvgbe_softc *);
static int mvgbe_init_tx_ring(struct mvgbe_softc *);
@@ -262,7 +263,8 @@ static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
static void mvgbe_rxeof(struct mvgbe_softc *);
static void mvgbe_txeof(struct mvgbe_softc *);
static void mvgbe_setmulti(struct mvgbe_softc *);
static uint8_t mvgbe_crc8(const uint8_t *, size_t);
static void mvgbe_filter_setup(struct mvgbe_softc *);
#ifdef MVGBE_DEBUG
static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
#endif
@@ -697,17 +699,12 @@ mvgbe_attach(device_t parent, device_t self, void *aux)
sc->sc_rdata = (struct mvgbe_ring_data *)kva;
memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));
#if 0
/*
* We can support 802.1Q VLAN-sized frames and jumbo
* Ethernet frames.
*/
sc->sc_ethercom.ec_capabilities |=
ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
#else
/* XXXX: We don't know the usage of VLAN. */
sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
#endif
ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
/* Try to allocate memory for jumbo buffers. */
if (mvgbe_alloc_jumbo_mem(sc)) {
@@ -730,6 +727,10 @@ mvgbe_attach(device_t parent, device_t self, void *aux)
IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
/*
 * However, IPv6 packets in the stream can cause incorrect TCPv4 Tx checksums.
 */
sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
IFQ_SET_READY(&ifp->if_snd);
strcpy(ifp->if_xname, device_xname(sc->sc_dev));
@@ -746,7 +747,7 @@ mvgbe_attach(device_t parent, device_t self, void *aux)
sc->sc_ethercom.ec_mii = &sc->sc_mii;
ifmedia_init(&sc->sc_mii.mii_media, 0,
mvgbe_ifmedia_upd, mvgbe_ifmedia_sts);
mvgbe_mediachange, mvgbe_mediastatus);
mii_attach(self, &sc->sc_mii, 0xffffffff,
MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
@@ -763,6 +764,7 @@ mvgbe_attach(device_t parent, device_t self, void *aux)
if_attach(ifp);
ether_ifattach(ifp, sc->sc_enaddr);
ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);
#if NRND > 0
rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
@@ -898,42 +900,26 @@ mvgbe_start(struct ifnet *ifp)
}
static int
mvgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
struct mvgbe_softc *sc = ifp->if_softc;
struct ifreq *ifr = data;
struct mii_data *mii;
int s, error = 0;
s = splnet();
switch (command) {
case SIOCSIFFLAGS:
DPRINTFN(2, ("mvgbe_ioctl IFFLAGS\n"));
if (ifp->if_flags & IFF_UP)
mvgbe_init(ifp);
else
if (ifp->if_flags & IFF_RUNNING)
mvgbe_stop(ifp, 0);
sc->sc_if_flags = ifp->if_flags;
error = 0;
break;
switch (cmd) {
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
mii = &sc->sc_mii;
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
break;
default:
DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
error = ether_ioctl(ifp, command, data);
error = ether_ioctl(ifp, cmd, data);
if (error == ENETRESET) {
if (ifp->if_flags & IFF_RUNNING) {
mvgbe_setmulti(sc);
DPRINTFN(2,
("mvgbe_ioctl setmulti called\n"));
mvgbe_filter_setup(sc);
}
error = 0;
}
@@ -945,6 +931,9 @@ mvgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
return error;
}
int mvgbe_rximt = 0;
int mvgbe_tximt = 0;
static int
mvgbe_init(struct ifnet *ifp)
{
@@ -952,17 +941,10 @@ mvgbe_init(struct ifnet *ifp)
struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
struct mii_data *mii = &sc->sc_mii;
uint32_t reg;
int i, s;
int i;
DPRINTFN(2, ("mvgbe_init\n"));
s = splnet();
if (ifp->if_flags & IFF_RUNNING) {
splx(s);
return 0;
}
/* Cancel pending I/O and free all RX/TX buffers. */
mvgbe_stop(ifp, 0);
@@ -974,13 +956,11 @@
if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
aprint_error_ifnet(ifp,
"initialization failed: no memory for tx buffers\n");
splx(s);
return ENOBUFS;
}
if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
aprint_error_ifnet(ifp,
"initialization failed: no memory for rx buffers\n");
splx(s);
return ENOBUFS;
}
@@ -988,7 +968,7 @@
MVGBE_PSC_ANFC | /* Enable Auto-Neg Flow Ctrl */
MVGBE_PSC_RESERVED | /* Must be set to 1 */
MVGBE_PSC_FLFAIL | /* Do NOT Force Link Fail */
MVGBE_PSC_MRU(MVGBE_PSC_MRU_9700) | /* Always 9700 OK */
MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) | /* we want 9k */
MVGBE_PSC_SETFULLDX); /* Set_FullDx */
/* XXXX: mvgbe(4) always uses RGMII. */
MVGBE_WRITE(sc, MVGBE_PSC1,
@@ -1026,7 +1006,11 @@
MVGBE_SDC_BLMR | /* Big/Little Endian Receive Mode: No swap */
MVGBE_SDC_BLMT | /* Big/Little Endian Transmit Mode: No swap */
#endif
MVGBE_SDC_IPGINTRX(mvgbe_rximt) |
MVGBE_SDC_TXBSZ_16_64BITWORDS);
MVGBE_WRITE(sc, MVGBE_PTFUT, MVGBE_PTFUT_IPGINTTX(mvgbe_tximt));
mvgbe_filter_setup(sc);
mii_mediachg(mii);
@@ -1056,8 +1040,6 @@
ifp->if_flags |= IFF_RUNNING;
ifp->if_flags &= ~IFF_OACTIVE;
splx(s);
return 0;
}
@@ -1178,30 +1160,41 @@ mvgbe_watchdog(struct ifnet *ifp)
}
}
static int
mvgbe_ifflags_cb(struct ethercom *ec)
{
struct ifnet *ifp = &ec->ec_if;
struct mvgbe_softc *sc = ifp->if_softc;
int change = ifp->if_flags ^ sc->sc_if_flags;
if (change != 0)
sc->sc_if_flags = ifp->if_flags;
if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
return ENETRESET;
if ((change & IFF_PROMISC) != 0)
mvgbe_filter_setup(sc);
return 0;
}
/*
* Set media options.
*/
static int
mvgbe_ifmedia_upd(struct ifnet *ifp)
mvgbe_mediachange(struct ifnet *ifp)
{
struct mvgbe_softc *sc = ifp->if_softc;
mii_mediachg(&sc->sc_mii);
return 0;
return ether_mediachange(ifp);
}
/*
* Report current media status.
*/
static void
mvgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct mvgbe_softc *sc = ifp->if_softc;
mii_pollstat(&sc->sc_mii);
ifmr->ifm_active = sc->sc_mii.mii_media_active;
ifmr->ifm_status = sc->sc_mii.mii_media_status;
ether_mediastatus(ifp, ifmr);
}
@@ -1212,7 +1205,7 @@ mvgbe_init_rx_ring(struct mvgbe_softc *sc)
struct mvgbe_ring_data *rd = sc->sc_rdata;
int i;
bzero((char *)rd->mvgbe_rx_ring,
memset(rd->mvgbe_rx_ring, 0,
sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);
for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
@@ -1252,7 +1245,7 @@
struct mvgbe_ring_data *rd = sc->sc_rdata;
int i;
bzero((char *)sc->sc_rdata->mvgbe_tx_ring,
memset(sc->sc_rdata->mvgbe_tx_ring, 0,
sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);
for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
@@ -1323,16 +1316,18 @@ mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
m_new->m_data = m_new->m_ext.ext_buf;
}
align = (u_long)m_new->m_data & MVGBE_BUF_MASK;
if (align != 0)
m_adj(m_new, MVGBE_BUF_ALIGN - align);
align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
if (align != 0) {
DPRINTFN(1,("align = %d\n", align));
m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
}
c = &sc->sc_cdata.mvgbe_rx_chain[i];
r = c->mvgbe_desc;
c->mvgbe_mbuf = m_new;
r->bufptr = dmamap->dm_segs[0].ds_addr +
(((vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf));
r->bufsize = MVGBE_JLEN & ~MVGBE_BUF_MASK;
r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;
MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1388,7 +1383,7 @@ mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
state = 4;
sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
DPRINTFN(1,("mvgbe_jumbo_buf = 0x%p\n", sc->sc_cdata.mvgbe_jumbo_buf));
DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf));
LIST_INIT(&sc->sc_jfree_listhead);
LIST_INIT(&sc->sc_jinuse_listhead);
@@ -1680,22 +1675,26 @@ mvgbe_rxeof(struct mvgbe_softc *sc)
continue;
}
if (total_len > MVGBE_RX_CSUM_MIN_BYTE) {
/* Check IP header checksum */
if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
m->m_pkthdr.csum_flags |=
M_CSUM_IPv4_BAD;
}
/* Check TCP/UDP checksum */
if (rxstat & MVGBE_RX_L4_TYPE_TCP)
if (total_len <= MVGBE_RX_CSUM_MIN_BYTE) /* XXX documented? */
goto sw_csum;
if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
/* Check IPv4 header checksum */
m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
m->m_pkthdr.csum_flags |=
M_CSUM_IPv4_BAD;
/* Check TCPv4/UDPv4 checksum */
if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
MVGBE_RX_L4_TYPE_TCP)
m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
else if (rxstat & MVGBE_RX_L4_TYPE_UDP)
else if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
MVGBE_RX_L4_TYPE_UDP)
m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
if (!(rxstat & MVGBE_RX_L4_CHECKSUM))
m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
}
sw_csum:
/*
* Try to allocate a new jumbo buffer. If that
@@ -1803,49 +1802,99 @@ mvgbe_txeof(struct mvgbe_softc *sc)
cdata->mvgbe_tx_cons = idx;
}
static void
mvgbe_setmulti(struct mvgbe_softc *sc)
static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
struct ifnet *ifp= &sc->sc_ethercom.ec_if;
uint32_t pxc, dfut, upm = 0, filter = 0;
uint8_t ln = sc->sc_enaddr[5] & 0xf; /* last nibble */
int bit;
uint8_t byte;
uint8_t crc = 0;
const uint8_t poly = 0x07;
if (ifp->if_flags & IFF_PROMISC) {
upm = MVGBE_PXC_UPM;
filter =
MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
} else if (ifp->if_flags & IFF_ALLMULTI) {
filter =
MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
while (size--)
for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
return crc;
}
CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT);
static void
mvgbe_filter_setup(struct mvgbe_softc *sc)
{
struct ethercom *ec = &sc->sc_ethercom;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct ether_multi *enm;
struct ether_multistep step;
uint32_t *dfut, *dfsmt, *dfomt;
uint32_t pxc;
int i;
const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
dfut = kmem_zalloc(sizeof(*dfut) * MVGBE_NDFUT, KM_SLEEP);
dfsmt = kmem_zalloc(sizeof(*dfsmt) * MVGBE_NDFSMT, KM_SLEEP);
dfomt = kmem_zalloc(sizeof(*dfomt) * MVGBE_NDFOMT, KM_SLEEP);
if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
goto allmulti;
}
/* Set Unicast Promiscuous mode */
ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
/* ranges are complex and somewhat rare */
goto allmulti;
}
/* chip handles some IPv4 multicast specially */
if (memcmp(enm->enm_addrlo, special, 5) == 0) {
i = enm->enm_addrlo[5];
dfsmt[i>>2] =
MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
} else {
i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
dfomt[i>>2] =
MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
}
ETHER_NEXT_MULTI(step, enm);
}
goto set;
allmulti:
if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
for (i = 0; i < MVGBE_NDFSMT; i++) {
dfsmt[i] = dfomt[i] =
MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
}
}
set:
pxc = MVGBE_READ(sc, MVGBE_PXC);
pxc &= ~MVGBE_PXC_UPM;
pxc |= upm;
pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP;
if (ifp->if_flags & IFF_BROADCAST) {
pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP);
}
if (ifp->if_flags & IFF_PROMISC) {
pxc |= MVGBE_PXC_UPM;
}
MVGBE_WRITE(sc, MVGBE_PXC, pxc);
/* Set Destination Address Filter Multicast Tables */
MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, filter, MVGBE_NDFSMT);
MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, filter, MVGBE_NDFOMT);
if (ifp->if_flags & IFF_PROMISC) {
/* necessary ? */
MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, filter, MVGBE_NDFUT);
return;
}
/* Set Destination Address Filter Unicast Table */
dfut = MVGBE_READ_FILTER(sc, MVGBE_DFUT + (ln & 0x0c));
dfut &= ~MVGBE_DF(ln & 0x03, MVGBE_DF_QUEUE_MASK);
dfut |= MVGBE_DF(ln & 0x03, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
MVGBE_WRITE_FILTER(sc, MVGBE_DFUT + (ln & 0x0c), dfut, 1);
i = sc->sc_enaddr[5] & 0xf; /* last nibble */
dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT);
/* Set Destination Address Filter Multicast Tables */
MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT);
MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT);
kmem_free(dfut, sizeof(dfut[0]) * MVGBE_NDFUT);
kmem_free(dfsmt, sizeof(dfsmt[0]) * MVGBE_NDFSMT);
kmem_free(dfomt, sizeof(dfsmt[0]) * MVGBE_NDFOMT);
}
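
The new hash and table layout are easy to sanity-check outside the kernel. The following is a minimal host-side sketch (standalone userland C, not part of this commit; the sample address is arbitrary) that reproduces the CRC-8 and the word/slot arithmetic of mvgbe_filter_setup():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same bitwise CRC-8 as mvgbe_crc8(): polynomial 0x07, MSB first, init 0. */
static uint8_t
crc8(const uint8_t *data, size_t size)
{
	uint8_t byte, crc = 0;
	int bit;

	while (size--)
		for (byte = *data++, bit = 7; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (byte >> bit)) & 1) ? 0x07 : 0);
	return crc;
}

int
main(void)
{
	/* Arbitrary example: the SSDP group 239.255.255.250. */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa };
	const uint8_t special[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
	int i;

	if (memcmp(mac, special, 5) == 0) {
		/* 01:00:5e:00:00:xx: special table, indexed by last byte */
		i = mac[5];
		printf("DFSMT word %d, slot %d\n", i >> 2, i & 3);
	} else {
		/* everything else: other table, indexed by the CRC-8 */
		i = crc8(mac, sizeof(mac));
		printf("DFOMT word %d, slot %d\n", i >> 2, i & 3);
	}
	return 0;
}

Each 32-bit table word holds four one-byte entries, which is where the i>>2 (word index) and i&3 (slot within the word) split comes from.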
#ifdef MVGBE_DEBUG
@@ -1870,7 +1919,5 @@ mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
DESC_PRINT(desc->nextdescptr);
#endif
#undef DESC_PRINT
printf("txdesc[%d].desc->returninfo=%#lx\n", idx, desc->returninfo);
printf("txdesc[%d].desc->alignbufptr=%p\n", idx, desc->alignbufptr);
}
#endif

mvgbereg.h

@@ -1,4 +1,4 @@
/* $NetBSD: mvgbereg.h,v 1.2 2010/10/02 05:57:42 kiyohara Exp $ */
/* $NetBSD: mvgbereg.h,v 1.3 2011/02/01 23:40:12 jakllsch Exp $ */
/*
* Copyright (c) 2007 KIYOHARA Takashi
* All rights reserved.
@@ -236,8 +236,8 @@
#define MVGBE_SDC_BLMR (1 << 4)
#define MVGBE_SDC_BLMT (1 << 5)
#define MVGBE_SDC_SWAPMODE (1 << 6)
#define MVGBE_SDC_IPGINTRX(n) ((n) << 8)
#define MVGBE_SDC_IPGINTRX_MASK MVGBE_SDC_IPGINTRX(0x3fff)
#define MVGBE_SDC_IPGINTRX_MASK __BITS(21, 8)
#define MVGBE_SDC_IPGINTRX(x) __SHIFTIN(x, MVGBE_SDC_IPGINTRX_MASK)
#define MVGBE_SDC_TXBSZ(x) ((x) << 22)
#define MVGBE_SDC_TXBSZ_MASK MVGBE_SDC_TXBSZ(7)
#define MVGBE_SDC_TXBSZ_1_64BITWORDS MVGBE_SDC_TXBSZ(0)
@@ -310,6 +310,10 @@
#define MVGBE_ICE_INTADDRERR (1 << 23)
#define MVGBE_ICE_ETHERINTSUM (1 << 31)
/* Port Tx FIFO Urgent Threshold (MVGBE_PTFUT) */
#define MVGBE_PTFUT_IPGINTTX_MASK __BITS(17, 4)
#define MVGBE_PTFUT_IPGINTTX(x) __SHIFTIN(x, MVGBE_PTFUT_IPGINTTX_MASK)
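
The __BITS and __SHIFTIN macros come from NetBSD's <sys/cdefs.h>. Roughly (a simplified sketch; the real definitions are uintmax_t-wide and guard against undefined shifts), they behave like:

/* Simplified sketch of <sys/cdefs.h> bit-field macros. */
#define BIT(n)        (1UL << (n))
#define BITS(hi, lo)  ((BIT((hi) + 1) - 1) ^ (BIT(lo) - 1))  /* mask, bits lo..hi */
#define LOWEST_SET(m) ((((m) - 1) & (m)) ^ (m))              /* lowest set bit of m */
#define SHIFTIN(x, m) ((x) * LOWEST_SET(m))                  /* place x in field m */

/* Example with the mask above:
 *   BITS(17, 4)             == 0x0003fff0  (MVGBE_PTFUT_IPGINTTX_MASK)
 *   SHIFTIN(5, BITS(17, 4)) == 5 << 4 == 0x50
 */

Multiplying by the mask's lowest set bit is equivalent to shifting into the field, but it lets the shift amount be derived from the mask itself.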
/* Port Rx Minimal Frame Size (MVGBE_PMFS) */
#define MVGBE_PMFS_RXMFS(rxmfs) (((rxmfs) - 40) & 0x7c)
/* RxMFS = 40,44,48,52,56,60,64 bytes */
@@ -331,16 +335,22 @@
#define MVGBE_DF_QUEUE_MASK ((7) << 1)
#define MVGBE_MRU 9700 /* The Maximal Receive Packet Size */
/*
* Set the chip's packet size limit to 9022.
* (ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN)
*/
#define MVGBE_MRU 9022
#define MVGBE_BUF_ALIGN 8
#define MVGBE_BUF_MASK (MVGBE_BUF_ALIGN - 1)
#define MVGBE_RXBUF_ALIGN 8
#define MVGBE_RXBUF_MASK (MVGBE_RXBUF_ALIGN - 1)
#define MVGBE_HWHEADER_SIZE 2
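
For concreteness, the sizing arithmetic that falls out of these defines (MVGBE_JLEN is defined in if_mvgbe.c above; in NetBSD, ETHER_MAX_LEN_JUMBO is 9018 and ETHER_VLAN_ENCAP_LEN is 4):

MVGBE_MRU  = 9018 + 4 = 9022
MVGBE_JLEN = (9022 + 8) & ~7
           = 9030 & ~7
           = 9024	/* the MRU rounded up to the 8-byte Rx buffer alignment */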
/*
* DMA descriptors
* It is 32byte alignment.
* Despite the documentation saying these descriptors only need to be
* aligned to 16-byte boundaries, 32-byte alignment seems to be required
* by the hardware. We'll just pad them out to that to make it easier.
*/
struct mvgbe_tx_desc {
#if BYTE_ORDER == BIG_ENDIAN
@@ -356,10 +366,7 @@ struct mvgbe_tx_desc {
uint32_t bufptr; /* Descriptor buffer pointer */
uint32_t nextdescptr; /* Next descriptor pointer */
#endif
u_long returninfo; /* User resource return information */
uint8_t *alignbufptr; /* Pointer to 8 byte aligned buffer */
uint32_t padding[2]; /* XXXX: required */
uint32_t _padding[4];
} __packed;
struct mvgbe_rx_desc {
@@ -376,9 +383,7 @@ struct mvgbe_rx_desc {
uint32_t bufptr; /* Descriptor buffer pointer */
uint32_t nextdescptr; /* Next descriptor pointer */
#endif
u_long returninfo; /* User resource return information */
uint32_t padding[3]; /* XXXX: required */
uint32_t _padding[4];
} __packed;
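
Since the new padding exists only to round the descriptors out to the 32-byte boundary, the intent can be checked at compile time. A hypothetical host-side sketch (C11 static_assert standing in for the kernel's CTASSERT; the hardware words are abbreviated to an array, since the full field list is not shown in this hunk):

#include <assert.h>
#include <stdint.h>

struct tx_desc_sketch {
	uint32_t hw_word[4];	/* the four hardware-defined 32-bit words */
	uint32_t _padding[4];	/* pad out to the 32-byte boundary */
};

static_assert(sizeof(struct tx_desc_sketch) == 32,
    "tx descriptor must be 32 bytes");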
#define MVGBE_ERROR_SUMMARY (1 << 0)