Get the gmac driver close to working.

matt 2008-12-15 04:44:27 +00:00
parent 92a6d92cc8
commit 313f8b7991
5 changed files with 347 additions and 148 deletions

gemini_gmac.c

@ -1,4 +1,4 @@
/* $NetBSD: gemini_gmac.c,v 1.1 2008/12/14 01:57:02 matt Exp $ */
/* $NetBSD: gemini_gmac.c,v 1.2 2008/12/15 04:44:27 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
@ -49,7 +49,7 @@
#include <sys/gpio.h>
__KERNEL_RCSID(0, "$NetBSD: gemini_gmac.c,v 1.1 2008/12/14 01:57:02 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: gemini_gmac.c,v 1.2 2008/12/15 04:44:27 matt Exp $");
#define SWFREEQ_DESCS 256 /* one page worth */
#define HWFREEQ_DESCS 256 /* one page worth */
@ -89,7 +89,6 @@ gmac_intr_update(struct gmac_softc *sc)
~sc->sc_int_enabled[3]);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
~sc->sc_int_enabled[4]);
}
static void
@ -97,6 +96,36 @@ gmac_init(struct gmac_softc *sc)
{
gmac_hwqmem_t *hqm;
/*
* This shouldn't be needed.
*/
for (bus_size_t i = 0; i < GMAC_TOE_QH_SIZE; i += 4) {
bus_space_write_4(sc->sc_iot, sc->sc_ioh,
GMAC_TOE_QH_OFFSET + i, 0);
}
#if 0
{
bus_space_handle_t global_ioh;
int error;
uint32_t v;
error = bus_space_map(sc->sc_iot, GEMINI_GLOBAL_BASE, 4, 0,
&global_ioh);
KASSERT(error == 0);
aprint_normal_dev(sc->sc_dev, "gmac_init: global_ioh=%#zx\n", global_ioh);
bus_space_write_4(sc->sc_iot, global_ioh, GEMINI_GLOBAL_RESET_CTL,
GLOBAL_RESET_GMAC0|GLOBAL_RESET_GMAC1);
do {
v = bus_space_read_4(sc->sc_iot, global_ioh,
GEMINI_GLOBAL_RESET_CTL);
} while (v & (GLOBAL_RESET_GMAC0|GLOBAL_RESET_GMAC1));
bus_space_unmap(sc->sc_iot, global_ioh, 4);
DELAY(1000);
}
#endif
sc->sc_swfree_min = MIN_RXMAPS;
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_SKBSIZE,
SKB_SIZE_SET(PAGE_SIZE, MCLBYTES));
@ -112,8 +141,36 @@ gmac_init(struct gmac_softc *sc)
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_SELECT, INT3_GMAC1);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_SELECT, INT4_GMAC1);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS, ~0);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS, ~0);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS, ~0);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS, ~0);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, ~0);
gmac_intr_update(sc);
aprint_normal_dev(sc->sc_dev, "gmac_init: sts=%#x/%#x/%#x/%#x/%#x\n",
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
aprint_normal_dev(sc->sc_dev, "gmac_init: mask=%#x/%#x/%#x/%#x/%#x\n",
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
aprint_normal_dev(sc->sc_dev, "gmac_init: select=%#x/%#x/%#x/%#x/%#x\n",
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_SELECT),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_SELECT),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_SELECT),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_SELECT),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_SELECT));
aprint_normal_dev(sc->sc_dev, "gmac_init: create rx dmamap cache\n");
/*
* Allocate the cache for receive dmamaps.
*/
@ -121,6 +178,15 @@ gmac_init(struct gmac_softc *sc)
MCLBYTES, 1);
KASSERT(sc->sc_rxmaps != NULL);
aprint_normal_dev(sc->sc_dev, "gmac_init: create tx dmamap cache\n");
/*
* Allocate the cache for transmit dmamaps.
*/
sc->sc_txmaps = gmac_mapcache_create(sc->sc_dmat, MAX_TXMAPS,
ETHERMTU_JUMBO + ETHER_HDR_LEN, 16);
KASSERT(sc->sc_txmaps != NULL);
aprint_normal_dev(sc->sc_dev, "gmac_init: create sw freeq\n");
/*
* Allocate the memory for sw (receive) free queue
*/
@ -130,21 +196,17 @@ gmac_init(struct gmac_softc *sc)
GMAC_SWFREEQ_RWPTR, GMAC_SWFREEQ_BASE, 0);
KASSERT(sc->sc_swfreeq != NULL);
/*
* Allocate the cache for transmit dmamaps.
*/
sc->sc_txmaps = gmac_mapcache_create(sc->sc_dmat, MAX_TXMAPS,
ETHERMTU_JUMBO + ETHER_HDR_LEN, 16);
KASSERT(sc->sc_txmaps != NULL);
aprint_normal_dev(sc->sc_dev, "gmac_init: create hw freeq\n");
/*
* Allocate the memory for hw (transmit) free queue
*/
hqm = gmac_hwqmem_create(sc->sc_rxmaps, HWFREEQ_DESCS, 1,
hqm = gmac_hwqmem_create(sc->sc_txmaps, HWFREEQ_DESCS, 1,
HQM_CONSUMER|HQM_TX);
sc->sc_hwfreeq = gmac_hwqueue_create(hqm, sc->sc_iot, sc->sc_ioh,
GMAC_HWFREEQ_RWPTR, GMAC_HWFREEQ_BASE, 0);
KASSERT(sc->sc_hwfreeq != NULL);
aprint_normal_dev(sc->sc_dev, "gmac_init: done\n");
}
int
@ -317,6 +379,8 @@ geminigmac_mii_readreg(device_t dv, int phy, int reg)
rv = mii_bitbang_readreg(parent, &geminigmac_mii_bitbang_ops, phy, reg);
mutex_exit(&sc->sc_mdiolock);
//aprint_normal_dev(dv, "mii_readreg(%d, %d): %#x\n", phy, reg, rv);
return rv;
}
@ -326,6 +390,8 @@ geminigmac_mii_writereg(device_t dv, int phy, int reg, int val)
device_t parent = device_parent(dv);
struct gmac_softc * const sc = device_private(parent);
//aprint_normal_dev(dv, "mii_writereg(%d, %d, %#x)\n", phy, reg, val);
mutex_enter(&sc->sc_mdiolock);
mii_bitbang_writereg(parent, &geminigmac_mii_bitbang_ops, phy, reg, val);
mutex_exit(&sc->sc_mdiolock);
@ -441,36 +507,79 @@ gmac_hwqueue_desc(gmac_hwqueue_t *hwq, size_t i)
return hwq->hwq_base + i;
}
static void
gmac_hwqueue_txconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
{
gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
struct ifnet *ifp;
bus_dmamap_t map;
struct mbuf *m;
IF_DEQUEUE(&hwq->hwq_ifq, m);
KASSERT(m != NULL);
map = M_GETCTX(m, bus_dmamap_t);
bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(hqm->hqm_dmat, map);
M_SETCTX(m, NULL);
gmac_mapcache_put(hqm->hqm_mc, map);
ifp = hwq->hwq_ifp;
ifp->if_opackets++;
ifp->if_obytes += m->m_pkthdr.len;
printf("gmac_hwqueue_txconsume(%p): %zu@%p: %s m=%p\n",
hwq, d - hwq->hwq_base, d, ifp->if_xname, m);
#if NBPFILTER > 0
if (ifp->if_bpf)
bpf_mtap(ifp, m);
#endif
m_freem(m);
}
void
gmac_hwqueue_sync(gmac_hwqueue_t *hwq)
{
gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
uint32_t v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
uint16_t old_rptr = hwq->hwq_rptr;
uint32_t v;
uint16_t old_rptr;
size_t rptr;
KASSERT(hqm->hqm_flags & HQM_PRODUCER);
KASSERT(hqm->hqm_flags & HQM_TX);
hwq->hwq_rptr = (uint16_t)(v >> 4) & 0xfff;
hwq->hwq_wptr = (uint16_t)(v >> 20) & 0xfff;
old_rptr = hwq->hwq_rptr;
v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
hwq->hwq_rptr = (v >> 0) & 0xffff;
hwq->hwq_wptr = (v >> 16) & 0xffff;
if (old_rptr == hwq->hwq_rptr)
return;
hwq->hwq_free += hwq->hwq_rptr - old_rptr;
if (__predict_false(old_rptr > hwq->hwq_rptr)) {
hwq->hwq_free += (hwq->hwq_rptr - old_rptr) & (hwq->hwq_size - 1);
for (rptr = old_rptr;
rptr != hwq->hwq_rptr;
rptr = (rptr + 1) % (hwq->hwq_size - 1)) {
const gmac_desc_t * const d = hwq->hwq_base + rptr;
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + old_rptr]),
sizeof(gmac_desc_t [hwq->hwq_size - old_rptr]),
BUS_DMASYNC_POSTREAD);
hwq->hwq_free += hwq->hwq_size;
old_rptr = 0;
}
if (old_rptr < hwq->hwq_rptr) {
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + old_rptr]),
sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_rptr]),
BUS_DMASYNC_POSTREAD);
sizeof(gmac_desc_t [hwq->hwq_qoff + rptr]),
sizeof(gmac_desc_t),
BUS_DMASYNC_POSTWRITE);
#if 0
printf("%s: gmac_hwqueue_sync(%p): %zu@%p=%#x/%#x/%#x/%#x\n",
hwq->hwq_ifp->if_xname, hwq, rptr, d,
d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
#endif
if ((hqm->hqm_flags & HQM_TX)
&& (d->d_desc3 & htole32(DESC3_EOF))) {
gmac_hwqueue_txconsume(hwq, d);
}
}
printf("gmac_hwqueue_sync(%p): rptr old=%u new=%u\n",
hwq, old_rptr, hwq->hwq_rptr);
}
void
@ -481,28 +590,34 @@ gmac_hwqueue_produce(gmac_hwqueue_t *hwq, size_t count)
KASSERT(count < hwq->hwq_free);
KASSERT(hqm->hqm_flags & HQM_PRODUCER);
printf("gmac_hwqueue_produce(%p, %zu): rptr=%u wptr old=%u", hwq, count,
hwq->hwq_rptr, hwq->hwq_wptr);
hwq->hwq_free -= count;
if (hwq->hwq_wptr + count >= hwq->hwq_size) {
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_wptr]),
sizeof(gmac_desc_t [hwq->hwq_size - hwq->hwq_wptr]),
BUS_DMASYNC_PREREAD);
BUS_DMASYNC_PREWRITE);
count -= hwq->hwq_size - hwq->hwq_wptr;
hwq->hwq_wptr = 0;
}
if (count > 0) {
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_wptr]),
sizeof(gmac_desc_t [hwq->hwq_wptr + count]),
BUS_DMASYNC_PREREAD);
hwq->hwq_wptr += sizeof(gmac_desc_t [count]);
sizeof(gmac_desc_t [count]),
BUS_DMASYNC_PREWRITE);
hwq->hwq_wptr += count;
}
/*
* Tell the h/w we've produced a few more descriptors.
* (don't bother writing the rptr since it's RO).
*/
bus_space_write_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0,
(hwq->hwq_wptr << 20) | (hwq->hwq_rptr << 4));
hwq->hwq_wptr << 16);
printf(" new=%u\n", hwq->hwq_wptr);
}
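gmac_hwqueue_sync() and gmac_hwqueue_produce() above read and write the queue's RWPTR register with the same packing. A minimal sketch of that packing as the new code assumes it (rptr in bits 0-15, wptr in bits 16-31); the helper names are hypothetical, not part of the driver:

#include <stdint.h>

static inline void
gmac_rwptr_unpack(uint32_t v, uint16_t *rptr, uint16_t *wptr)
{
	*rptr = (uint16_t)(v >>  0);	/* bits 0-15: read (consumer) index */
	*wptr = (uint16_t)(v >> 16);	/* bits 16-31: write (producer) index */
}

static inline uint32_t
gmac_rwptr_pack_wptr(uint16_t wptr)
{
	/* a software producer only updates wptr; rptr belongs to the hardware */
	return (uint32_t)wptr << 16;
}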
static void
@ -516,6 +631,8 @@ gmac_hwqueue_rxconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
KASSERT(ifp != NULL);
gmac_hwqueue_sync(hwq->hwq_producer);
/*
* First we have to find this mbuf in the software free queue
* (the producer of the mbufs) and remove it.
@ -524,6 +641,7 @@ gmac_hwqueue_rxconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
(m = *mp) != NULL;
last_m = m, mp = &m->m_nextpkt) {
map = M_GETCTX(m, bus_dmamap_t);
KASSERT(map != NULL);
KASSERT(map->dm_nsegs == 1);
if (d->d_bufaddr == map->dm_segs->ds_addr) {
*mp = m->m_nextpkt;
@ -547,7 +665,7 @@ gmac_hwqueue_rxconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
/*
* Sync the buffer contents, unload the dmamap, and save it away.
*/
bus_dmamap_sync(hqm->hqm_dmat, map, 0, buflen, BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(hqm->hqm_dmat, map, 0, buflen, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(hqm->hqm_dmat, map);
M_SETCTX(m, NULL);
gmac_mapcache_put(hqm->hqm_mc, map);
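The comment earlier in this function ("First we have to find this mbuf in the software free queue ... and remove it") describes a pointer-to-pointer unlink keyed on the DMA buffer address. A self-contained sketch of that operation with made-up types; the driver itself walks real mbufs via m_nextpkt and M_GETCTX:

#include <stddef.h>
#include <stdint.h>

/* illustrative stand-in for the mbuf chain linked through m_nextpkt */
struct sketch_pkt {
	struct sketch_pkt *next;
	uint32_t dma_addr;	/* the dmamap's single segment address */
};

/*
 * Walk the software free queue and unlink the packet whose buffer
 * address matches the descriptor's d_bufaddr.
 */
static struct sketch_pkt *
sketch_unlink_by_addr(struct sketch_pkt **head, uint32_t bufaddr)
{
	for (struct sketch_pkt **pp = head; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->dma_addr == bufaddr) {
			struct sketch_pkt *m = *pp;
			*pp = m->next;		/* unlink from the free queue */
			m->next = NULL;
			return m;
		}
	}
	return NULL;			/* not found (should not happen) */
}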
@ -597,47 +715,6 @@ gmac_hwqueue_rxconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
hwq->hwq_mp = &hwq->hwq_rxmbuf;
}
static void
gmac_hwqueue_txconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
{
gmac_hwqmem_t *hqm;
gmac_hwqueue_t *txhwq;
struct ifnet *ifp;
bus_dmamap_t map = NULL;
struct mbuf *m = NULL;
SLIST_FOREACH(txhwq, &hwq->hwq_producers, hwq_link) {
if ((m = txhwq->hwq_ifq.ifq_head) == NULL)
continue;
map = M_GETCTX(m, bus_dmamap_t);
if (d->d_bufaddr == map->dm_segs[map->dm_nsegs-1].ds_addr) {
if ((txhwq->hwq_ifq.ifq_head = m->m_nextpkt) == NULL)
txhwq->hwq_ifq.ifq_tail = NULL;
txhwq->hwq_ifq.ifq_len--;
break;
}
}
KASSERT(txhwq != NULL);
KASSERT(m != NULL);
hqm = txhwq->hwq_hqm;
bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(hqm->hqm_dmat, map);
M_SETCTX(m, NULL);
gmac_mapcache_put(hqm->hqm_mc, map);
ifp = txhwq->hwq_ifp;
ifp->if_opackets++;
ifp->if_obytes += m->m_pkthdr.len;
#if NBPFILTER > 0
if (ifp->if_bpf)
bpf_mtap(ifp, m);
#endif
m_freem(m);
}
void
gmac_hwqueue_consume(gmac_hwqueue_t *hwq)
{
@ -645,20 +722,25 @@ gmac_hwqueue_consume(gmac_hwqueue_t *hwq)
KASSERT((hqm->hqm_flags & HQM_PRODUCER) == 0);
for (;;) {
printf("gmac_hwqueue_consume(%p): entry\n", hwq);
do {
gmac_desc_t d;
uint32_t v;
uint16_t rptr, wptr;
v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
rptr = (v >> 4) & 0xfff;
wptr = (v >> 20) & 0xfff;
for (; rptr != ((wptr - 1) & (hwq->hwq_size - 1));
rptr = (rptr + 1) & (hwq->hwq_size - 1)) {
rptr = (v >> 0) & 0xffff;
wptr = (v >> 16) & 0xffff;
KASSERT(rptr == hwq->hwq_rptr);
if (rptr == wptr)
break;
for (; rptr != wptr; rptr = (rptr + 1) & (hwq->hwq_size - 1)) {
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + rptr]),
sizeof(gmac_desc_t),
BUS_DMASYNC_POSTWRITE);
BUS_DMASYNC_POSTREAD);
d.d_desc0 = le32toh(hwq->hwq_base[rptr].d_desc0);
d.d_desc1 = le32toh(hwq->hwq_base[rptr].d_desc1);
d.d_bufaddr = le32toh(hwq->hwq_base[rptr].d_bufaddr);
@ -666,17 +748,26 @@ gmac_hwqueue_consume(gmac_hwqueue_t *hwq)
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
sizeof(gmac_desc_t [hwq->hwq_qoff + rptr]),
sizeof(gmac_desc_t),
BUS_DMASYNC_PREWRITE);
BUS_DMASYNC_PREREAD);
if ((hqm->hqm_flags & HQM_TX) == 0) {
gmac_hwqueue_rxconsume(hwq, &d);
} else if (d.d_desc3 & DESC3_EOF) {
gmac_hwqueue_txconsume(hwq, &d);
}
printf("gmac_hwqueue_consume(%p): rptr=%u\n",
hwq, rptr);
gmac_hwqueue_rxconsume(hwq, &d);
}
/*
* Update hardware's copy of rptr. (wptr is RO).
*/
printf("gmac_hwqueue_consume(%p): rptr old=%u new=%u wptr=%u\n",
hwq, rptr, hwq->hwq_rptr, hwq->hwq_wptr);
bus_space_write_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0, rptr);
hwq->hwq_rptr = rptr;
hwq->hwq_wptr = wptr;
}
hwq->hwq_wptr = wptr; /* not used */
} while (hwq->hwq_rptr != hwq->hwq_wptr);
printf("gmac_hwqueue_consume(%p): exit\n", hwq);
}
void
@ -723,22 +814,39 @@ gmac_hwqmem_create(gmac_mapcache_t *mc, size_t ndesc, size_t nqueue, int flags)
error = bus_dmamem_alloc(hqm->hqm_dmat, hqm->hqm_memsize, 0, 0,
hqm->hqm_segs, 1, &hqm->hqm_nsegs, BUS_DMA_WAITOK);
if (error)
if (error) {
KASSERT(error == 0);
goto failed;
}
KASSERT(hqm->hqm_nsegs == 1);
error = bus_dmamem_map(hqm->hqm_dmat, hqm->hqm_segs, hqm->hqm_nsegs,
hqm->hqm_memsize, (void **)&hqm->hqm_base, BUS_DMA_WAITOK);
if (error)
if (error) {
KASSERT(error == 0);
goto failed;
}
error = bus_dmamap_create(hqm->hqm_dmat, hqm->hqm_memsize,
hqm->hqm_nsegs, 0, 0, BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
&hqm->hqm_dmamap);
if (error)
hqm->hqm_nsegs, hqm->hqm_memsize, 0,
BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &hqm->hqm_dmamap);
if (error) {
KASSERT(error == 0);
goto failed;
}
error = bus_dmamap_load(hqm->hqm_dmat, hqm->hqm_dmamap, hqm->hqm_base,
hqm->hqm_memsize, NULL, BUS_DMA_WAITOK
| (flags & HQM_PRODUCER ? BUS_DMA_READ: BUS_DMA_WRITE));
if (error)
| (flags & HQM_PRODUCER ? BUS_DMA_WRITE: BUS_DMA_READ));
if (error) {
printf("gmac_hwqmem_create: ds_addr=%zu ds_len=%zu\n",
hqm->hqm_segs->ds_addr, hqm->hqm_segs->ds_len);
printf("gmac_hwqmem_create: bus_dmamap_load: %d\n", error);
KASSERT(error == 0);
goto failed;
}
memset(hqm->hqm_base, 0, hqm->hqm_memsize);
if ((flags & HQM_PRODUCER) == 0)
bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap, 0,
hqm->hqm_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
return hqm;
@ -773,7 +881,7 @@ gmac_hwqueue_create(gmac_hwqmem_t *hqm,
bus_size_t qrwptr, bus_size_t qbase,
size_t qno)
{
const size_t log2_memsize = ffs(hqm->hqm_ndesc) + 3;
const size_t log2_memsize = ffs(hqm->hqm_ndesc) - 1;
gmac_hwqueue_t *hwq;
uint32_t v;
@ -796,15 +904,22 @@ gmac_hwqueue_create(gmac_hwqmem_t *hqm,
hwq->hwq_qoff = hqm->hqm_ndesc * qno;
hwq->hwq_base = hqm->hqm_base + hwq->hwq_qoff;
v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
hwq->hwq_rptr = (v >> 4) & 0xfff;
hwq->hwq_wptr = (v >> 20) & 0xfff;
if (qno == 0) {
bus_space_write_4(hwq->hwq_iot, ioh, qbase,
hqm->hqm_dmamap->dm_segs[0].ds_addr | (log2_memsize));
}
v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
hwq->hwq_rptr = (v >> 0) & 0xffff;
hwq->hwq_wptr = (v >> 16) & 0xffff;
printf("gmac_hwqueue_create: %p: qrwptr=%zu(%#zx) wptr=%u rptr=%u"
" base=%p@%#zx(%#x) qno=%zu\n",
hwq, qrwptr, hwq->hwq_qrwptr_ioh, hwq->hwq_wptr, hwq->hwq_rptr,
hwq->hwq_base,
hqm->hqm_segs->ds_addr + sizeof(gmac_desc_t [hwq->hwq_qoff]),
bus_space_read_4(hwq->hwq_iot, ioh, qbase), qno);
hwq->hwq_free = hwq->hwq_size - 1;
hwq->hwq_ifq.ifq_maxlen = hwq->hwq_free;
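The qno == 0 path above packs the descriptor ring's DMA address and its size into a single write to the queue BASE register. A hedged sketch of that encoding, assuming (as the ffs() arithmetic and the readback printf suggest) that log2 of the descriptor count lives in the low bits of the page-aligned ring address; the function name is made up for illustration:

#include <stdint.h>
#include <strings.h>	/* ffs() */

static uint32_t
gmac_qbase_encode(uint32_t ring_dma_addr, unsigned ndesc)
{
	unsigned log2_ndesc = (unsigned)ffs((int)ndesc) - 1;	/* e.g. 256 -> 8 */

	/* the ring is page-aligned, so the low bits of the address are free */
	return ring_dma_addr | log2_ndesc;
}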

gemini_gmacreg.h

@ -1,4 +1,4 @@
/* $NetBSD: gemini_gmacreg.h,v 1.1 2008/12/14 01:57:02 matt Exp $ */
/* $NetBSD: gemini_gmacreg.h,v 1.2 2008/12/15 04:44:27 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
@ -203,7 +203,7 @@
#define INT4_SW_FREEQ_EMPTY __BIT(0) /* SW Free Q empty */
#define INT4_GMAC0 __BITS(16,23)
#define INT4_GMAC1 __BITS(24,31)
#define INT4_GMAC1 (__BITS(24,31)|INT4_HW_FREEQ_EMPTY)
#define GMAC_NONTOE_QH_OFFSET 0x2000
#define GMAC_NONTOE_QH_SIZE 0x1000
@ -259,17 +259,17 @@
#define GMAC_TX_WEIGHTING_1 0x0004
#define GMAC_TX_WEIGHTING_2 0x0008
#define GMAC_SW_TX_Qn_RWPTR(n) (0x000c+(n)*4)
#define GMAC_SW_TX_Q0_RWPTR GMAC_SW_TX_Q_RWPTR(0)
#define GMAC_SW_TX_Q1_RWPTR GMAC_SW_TX_Q_RWPTR(1)
#define GMAC_SW_TX_Q2_RWPTR GMAC_SW_TX_Q_RWPTR(2)
#define GMAC_SW_TX_Q3_RWPTR GMAC_SW_TX_Q_RWPTR(3)
#define GMAC_SW_TX_Q4_RWPTR GMAC_SW_TX_Q_RWPTR(4)
#define GMAC_SW_TX_Q5_RWPTR GMAC_SW_TX_Q_RWPTR(5)
#define GMAC_SW_TX_Q0_RWPTR GMAC_SW_TX_Qn_RWPTR(0)
#define GMAC_SW_TX_Q1_RWPTR GMAC_SW_TX_Qn_RWPTR(1)
#define GMAC_SW_TX_Q2_RWPTR GMAC_SW_TX_Qn_RWPTR(2)
#define GMAC_SW_TX_Q3_RWPTR GMAC_SW_TX_Qn_RWPTR(3)
#define GMAC_SW_TX_Q4_RWPTR GMAC_SW_TX_Qn_RWPTR(4)
#define GMAC_SW_TX_Q5_RWPTR GMAC_SW_TX_Qn_RWPTR(5)
#define GMAC_HW_TX_Qn_RWPTR(n) (0x0024+(n)*4)
#define GMAC_HW_TX_Q0_RWPTR GMAC_HW_TX_Q_RWPTR(0)
#define GMAC_HW_TX_Q1_RWPTR GMAC_HW_TX_Q_RWPTR(1)
#define GMAC_HW_TX_Q2_RWPTR GMAC_HW_TX_Q_RWPTR(2)
#define GMAC_HW_TX_Q3_RWPTR GMAC_HW_TX_Q_RWPTR(3)
#define GMAC_HW_TX_Q0_RWPTR GMAC_HW_TX_Qn_RWPTR(0)
#define GMAC_HW_TX_Q1_RWPTR GMAC_HW_TX_Qn_RWPTR(1)
#define GMAC_HW_TX_Q2_RWPTR GMAC_HW_TX_Qn_RWPTR(2)
#define GMAC_HW_TX_Q3_RWPTR GMAC_HW_TX_Qn_RWPTR(3)
#define GMAC_DMA_TX_1ST_DESC 0x0038
#define GMAC_DMA_TX_CUR_DESC 0x003c

gemini_gmacvar.h

@ -1,4 +1,4 @@
/* $NetBSD: gemini_gmacvar.h,v 1.1 2008/12/14 01:57:02 matt Exp $ */
/* $NetBSD: gemini_gmacvar.h,v 1.2 2008/12/15 04:44:27 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
@ -92,6 +92,8 @@ struct gmac_softc {
gmac_hwqueue_t *sc_hwfreeq;
gmac_mapcache_t *sc_rxmaps;
gmac_mapcache_t *sc_txmaps;
size_t sc_swfree_min; /* min mbufs to keep on swfreeq */
size_t sc_rxpkts_per_sec;
/*
* What interrupts are enabled for both ports?

gemini_reg.h

@ -1,4 +1,4 @@
/* $NetBSD: gemini_reg.h,v 1.7 2008/12/14 01:57:02 matt Exp $ */
/* $NetBSD: gemini_reg.h,v 1.8 2008/12/15 04:44:27 matt Exp $ */
#ifndef _ARM_GEMINI_REG_H_
#define _ARM_GEMINI_REG_H_
@ -73,6 +73,8 @@
#define GEMINI_GLOBAL_RESET_CTL 0xc /* Global Soft Reset Control */ /* rw */
#define GLOBAL_RESET_GLOBAL __BIT(31) /* Global Soft Reset */
#define GLOBAL_RESET_CPU1 __BIT(30) /* CPU#1 reset hold */
#define GLOBAL_RESET_GMAC1 __BIT(6) /* GMAC1 reset hold */
#define GLOBAL_RESET_GMAC0 __BIT(5) /* GMAC0 reset hold */
#define GEMINI_GLOBAL_MISC_CTL 0x30 /* Miscellaneous Control */ /* rw */
#define GEMINI_GLOBAL_CPU0 0x38 /* CPU #0 Status and Control */ /* rw */
#define GLOBAL_CPU0_IPICPU1 __BIT(31) /* IPI to CPU#1 */

if_gmc.c

@ -1,4 +1,4 @@
/* $NetBSD: if_gmc.c,v 1.1 2008/12/14 01:57:02 matt Exp $ */
/* $NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
@ -47,7 +47,7 @@
#include <net/if_ether.h>
#include <net/if_dl.h>
__KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.1 2008/12/14 01:57:02 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $");
#define MAX_TXSEG 32
@ -91,6 +91,8 @@ gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
int error;
gmac_desc_t *d;
KASSERT(hwq != NULL);
map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
if (map == NULL)
return false;
@ -112,22 +114,20 @@ gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
memmove(m0->m_data - 1, m0->m_data, m0->m_len);
m0->m_data--;
} else {
panic("gmc_ifstart: odd addr %p", m0->m_data);
panic("gmc_txqueue: odd addr %p", m0->m_data);
}
}
count += ((addr & PGOFSET) + m->m_len + PGOFSET) >> PGSHIFT;
}
gmac_hwqueue_sync(hwq);
if (hwq->hwq_free <= count) {
gmac_hwqueue_sync(hwq);
if (hwq->hwq_free <= count) {
gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
return false;
}
gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
return false;
}
error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
BUS_DMA_READ|BUS_DMA_NOWAIT);
BUS_DMA_WRITE|BUS_DMA_NOWAIT);
if (error) {
aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
error);
@ -142,7 +142,7 @@ gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
* Sync the mbuf contents to memory/cache.
*/
bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREREAD);
BUS_DMASYNC_PREWRITE);
/*
* Now we need to load the descriptors...
@ -150,17 +150,27 @@ gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
desc1 = m->m_pkthdr.len;
desc3 = DESC3_SOF;
i = 0;
d = NULL;
do {
if (i > 0)
aprint_normal_dev(sc->sc_dev,
"gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
i-1, d, d->d_desc0, d->d_desc1,
d->d_bufaddr, d->d_desc3);
d = gmac_hwqueue_desc(hwq, i);
KASSERT(map->dm_segs[i].ds_len > 0);
KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
d->d_desc0 = map->dm_segs[i].ds_len;
d->d_desc1 = desc1;
d->d_bufaddr = map->dm_segs[i].ds_addr;
d->d_desc3 = desc3;
d->d_desc0 = htole32(map->dm_segs[i].ds_len);
d->d_desc1 = htole32(desc1);
d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
d->d_desc3 = htole32(desc3);
desc3 = 0;
} while (++i < map->dm_nsegs);
d->d_desc3 |= DESC3_EOF;
d->d_desc3 |= htole32(DESC3_EOF|DESC3_EOFIE);
aprint_normal_dev(sc->sc_dev,
"gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
M_SETCTX(m, map);
IF_ENQUEUE(&hwq->hwq_ifq, m);
/*
@ -168,17 +178,22 @@ gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
* This will sync for us.
*/
gmac_hwqueue_produce(hwq, map->dm_nsegs);
aprint_normal_dev(sc->sc_dev,
"gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
return true;
}
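The descriptor loop above is the core of the transmit path. A compact sketch of the same fill pattern with hypothetical type and macro names; the placeholder DESC3 bit values are not the hardware's, the real masks come from the driver's headers:

#include <stdint.h>
#include <sys/endian.h>	/* htole32(); <endian.h> on Linux */

/* placeholder bit values for illustration only */
#define SKETCH_DESC3_SOF	0x80000000u
#define SKETCH_DESC3_EOF	0x40000000u
#define SKETCH_DESC3_EOFIE	0x20000000u

typedef struct {
	uint32_t d_desc0;	/* byte count of this segment */
	uint32_t d_desc1;	/* total packet length */
	uint32_t d_bufaddr;	/* DMA address of this segment */
	uint32_t d_desc3;	/* SOF/EOF flags */
} sketch_desc_t;

/*
 * Fill a descriptor chain for a packet split across nseg DMA segments:
 * every descriptor carries the total length, the first is marked SOF,
 * the last EOF plus an end-of-frame interrupt, and all fields are
 * stored little-endian.
 */
static void
sketch_fill_tx_descs(sketch_desc_t *d, const uint32_t *seg_addr,
    const uint32_t *seg_len, unsigned nseg, uint32_t pktlen)
{
	uint32_t desc3 = SKETCH_DESC3_SOF;

	for (unsigned i = 0; i < nseg; i++) {
		d[i].d_desc0   = htole32(seg_len[i]);
		d[i].d_desc1   = htole32(pktlen);
		d[i].d_bufaddr = htole32(seg_addr[i]);
		d[i].d_desc3   = htole32(desc3);
		desc3 = 0;			/* only the first segment gets SOF */
	}
	d[nseg - 1].d_desc3 |= htole32(SKETCH_DESC3_EOF | SKETCH_DESC3_EOFIE);
}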
static void
gmc_rxproduce(struct gmc_softc *sc)
{
gmac_hwqueue_t * const hwq = sc->sc_psc->sc_swfreeq;
struct gmac_softc * const psc = sc->sc_psc;
gmac_hwqueue_t * const hwq = psc->sc_swfreeq;
gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
size_t i;
for (i = 0; hwq->hwq_size - hwq->hwq_free + i < MIN_RXMAPS; i++) {
for (i = 0;
hwq->hwq_size - hwq->hwq_free - 1 + i < psc->sc_swfree_min; i++) {
bus_dmamap_t map;
gmac_desc_t *d;
struct mbuf *m;
@ -203,7 +218,7 @@ gmc_rxproduce(struct gmc_softc *sc)
break;
}
error = bus_dmamap_load(hqm->hqm_dmat, map, m->m_data,
MCLBYTES, NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
MCLBYTES, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
if (error) {
m_free(m);
gmac_mapcache_put(hqm->hqm_mc, map);
@ -216,13 +231,14 @@ gmc_rxproduce(struct gmc_softc *sc)
break;
}
bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREWRITE);
BUS_DMASYNC_PREREAD);
m->m_len = 0;
M_SETCTX(m, map);
d = gmac_hwqueue_desc(hwq, i);
d->d_desc0 = htole32(map->dm_segs->ds_len);
d->d_bufaddr = htole32(map->dm_segs->ds_addr);
IF_ENQUEUE(&hwq->hwq_ifq, m);
sc->sc_psc->sc_rxpkts_per_sec++;
}
if (i)
@ -265,7 +281,7 @@ gmc_filter_change(struct gmc_softc *sc)
break;
}
i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
mhash[i >> 5] |= 1 << (i & 31);
mhash[(i >> 5) & 1] |= 1 << (i & 31);
ETHER_NEXT_MULTI(step, enm);
}
@ -295,8 +311,24 @@ static void
gmc_mii_tick(void *arg)
{
struct gmc_softc * const sc = arg;
struct gmac_softc * const psc = sc->sc_psc;
int s = splnet();
/*
* If we had to increase the number of receive mbufs due to FIFO
* overflows, we need a way to decrease them again. So for every
* second in which we receive MIN_RXMAPS or fewer packets, we
* decrement swfree_min until it returns to MIN_RXMAPS.
*/
if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
&& psc->sc_swfree_min > MIN_RXMAPS)
psc->sc_swfree_min--;
/*
* If only one GMAC is running or this is port0, reset the count.
*/
if (psc->sc_running != 3 || !sc->sc_port1)
psc->sc_rxpkts_per_sec = 0;
mii_tick(&sc->sc_mii);
if (sc->sc_if.if_flags & IFF_RUNNING)
callout_schedule(&sc->sc_mii_ch, hz);
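The comment at the top of this hunk describes one half of a small feedback loop; the other half, the swfree_min increment on INT4_RX_FIFO_OVRN, appears in gmc_intr() further down. A condensed, self-contained sketch of the loop with hypothetical names and placeholder limits (the driver uses its real MIN_RXMAPS/MAX_RXMAPS constants and gmac_softc fields):

#include <stddef.h>

/* placeholder limits for illustration only */
enum { SKETCH_MIN_RXMAPS = 32, SKETCH_MAX_RXMAPS = 256 };

struct sketch_rx_tuner {
	size_t swfree_min;	/* mbufs to keep on the sw free queue */
	size_t rxpkts_per_sec;	/* packets produced since the last tick */
};

/* rx-FIFO-overrun interrupt: we were short of buffers, raise the floor */
static void
sketch_rx_overrun(struct sketch_rx_tuner *t)
{
	if (t->swfree_min < SKETCH_MAX_RXMAPS)
		t->swfree_min++;
}

/* 1 Hz tick (the driver hangs this off gmc_mii_tick()) */
static void
sketch_rx_tick(struct sketch_rx_tuner *t)
{
	if (t->rxpkts_per_sec <= SKETCH_MIN_RXMAPS
	    && t->swfree_min > SKETCH_MIN_RXMAPS)
		t->swfree_min--;	/* a quiet second: drift back down */
	t->rxpkts_per_sec = 0;		/* start counting the next second */
}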
@ -332,6 +364,8 @@ gmc_mii_statchg(device_t self)
uint32_t gmac_status;
gmac_status = sc->sc_gmac_status;
gmac_status &= ~STATUS_PHYMODE_MASK;
gmac_status |= STATUS_PHYMODE_GMII;
gmac_status &= ~STATUS_SPEED_MASK;
if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
gmac_status |= STATUS_SPEED_1000M;
@ -351,10 +385,13 @@ gmc_mii_statchg(device_t self)
else
gmac_status &= ~STATUS_LINK_ON;
gmac_status |= STATUS_LINK_ON; /* XXX */
if (sc->sc_gmac_status != gmac_status) {
aprint_normal_dev(sc->sc_dev,
"status change old=%#x new=%#x\n",
sc->sc_gmac_status, gmac_status);
"status change old=%#x new=%#x active=%#x\n",
sc->sc_gmac_status, gmac_status,
sc->sc_mii.mii_media_active);
sc->sc_gmac_status = gmac_status;
bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
sc->sc_gmac_status);
@ -398,6 +435,10 @@ gmc_ifstart(struct ifnet *ifp)
{
struct gmc_softc * const sc = ifp->if_softc;
if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0
|| (ifp->if_flags & IFF_RUNNING) == 0)
return;
for (;;) {
struct mbuf *m;
IF_DEQUEUE(&ifp->if_snd, m);
@ -452,7 +493,9 @@ gmc_ifinit(struct ifnet *ifp)
{
struct gmc_softc * const sc = ifp->if_softc;
struct gmac_softc * const psc = sc->sc_psc;
#if 1
uint32_t new, mask;
#endif
gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);
@ -462,8 +505,8 @@ gmc_ifinit(struct ifnet *ifp)
hqm = gmac_hwqmem_create(psc->sc_rxmaps, RXQ_NDESCS, 1,
HQM_CONSUMER|HQM_RX);
sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
sc->sc_ioh, GMAC_DEF_RXQn_BASE(sc->sc_port1),
GMAC_DEF_RXQn_RWPTR(sc->sc_port1), 0);
sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
if (sc->sc_rxq == NULL) {
gmac_hwqmem_destroy(hqm);
goto failed;
@ -472,13 +515,14 @@ gmc_ifinit(struct ifnet *ifp)
sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
}
if (sc->sc_txq == NULL) {
if (sc->sc_txq[0] == NULL) {
gmac_hwqueue_t *hwq, *last_hwq;
gmac_hwqmem_t *hqm;
size_t i;
hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
HQM_PRODUCER|HQM_TX);
KASSERT(hqm != NULL);
for (i = 0; i < __arraycount(sc->sc_txq); i++) {
sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
@ -505,11 +549,11 @@ gmc_ifinit(struct ifnet *ifp)
SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
hwq_link);
}
}
gmc_filter_change(sc);
#if 1
mask = DMAVR_LOOPBACK|DMAVR_DROP_SMALL_ACK|DMAVR_EXTRABYTES_MASK
|DMAVR_RXBURSTSIZE_MASK|DMAVR_RXBUSWIDTH_MASK
|DMAVR_TXBURSTSIZE_MASK|DMAVR_TXBUSWIDTH_MASK;
@ -524,6 +568,9 @@ gmc_ifinit(struct ifnet *ifp)
sc->sc_dmavr = new;
bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
sc->sc_dmavr);
aprint_normal_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
sc->sc_dmavr,
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
}
mask = CONFIG0_MAXLEN_MASK|CONFIG0_TX_DISABLE/*|CONFIG0_RX_DISABLE*/
@ -533,13 +580,14 @@ gmc_ifinit(struct ifnet *ifp)
new |= (sc->sc_gmac_config[0] & ~mask);
if (sc->sc_gmac_config[0] != new) {
sc->sc_gmac_config[0] = new;
bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
sc->sc_gmac_config[0]);
aprint_normal_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
sc->sc_gmac_config[0],
bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
}
#if 0
gmc_rxproduce(sc);
#endif
/*
* If we will be the only active interface, make sure the sw freeq
@ -563,6 +611,7 @@ gmc_ifinit(struct ifnet *ifp)
psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];
gmac_intr_update(psc);
#endif
if ((ifp->if_flags & IFF_RUNNING) == 0)
mii_tick(&sc->sc_mii);
@ -602,22 +651,45 @@ gmc_intr(void *arg)
bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
int4_status & sc->sc_int_enabled[4]);
aprint_normal_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
int0_status, int1_status,
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
int4_status);
aprint_normal_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
status = int0_status & sc->sc_int_mask[0];
if (status & (INT0_TXDERR|INT0_TXPERR)) {
aprint_error_dev(sc->sc_dev,
"transmit%s%s error: bufaddr %#x\n",
"transmit%s%s error: %#x %08x bufaddr %#x\n",
status & INT0_TXDERR ? " data" : "",
status & INT0_TXPERR ? " protocol" : "",
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
GMAC_DMA_TX_CUR_DESC),
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
GMAC_SW_TX_Q0_RWPTR),
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
GMAC_DMA_TX_DESC2));
Debugger();
}
if (status & (INT0_RXDERR|INT0_RXPERR)) {
aprint_error_dev(sc->sc_dev,
"receive%s%s error: bufaddr %#x\n",
"receive%s%s error: %#x %#x bufaddr %#x\n",
status & INT0_TXDERR ? " data" : "",
status & INT0_TXPERR ? " protocol" : "",
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
GMAC_DMA_RX_CUR_DESC),
bus_space_read_4(sc->sc_iot, sc->sc_ioh,
GMAC_DEF_RXQn_RWPTR(sc->sc_port1)),
bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
GMAC_DMA_RX_DESC2));
Debugger();
}
if (status & INT0_SWTXQ_EOF) {
status &= INT0_SWTXQ_EOF;
@ -627,11 +699,13 @@ gmc_intr(void *arg)
status &= ~INT0_SWTXQn_EOF(i);
}
}
#if 0
/*
* If we got an EOF, that means something wound up in the
* hardware freeq, so go reclaim it.
*/
gmac_hwqueue_consume(sc->sc_psc->sc_hwfreeq);
// gmac_hwqueue_consume(sc->sc_psc->sc_hwfreeq);
#endif
do_ifstart = true;
rv = 1;
}
@ -646,6 +720,7 @@ gmc_intr(void *arg)
gmc_rxproduce(sc);
rv = 1;
}
status = int4_status & sc->sc_int_enabled[4];
if (status & INT4_TX_FAIL) {
}
@ -660,6 +735,8 @@ gmc_intr(void *arg)
if (status & INT4_TX_XOFF) {
}
if (status & INT4_RX_FIFO_OVRN) {
if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS)
sc->sc_psc->sc_swfree_min++;
sc->sc_if.if_ierrors++;
}
if (status & INT4_RGMII_STSCHG) {
@ -669,6 +746,7 @@ gmc_intr(void *arg)
if (do_ifstart)
gmc_ifstart(&sc->sc_if);
aprint_normal_dev(sc->sc_dev, "gmac_intr: done\n");
return rv;
}
@ -751,7 +829,7 @@ gmc_attach(device_t parent, device_t self, void *aux)
ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
} else {
ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
}
sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
@ -779,9 +857,11 @@ gmc_attach(device_t parent, device_t self, void *aux)
sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);
if (!sc->sc_port1) {
sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET, IST_LEVEL_HIGH,
gmc_intr, sc);
KASSERT(sc->sc_ih != NULL);
}
callout_init(&sc->sc_mii_ch, 0);
callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);