- restructure tx descriptor handling code to decouple the number of tx
  descriptors, the number of rx descriptors, and the number of mbufs.
- bump the number of tx descriptors for rtl8169; 64 doesn't seem to be
  sufficient when doing TSO.
yamt 2005-03-23 20:23:08 +00:00
parent 93bf0b6883
commit e012617b1e
3 changed files with 130 additions and 108 deletions
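In outline: the old code sized everything off a single RTK_TX_DESC_CNT, with one
mbuf pointer and one DMA map per descriptor. The new code keeps a fixed-length
software queue of in-flight packets (RTK_TX_QLEN) and sizes the descriptor ring
per chip at attach time. A condensed sketch of the resulting layout, distilled
from the diff below (the identifiers are the driver's own; the grouping here is
illustrative, not the literal header):

        #define RTK_TX_QLEN 64                  /* in-flight packets, fixed */

        struct rtk_txq {                        /* one entry per queued packet */
                struct mbuf *txq_mbuf;          /* the packet */
                bus_dmamap_t txq_dmamap;        /* its DMA map; dm_nsegs descriptors */
                int txq_descidx;                /* ring index of its last descriptor */
        };

        /*
         * The descriptor ring is sized per chip at attach time:
         * 64 on the 8139C+, 1024 on the 8169 (clamped to one page).
         */
        #define RTK_TX_DESC_CNT(sc)     ((sc)->rtk_ldata.rtk_tx_desc_cnt)
        #define RTK_TX_LIST_SZ(sc)      (RTK_TX_DESC_CNT(sc) * sizeof(struct rtk_desc))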

rtl8169.c

@@ -1,4 +1,4 @@
-/* $NetBSD: rtl8169.c,v 1.14 2005/03/14 10:08:17 yamt Exp $ */
+/* $NetBSD: rtl8169.c,v 1.15 2005/03/23 20:23:08 yamt Exp $ */
 /*
  * Copyright (c) 1997, 1998-2003
@@ -599,6 +599,8 @@ re_attach(struct rtk_softc *sc)
                for (i = 0; i < ETHER_ADDR_LEN; i++)
                        eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
+
+               sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8169;
        } else {
                /* Set RX length mask */
@@ -618,14 +620,24 @@ re_attach(struct rtk_softc *sc)
                        eaddr[(i * 2) + 0] = val & 0xff;
                        eaddr[(i * 2) + 1] = val >> 8;
                }
+
+               sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8139;
        }

        aprint_normal("%s: Ethernet address %s\n",
            sc->sc_dev.dv_xname, ether_sprintf(eaddr));

+       if (sc->rtk_ldata.rtk_tx_desc_cnt >
+           PAGE_SIZE / sizeof(struct rtk_desc)) {
+               sc->rtk_ldata.rtk_tx_desc_cnt =
+                   PAGE_SIZE / sizeof(struct rtk_desc);
+       }
+
+       aprint_verbose("%s: using %d tx descriptors\n",
+           sc->sc_dev.dv_xname, sc->rtk_ldata.rtk_tx_desc_cnt);
+
        /* Allocate DMA'able memory for the TX ring */
-       if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ,
+       if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ(sc),
            RTK_ETHER_ALIGN, 0, &sc->rtk_ldata.rtk_tx_listseg,
            1, &sc->rtk_ldata.rtk_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
                aprint_error("%s: can't allocate tx listseg, error = %d\n",
@@ -635,17 +647,17 @@ re_attach(struct rtk_softc *sc)
        /* Load the map for the TX ring. */
        if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_tx_listseg,
-           sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ,
+           sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ(sc),
            (caddr_t *)&sc->rtk_ldata.rtk_tx_list,
            BUS_DMA_NOWAIT)) != 0) {
                aprint_error("%s: can't map tx list, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_1;
        }
-       memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
+       memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));

-       if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ, 1,
-           RTK_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
+       if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ(sc), 1,
+           RTK_TX_LIST_SZ(sc), 0, BUS_DMA_ALLOCNOW,
            &sc->rtk_ldata.rtk_tx_list_map)) != 0) {
                aprint_error("%s: can't create tx list map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
@@ -655,19 +667,19 @@ re_attach(struct rtk_softc *sc)
        if ((error = bus_dmamap_load(sc->sc_dmat,
            sc->rtk_ldata.rtk_tx_list_map, sc->rtk_ldata.rtk_tx_list,
-           RTK_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
+           RTK_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
                aprint_error("%s: can't load tx list, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_3;
        }

        /* Create DMA maps for TX buffers */
-       for (i = 0; i < RTK_TX_DESC_CNT; i++) {
+       for (i = 0; i < RTK_TX_QLEN; i++) {
                error = bus_dmamap_create(sc->sc_dmat,
                    round_page(IP_MAXPACKET),
-                   RTK_TX_DESC_CNT - 4, RTK_TDESC_CMD_FRAGLEN,
+                   RTK_TX_DESC_CNT(sc) - 4, RTK_TDESC_CMD_FRAGLEN,
                    0, BUS_DMA_ALLOCNOW,
-                   &sc->rtk_ldata.rtk_tx_dmamap[i]);
+                   &sc->rtk_ldata.rtk_txq[i].txq_dmamap);
                if (error) {
                        aprint_error("%s: can't create DMA map for TX\n",
                            sc->sc_dev.dv_xname);
@@ -693,7 +705,7 @@ re_attach(struct rtk_softc *sc)
                    sc->sc_dev.dv_xname, error);
                goto fail_5;
        }
-       memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_TX_LIST_SZ);
+       memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);

        if ((error = bus_dmamap_create(sc->sc_dmat, RTK_RX_LIST_SZ, 1,
            RTK_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
@@ -809,10 +821,10 @@ fail_5:
 fail_4:
        /* Destroy DMA maps for TX buffers. */
-       for (i = 0; i < RTK_TX_DESC_CNT; i++)
-               if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
+       for (i = 0; i < RTK_TX_QLEN; i++)
+               if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmat,
-                           sc->rtk_ldata.rtk_tx_dmamap[i]);
+                           sc->rtk_ldata.rtk_txq[i].txq_dmamap);

        /* Free DMA'able memory for the TX ring. */
        bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
@@ -820,7 +832,7 @@ fail_3:
        bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
 fail_2:
        bus_dmamem_unmap(sc->sc_dmat,
-           (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
+           (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
 fail_1:
        bus_dmamem_free(sc->sc_dmat,
            &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
@@ -899,16 +911,16 @@ re_detach(struct rtk_softc *sc)
            &sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);

        /* Destroy DMA maps for TX buffers. */
-       for (i = 0; i < RTK_TX_DESC_CNT; i++)
-               if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
+       for (i = 0; i < RTK_TX_QLEN; i++)
+               if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmat,
-                           sc->rtk_ldata.rtk_tx_dmamap[i]);
+                           sc->rtk_ldata.rtk_txq[i].txq_dmamap);

        /* Free DMA'able memory for the TX ring. */
        bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
        bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
        bus_dmamem_unmap(sc->sc_dmat,
-           (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
+           (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
        bus_dmamem_free(sc->sc_dmat,
            &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
@@ -1030,14 +1042,10 @@ re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
        cmdstat = map->dm_segs[0].ds_len;
        d->rtk_bufaddr_lo = htole32(RTK_ADDR_LO(map->dm_segs[0].ds_addr));
        d->rtk_bufaddr_hi = htole32(RTK_ADDR_HI(map->dm_segs[0].ds_addr));
-       cmdstat |= RTK_TDESC_CMD_SOF;
        if (idx == (RTK_RX_DESC_CNT - 1))
-               cmdstat |= RTK_TDESC_CMD_EOR;
+               cmdstat |= RTK_RDESC_CMD_EOR;
        d->rtk_cmdstat = htole32(cmdstat);
-       d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);

        sc->rtk_ldata.rtk_rx_list[idx].rtk_cmdstat |=
            htole32(RTK_RDESC_CMD_OWN);
        sc->rtk_ldata.rtk_rx_mbuf[idx] = m;
@@ -1056,16 +1064,20 @@ out:
 static int
 re_tx_list_init(struct rtk_softc *sc)
 {
-       memset((char *)sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
-       memset((char *)&sc->rtk_ldata.rtk_tx_mbuf, 0,
-           (RTK_TX_DESC_CNT * sizeof(struct mbuf *)));
+       int i;
+
+       memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));
+       for (i = 0; i < RTK_TX_QLEN; i++) {
+               sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
+       }

        bus_dmamap_sync(sc->sc_dmat,
            sc->rtk_ldata.rtk_tx_list_map, 0,
            sc->rtk_ldata.rtk_tx_list_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
-       sc->rtk_ldata.rtk_tx_prodidx = 0;
-       sc->rtk_ldata.rtk_tx_considx = 0;
-       sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT;
+       sc->rtk_ldata.rtk_txq_prodidx = 0;
+       sc->rtk_ldata.rtk_txq_considx = 0;
+       sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT(sc);
+       sc->rtk_ldata.rtk_tx_nextfree = 0;

        return 0;
 }
@@ -1148,7 +1160,7 @@ re_rxeof(struct rtk_softc *sc)
                                sc->rtk_tail = m;
                        }
                        re_newbuf(sc, i, NULL);
-                       RTK_DESC_INC(i);
+                       RTK_RX_DESC_INC(sc, i);
                        continue;
                }
@@ -1182,7 +1194,7 @@ re_rxeof(struct rtk_softc *sc)
                                sc->rtk_head = sc->rtk_tail = NULL;
                        }
                        re_newbuf(sc, i, m);
-                       RTK_DESC_INC(i);
+                       RTK_RX_DESC_INC(sc, i);
                        continue;
                }
@@ -1198,11 +1210,11 @@ re_rxeof(struct rtk_softc *sc)
                                sc->rtk_head = sc->rtk_tail = NULL;
                        }
                        re_newbuf(sc, i, m);
-                       RTK_DESC_INC(i);
+                       RTK_RX_DESC_INC(sc, i);
                        continue;
                }

-               RTK_DESC_INC(i);
+               RTK_RX_DESC_INC(sc, i);

                if (sc->rtk_head != NULL) {
                        m->m_len = total_len % (MCLBYTES - RTK_ETHER_ALIGN);
@@ -1284,11 +1296,10 @@ static void
 re_txeof(struct rtk_softc *sc)
 {
        struct ifnet *ifp;
-       u_int32_t txstat;
        int idx;

        ifp = &sc->ethercom.ec_if;
-       idx = sc->rtk_ldata.rtk_tx_considx;
+       idx = sc->rtk_ldata.rtk_txq_considx;

        /* Invalidate the TX descriptor list */
@@ -1297,40 +1308,39 @@ re_txeof(struct rtk_softc *sc)
            0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD);

-       while (idx != sc->rtk_ldata.rtk_tx_prodidx) {
+       while (idx != sc->rtk_ldata.rtk_txq_prodidx) {
+               struct rtk_txq *txq = &sc->rtk_ldata.rtk_txq[idx];
+               int descidx = txq->txq_descidx;
+               u_int32_t txstat;

-               txstat = le32toh(sc->rtk_ldata.rtk_tx_list[idx].rtk_cmdstat);
+               KASSERT(txq->txq_mbuf != NULL);
+
+               txstat =
+                   le32toh(sc->rtk_ldata.rtk_tx_list[descidx].rtk_cmdstat);
+               KASSERT((txstat & RTK_TDESC_CMD_EOF) != 0);
                if (txstat & RTK_TDESC_CMD_OWN)
                        break;

-               /*
-                * We only stash mbufs in the last descriptor
-                * in a fragment chain, which also happens to
-                * be the only place where the TX status bits
-                * are valid.
-                */
-
-               if (txstat & RTK_TDESC_CMD_EOF) {
-                       m_freem(sc->rtk_ldata.rtk_tx_mbuf[idx]);
-                       sc->rtk_ldata.rtk_tx_mbuf[idx] = NULL;
-                       bus_dmamap_unload(sc->sc_dmat,
-                           sc->rtk_ldata.rtk_tx_dmamap[idx]);
-                       if (txstat & (RTK_TDESC_STAT_EXCESSCOL |
-                           RTK_TDESC_STAT_COLCNT))
-                               ifp->if_collisions++;
-                       if (txstat & RTK_TDESC_STAT_TXERRSUM)
-                               ifp->if_oerrors++;
-                       else
-                               ifp->if_opackets++;
-               }
-               sc->rtk_ldata.rtk_tx_free++;
-               RTK_DESC_INC(idx);
+               sc->rtk_ldata.rtk_tx_free += txq->txq_dmamap->dm_nsegs;
+               KASSERT(sc->rtk_ldata.rtk_tx_free <= RTK_TX_DESC_CNT(sc));
+               bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
+               m_freem(txq->txq_mbuf);
+               txq->txq_mbuf = NULL;
+
+               if (txstat & (RTK_TDESC_STAT_EXCESSCOL | RTK_TDESC_STAT_COLCNT))
+                       ifp->if_collisions++;
+               if (txstat & RTK_TDESC_STAT_TXERRSUM)
+                       ifp->if_oerrors++;
+               else
+                       ifp->if_opackets++;
+
+               idx = (idx + 1) % RTK_TX_QLEN;
        }

        /* No changes made to the TX ring, so no flush needed */

-       if (idx != sc->rtk_ldata.rtk_tx_considx) {
-               sc->rtk_ldata.rtk_tx_considx = idx;
+       if (idx != sc->rtk_ldata.rtk_txq_considx) {
+               sc->rtk_ldata.rtk_txq_considx = idx;
                ifp->if_flags &= ~IFF_OACTIVE;
                ifp->if_timer = 0;
        }
@@ -1341,7 +1351,7 @@ re_txeof(struct rtk_softc *sc)
         * interrupt that will cause us to re-enter this routine.
         * This is done in case the transmitter has gone idle.
         */
-       if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT)
+       if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT(sc))
                CSR_WRITE_4(sc, RTK_TIMERCNT, 1);

        return;
@@ -1495,10 +1505,11 @@ static int
 re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
 {
        bus_dmamap_t map;
-       int error, i, curidx;
+       int error, i, startidx, curidx;
        struct m_tag *mtag;
        struct rtk_desc *d;
        u_int32_t cmdstat, rtk_flags;
+       struct rtk_txq *txq;

        if (sc->rtk_ldata.rtk_tx_free <= 4)
                return EFBIG;
@@ -1525,7 +1536,8 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
                        rtk_flags |= RTK_TDESC_CMD_UDPCSUM;
        }

-       map = sc->rtk_ldata.rtk_tx_dmamap[*idx];
+       txq = &sc->rtk_ldata.rtk_txq[*idx];
+       map = txq->txq_dmamap;
        error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);

        if (error) {
@@ -1553,14 +1565,14 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
         * reception.)
         */
        i = 0;
-       curidx = *idx;
+       curidx = startidx = sc->rtk_ldata.rtk_tx_nextfree;
        while (1) {
                d = &sc->rtk_ldata.rtk_tx_list[curidx];
-               if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN) {
+               if (le32toh(d->rtk_cmdstat) & RTK_TDESC_STAT_OWN) {
                        while (i > 0) {
                                sc->rtk_ldata.rtk_tx_list[
-                                   (curidx + RTK_TX_DESC_CNT - i) %
-                                   RTK_TX_DESC_CNT].rtk_cmdstat = 0;
+                                   (curidx + RTK_TX_DESC_CNT(sc) - i) %
+                                   RTK_TX_DESC_CNT(sc)].rtk_cmdstat = 0;
                                i--;
                        }
                        error = ENOBUFS;
@@ -1576,26 +1588,18 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
                        cmdstat |= RTK_TDESC_CMD_SOF;
                else
                        cmdstat |= RTK_TDESC_CMD_OWN;
-               if (curidx == (RTK_RX_DESC_CNT - 1))
+               if (curidx == (RTK_TX_DESC_CNT(sc) - 1))
                        cmdstat |= RTK_TDESC_CMD_EOR;
                d->rtk_cmdstat = htole32(cmdstat | rtk_flags);
                i++;
                if (i == map->dm_nsegs)
                        break;
-               RTK_DESC_INC(curidx);
+               RTK_TX_DESC_INC(sc, curidx);
        }

        d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);

-       /*
-        * Insure that the map for this transmission
-        * is placed at the array index of the last descriptor
-        * in this chain.
-        */
-       sc->rtk_ldata.rtk_tx_dmamap[*idx] =
-           sc->rtk_ldata.rtk_tx_dmamap[curidx];
-       sc->rtk_ldata.rtk_tx_dmamap[curidx] = map;
-       sc->rtk_ldata.rtk_tx_mbuf[curidx] = m;
+       txq->txq_mbuf = m;
        sc->rtk_ldata.rtk_tx_free -= map->dm_nsegs;

        /*
@@ -1605,7 +1609,7 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
         */
        if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m)) != NULL) {
-               sc->rtk_ldata.rtk_tx_list[*idx].rtk_vlanctl =
+               sc->rtk_ldata.rtk_tx_list[startidx].rtk_vlanctl =
                    htole32(htons(VLAN_TAG_VALUE(mtag)) |
                    RTK_TDESC_VLANCTL_TAG);
        }
@@ -1614,12 +1618,14 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
        sc->rtk_ldata.rtk_tx_list[curidx].rtk_cmdstat |=
            htole32(RTK_TDESC_CMD_OWN);
-       if (*idx != curidx)
-               sc->rtk_ldata.rtk_tx_list[*idx].rtk_cmdstat |=
+       if (startidx != curidx)
+               sc->rtk_ldata.rtk_tx_list[startidx].rtk_cmdstat |=
                    htole32(RTK_TDESC_CMD_OWN);

-       RTK_DESC_INC(curidx);
-       *idx = curidx;
+       txq->txq_descidx = curidx;
+       RTK_TX_DESC_INC(sc, curidx);
+       sc->rtk_ldata.rtk_tx_nextfree = curidx;
+       *idx = (*idx + 1) % RTK_TX_QLEN;

        return 0;
@@ -1642,8 +1648,8 @@ re_start(struct ifnet *ifp)
        sc = ifp->if_softc;

-       idx = sc->rtk_ldata.rtk_tx_prodidx;
-       while (sc->rtk_ldata.rtk_tx_mbuf[idx] == NULL) {
+       idx = sc->rtk_ldata.rtk_txq_prodidx;
+       while (sc->rtk_ldata.rtk_txq[idx].txq_mbuf == NULL) {
                int error;

                IF_DEQUEUE(&ifp->if_snd, m_head);
@@ -1652,7 +1658,7 @@ re_start(struct ifnet *ifp)
                error = re_encap(sc, m_head, &idx);
                if (error == EFBIG &&
-                   sc->rtk_ldata.rtk_tx_free == RTK_TX_DESC_CNT) {
+                   sc->rtk_ldata.rtk_tx_free == RTK_TX_DESC_CNT(sc)) {
                        ifp->if_oerrors++;
                        m_freem(m_head);
                        continue;
@@ -1679,7 +1685,7 @@ re_start(struct ifnet *ifp)
            0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

-       sc->rtk_ldata.rtk_tx_prodidx = idx;
+       sc->rtk_ldata.rtk_txq_prodidx = idx;

        /*
         * RealTek put the TX poll request register in a different
@@ -2016,12 +2022,12 @@ re_stop(struct ifnet *ifp, int disable)
        }

        /* Free the TX list buffers. */
-       for (i = 0; i < RTK_TX_DESC_CNT; i++) {
-               if (sc->rtk_ldata.rtk_tx_mbuf[i] != NULL) {
+       for (i = 0; i < RTK_TX_QLEN; i++) {
+               if (sc->rtk_ldata.rtk_txq[i].txq_mbuf != NULL) {
                        bus_dmamap_unload(sc->sc_dmat,
-                           sc->rtk_ldata.rtk_tx_dmamap[i]);
-                       m_freem(sc->rtk_ldata.rtk_tx_mbuf[i]);
-                       sc->rtk_ldata.rtk_tx_mbuf[i] = NULL;
+                           sc->rtk_ldata.rtk_txq[i].txq_dmamap);
+                       m_freem(sc->rtk_ldata.rtk_txq[i].txq_mbuf);
+                       sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
                }
        }
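The effect on descriptor accounting is easiest to see with numbers. The
walk-through below is hypothetical (the 40-segment count is invented for
illustration); it mirrors how re_encap and re_txeof now cooperate:

        /* Hypothetical TSO send on an 8169: 1024-descriptor ring, 64-entry queue. */
        int tx_free = 1024;     /* RTK_TX_DESC_CNT(sc) after re_tx_list_init() */
        int nsegs = 40;         /* dm_nsegs of one large TSO mbuf chain */

        tx_free -= nsegs;       /* re_encap charges one descriptor per segment */
        /* ... chip sends; re_txeof reads status at txq->txq_descidx (the EOF
         *     descriptor), the only place the TX status bits are valid ... */
        tx_free += nsegs;       /* re_txeof refunds the whole chain in one step */

Under the old scheme the mbuf was stashed on the last descriptor of each chain
and rtk_tx_free was incremented descriptor by descriptor; with only a 64-entry
ring a single TSO chain could nearly exhaust it, which is what the commit
message's "64 doesn't seem to be sufficient" refers to.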

rtl81x9reg.h

@@ -1,4 +1,4 @@
-/* $NetBSD: rtl81x9reg.h,v 1.11 2005/03/12 08:01:51 yamt Exp $ */
+/* $NetBSD: rtl81x9reg.h,v 1.12 2005/03/23 20:23:08 yamt Exp $ */
 /*
  * Copyright (c) 1997, 1998
@@ -553,12 +553,11 @@ struct rtk_stats {
 };

 #define RTK_RX_DESC_CNT         64
-#define RTK_TX_DESC_CNT         64
+#define RTK_TX_DESC_CNT_8139    64
+#define RTK_TX_DESC_CNT_8169    1024
 #define RTK_RX_LIST_SZ          (RTK_RX_DESC_CNT * sizeof(struct rtk_desc))
-#define RTK_TX_LIST_SZ          (RTK_TX_DESC_CNT * sizeof(struct rtk_desc))
 #define RTK_RING_ALIGN          256
 #define RTK_IFQ_MAXLEN          512
-#define RTK_DESC_INC(x)         (x = (x + 1) % RTK_TX_DESC_CNT)
 #define RTK_OWN(x)              (le32toh((x)->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
 #define RTK_RXBYTES(x)          (le32toh((x)->rtk_cmdstat) & sc->rtk_rxlenmask)
 #define RTK_PKTSZ(x)            ((x)/* >> 3*/)

rtl81x9var.h

@@ -1,4 +1,4 @@
-/* $NetBSD: rtl81x9var.h,v 1.17 2005/02/04 02:10:37 perry Exp $ */
+/* $NetBSD: rtl81x9var.h,v 1.18 2005/03/23 20:23:08 yamt Exp $ */
 /*
  * Copyright (c) 1997, 1998
@@ -81,6 +81,8 @@ struct rtk_mii_frame {
 #define RTK_ISCPLUS(x)  ((x)->rtk_type == RTK_8139CPLUS || \
                         (x)->rtk_type == RTK_8169)

+#define RTK_TX_QLEN     64
+
 /*
  * The 8139C+ and 8160 gigE chips support descriptor-based TX
  * and RX.  In fact, they even support TCP large send.  Descriptors
@@ -89,22 +91,28 @@ struct rtk_mii_frame {
  */
 struct rtk_list_data {
-       struct mbuf             *rtk_tx_mbuf[RTK_TX_DESC_CNT];
+       struct rtk_txq {
+               struct mbuf *txq_mbuf;
+               bus_dmamap_t txq_dmamap;
+               int txq_descidx;
+       } rtk_txq[RTK_TX_QLEN];
+       int                     rtk_txq_considx;
+       int                     rtk_txq_prodidx;
+
+       bus_dmamap_t            rtk_tx_list_map;
+       struct rtk_desc         *rtk_tx_list;
+       bus_dma_segment_t       rtk_tx_listseg;
+       int                     rtk_tx_free;     /* # of free descriptors */
+       int                     rtk_tx_nextfree; /* next descriptor to use */
+       int                     rtk_tx_desc_cnt; /* # of descriptors */
+       int                     rtk_tx_listnseg;
+
        struct mbuf             *rtk_rx_mbuf[RTK_RX_DESC_CNT];
-       int                     rtk_tx_prodidx;
-       int                     rtk_rx_prodidx;
-       int                     rtk_tx_considx;
-       int                     rtk_tx_free;
-       bus_dmamap_t            rtk_tx_dmamap[RTK_TX_DESC_CNT];
        bus_dmamap_t            rtk_rx_dmamap[RTK_RX_DESC_CNT];
        bus_dmamap_t            rtk_rx_list_map;
        struct rtk_desc         *rtk_rx_list;
        bus_dma_segment_t       rtk_rx_listseg;
+       int                     rtk_rx_prodidx;
        int                     rtk_rx_listnseg;
-       bus_dmamap_t            rtk_tx_list_map;
-       struct rtk_desc         *rtk_tx_list;
-       bus_dma_segment_t       rtk_tx_listseg;
-       int                     rtk_tx_listnseg;
 };

 struct rtk_tx_desc {
        SIMPLEQ_ENTRY(rtk_tx_desc) txd_q;
@@ -154,6 +162,15 @@ struct rtk_softc {
 #endif
 };

+#define RTK_TX_DESC_CNT(sc)     \
+       ((sc)->rtk_ldata.rtk_tx_desc_cnt)
+#define RTK_TX_LIST_SZ(sc)      \
+       (RTK_TX_DESC_CNT(sc) * sizeof(struct rtk_desc))
+#define RTK_TX_DESC_INC(sc, x)  \
+       ((x) = ((x) + 1) % RTK_TX_DESC_CNT(sc))
+#define RTK_RX_DESC_INC(sc, x)  \
+       ((x) = ((x) + 1) % RTK_RX_DESC_CNT)
+
 #define RTK_ATTACHED            0x00000001 /* attach has succeeded */
 #define RTK_ENABLED             0x00000002 /* chip is enabled */
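For reference, the new increment macros wrap modulo their respective ring
sizes; for example (assuming an sc whose attach selected the 8169 count):

        int i = RTK_TX_DESC_CNT(sc) - 1;        /* 1023 on an 8169 */
        RTK_TX_DESC_INC(sc, i);                 /* i == 0: (1023 + 1) % 1024 */
        RTK_RX_DESC_INC(sc, i);                 /* i == 1: RX ring stays fixed at 64 */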