- restructure tx descriptor handling code to decouple number of tx descriptors,
  number of rx descriptors, and number of mbufs.
- bump number of tx descriptors for rtl8169.  64 doesn't seem to be sufficient
  when doing TSO.
yamt 2005-03-23 20:23:08 +00:00
parent 93bf0b6883
commit e012617b1e
3 changed files with 130 additions and 108 deletions
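In rough terms, the decoupling works by moving the TX descriptor count into the
softc and turning the old compile-time size constants into per-device macros,
while the per-packet state (mbuf pointer, DMA map, index of the packet's last
descriptor) moves out of the per-descriptor arrays into a fixed-length queue of
struct rtk_txq entries. A condensed sketch of the relevant pieces, abbreviated
from the rtl81x9var.h hunks below:

    /* per-packet TX bookkeeping, fixed length regardless of descriptor count */
    #define RTK_TX_QLEN 64

    struct rtk_txq {
            struct mbuf     *txq_mbuf;      /* packet occupying this slot */
            bus_dmamap_t    txq_dmamap;     /* DMA map covering all of its fragments */
            int             txq_descidx;    /* last descriptor of its fragment chain */
    };

    /*
     * TX descriptor count is now per chip, set in re_attach():
     * 64 for 8139C+, 1024 for 8169 (clamped to one page of descriptors).
     */
    #define RTK_TX_DESC_CNT(sc)     ((sc)->rtk_ldata.rtk_tx_desc_cnt)
    #define RTK_TX_LIST_SZ(sc)      (RTK_TX_DESC_CNT(sc) * sizeof(struct rtk_desc))
    #define RTK_TX_DESC_INC(sc, x)  ((x) = ((x) + 1) % RTK_TX_DESC_CNT(sc))
    #define RTK_RX_DESC_INC(sc, x)  ((x) = ((x) + 1) % RTK_RX_DESC_CNT)

With this split, re_encap() consumes as many descriptors as the mbuf chain has
DMA segments and records the chain's last descriptor in txq_descidx, while
re_txeof() returns txq_dmamap->dm_nsegs descriptors per completed packet, so
the descriptor count, the RX ring size, and the number of in-flight mbufs no
longer have to match.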

rtl8169.c

@@ -1,4 +1,4 @@
/* $NetBSD: rtl8169.c,v 1.14 2005/03/14 10:08:17 yamt Exp $ */
/* $NetBSD: rtl8169.c,v 1.15 2005/03/23 20:23:08 yamt Exp $ */
/*
* Copyright (c) 1997, 1998-2003
@@ -599,6 +599,8 @@ re_attach(struct rtk_softc *sc)
for (i = 0; i < ETHER_ADDR_LEN; i++)
eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8169;
} else {
/* Set RX length mask */
@@ -618,14 +620,24 @@ re_attach(struct rtk_softc *sc)
eaddr[(i * 2) + 0] = val & 0xff;
eaddr[(i * 2) + 1] = val >> 8;
}
sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8139;
}
aprint_normal("%s: Ethernet address %s\n",
sc->sc_dev.dv_xname, ether_sprintf(eaddr));
if (sc->rtk_ldata.rtk_tx_desc_cnt >
PAGE_SIZE / sizeof(struct rtk_desc)) {
sc->rtk_ldata.rtk_tx_desc_cnt =
PAGE_SIZE / sizeof(struct rtk_desc);
}
aprint_verbose("%s: using %d tx descriptors\n",
sc->sc_dev.dv_xname, sc->rtk_ldata.rtk_tx_desc_cnt);
/* Allocate DMA'able memory for the TX ring */
if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ,
if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ(sc),
RTK_ETHER_ALIGN, 0, &sc->rtk_ldata.rtk_tx_listseg,
1, &sc->rtk_ldata.rtk_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
aprint_error("%s: can't allocate tx listseg, error = %d\n",
@@ -635,17 +647,17 @@ re_attach(struct rtk_softc *sc)
/* Load the map for the TX ring. */
if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_tx_listseg,
sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ,
sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ(sc),
(caddr_t *)&sc->rtk_ldata.rtk_tx_list,
BUS_DMA_NOWAIT)) != 0) {
aprint_error("%s: can't map tx list, error = %d\n",
sc->sc_dev.dv_xname, error);
goto fail_1;
}
memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));
if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ, 1,
RTK_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ(sc), 1,
RTK_TX_LIST_SZ(sc), 0, BUS_DMA_ALLOCNOW,
&sc->rtk_ldata.rtk_tx_list_map)) != 0) {
aprint_error("%s: can't create tx list map, error = %d\n",
sc->sc_dev.dv_xname, error);
@@ -655,19 +667,19 @@ re_attach(struct rtk_softc *sc)
if ((error = bus_dmamap_load(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_list_map, sc->rtk_ldata.rtk_tx_list,
RTK_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
RTK_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
aprint_error("%s: can't load tx list, error = %d\n",
sc->sc_dev.dv_xname, error);
goto fail_3;
}
/* Create DMA maps for TX buffers */
for (i = 0; i < RTK_TX_DESC_CNT; i++) {
for (i = 0; i < RTK_TX_QLEN; i++) {
error = bus_dmamap_create(sc->sc_dmat,
round_page(IP_MAXPACKET),
RTK_TX_DESC_CNT - 4, RTK_TDESC_CMD_FRAGLEN,
RTK_TX_DESC_CNT(sc) - 4, RTK_TDESC_CMD_FRAGLEN,
0, BUS_DMA_ALLOCNOW,
&sc->rtk_ldata.rtk_tx_dmamap[i]);
&sc->rtk_ldata.rtk_txq[i].txq_dmamap);
if (error) {
aprint_error("%s: can't create DMA map for TX\n",
sc->sc_dev.dv_xname);
@@ -693,7 +705,7 @@ re_attach(struct rtk_softc *sc)
sc->sc_dev.dv_xname, error);
goto fail_5;
}
memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_TX_LIST_SZ);
memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);
if ((error = bus_dmamap_create(sc->sc_dmat, RTK_RX_LIST_SZ, 1,
RTK_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
@@ -809,10 +821,10 @@ fail_5:
fail_4:
/* Destroy DMA maps for TX buffers. */
for (i = 0; i < RTK_TX_DESC_CNT; i++)
if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
for (i = 0; i < RTK_TX_QLEN; i++)
if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_dmamap[i]);
sc->rtk_ldata.rtk_txq[i].txq_dmamap);
/* Free DMA'able memory for the TX ring. */
bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
@@ -820,7 +832,7 @@ fail_3:
bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
fail_2:
bus_dmamem_unmap(sc->sc_dmat,
(caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
(caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
fail_1:
bus_dmamem_free(sc->sc_dmat,
&sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
@@ -899,16 +911,16 @@ re_detach(struct rtk_softc *sc)
&sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);
/* Destroy DMA maps for TX buffers. */
for (i = 0; i < RTK_TX_DESC_CNT; i++)
if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
for (i = 0; i < RTK_TX_QLEN; i++)
if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_dmamap[i]);
sc->rtk_ldata.rtk_txq[i].txq_dmamap);
/* Free DMA'able memory for the TX ring. */
bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
bus_dmamem_unmap(sc->sc_dmat,
(caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
(caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
bus_dmamem_free(sc->sc_dmat,
&sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
@@ -1030,14 +1042,10 @@ re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
cmdstat = map->dm_segs[0].ds_len;
d->rtk_bufaddr_lo = htole32(RTK_ADDR_LO(map->dm_segs[0].ds_addr));
d->rtk_bufaddr_hi = htole32(RTK_ADDR_HI(map->dm_segs[0].ds_addr));
cmdstat |= RTK_TDESC_CMD_SOF;
if (idx == (RTK_RX_DESC_CNT - 1))
cmdstat |= RTK_TDESC_CMD_EOR;
cmdstat |= RTK_RDESC_CMD_EOR;
d->rtk_cmdstat = htole32(cmdstat);
d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);
sc->rtk_ldata.rtk_rx_list[idx].rtk_cmdstat |=
htole32(RTK_RDESC_CMD_OWN);
sc->rtk_ldata.rtk_rx_mbuf[idx] = m;
@@ -1056,16 +1064,20 @@ out:
static int
re_tx_list_init(struct rtk_softc *sc)
{
memset((char *)sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
memset((char *)&sc->rtk_ldata.rtk_tx_mbuf, 0,
(RTK_TX_DESC_CNT * sizeof(struct mbuf *)));
int i;
memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));
for (i = 0; i < RTK_TX_QLEN; i++) {
sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
}
bus_dmamap_sync(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_list_map, 0,
sc->rtk_ldata.rtk_tx_list_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
sc->rtk_ldata.rtk_tx_prodidx = 0;
sc->rtk_ldata.rtk_tx_considx = 0;
sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT;
sc->rtk_ldata.rtk_txq_prodidx = 0;
sc->rtk_ldata.rtk_txq_considx = 0;
sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT(sc);
sc->rtk_ldata.rtk_tx_nextfree = 0;
return 0;
}
@@ -1148,7 +1160,7 @@ re_rxeof(struct rtk_softc *sc)
sc->rtk_tail = m;
}
re_newbuf(sc, i, NULL);
RTK_DESC_INC(i);
RTK_RX_DESC_INC(sc, i);
continue;
}
@@ -1182,7 +1194,7 @@ re_rxeof(struct rtk_softc *sc)
sc->rtk_head = sc->rtk_tail = NULL;
}
re_newbuf(sc, i, m);
RTK_DESC_INC(i);
RTK_RX_DESC_INC(sc, i);
continue;
}
@@ -1198,11 +1210,11 @@ re_rxeof(struct rtk_softc *sc)
sc->rtk_head = sc->rtk_tail = NULL;
}
re_newbuf(sc, i, m);
RTK_DESC_INC(i);
RTK_RX_DESC_INC(sc, i);
continue;
}
RTK_DESC_INC(i);
RTK_RX_DESC_INC(sc, i);
if (sc->rtk_head != NULL) {
m->m_len = total_len % (MCLBYTES - RTK_ETHER_ALIGN);
@@ -1284,11 +1296,10 @@ static void
re_txeof(struct rtk_softc *sc)
{
struct ifnet *ifp;
u_int32_t txstat;
int idx;
ifp = &sc->ethercom.ec_if;
idx = sc->rtk_ldata.rtk_tx_considx;
idx = sc->rtk_ldata.rtk_txq_considx;
/* Invalidate the TX descriptor list */
@@ -1297,40 +1308,39 @@ re_txeof(struct rtk_softc *sc)
0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
BUS_DMASYNC_POSTREAD);
while (idx != sc->rtk_ldata.rtk_tx_prodidx) {
while (idx != sc->rtk_ldata.rtk_txq_prodidx) {
struct rtk_txq *txq = &sc->rtk_ldata.rtk_txq[idx];
int descidx = txq->txq_descidx;
u_int32_t txstat;
txstat = le32toh(sc->rtk_ldata.rtk_tx_list[idx].rtk_cmdstat);
KASSERT(txq->txq_mbuf != NULL);
txstat =
le32toh(sc->rtk_ldata.rtk_tx_list[descidx].rtk_cmdstat);
KASSERT((txstat & RTK_TDESC_CMD_EOF) != 0);
if (txstat & RTK_TDESC_CMD_OWN)
break;
/*
* We only stash mbufs in the last descriptor
* in a fragment chain, which also happens to
* be the only place where the TX status bits
* are valid.
*/
sc->rtk_ldata.rtk_tx_free += txq->txq_dmamap->dm_nsegs;
KASSERT(sc->rtk_ldata.rtk_tx_free <= RTK_TX_DESC_CNT(sc));
bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
m_freem(txq->txq_mbuf);
txq->txq_mbuf = NULL;
if (txstat & RTK_TDESC_CMD_EOF) {
m_freem(sc->rtk_ldata.rtk_tx_mbuf[idx]);
sc->rtk_ldata.rtk_tx_mbuf[idx] = NULL;
bus_dmamap_unload(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_dmamap[idx]);
if (txstat & (RTK_TDESC_STAT_EXCESSCOL |
RTK_TDESC_STAT_COLCNT))
ifp->if_collisions++;
if (txstat & RTK_TDESC_STAT_TXERRSUM)
ifp->if_oerrors++;
else
ifp->if_opackets++;
}
sc->rtk_ldata.rtk_tx_free++;
RTK_DESC_INC(idx);
if (txstat & (RTK_TDESC_STAT_EXCESSCOL | RTK_TDESC_STAT_COLCNT))
ifp->if_collisions++;
if (txstat & RTK_TDESC_STAT_TXERRSUM)
ifp->if_oerrors++;
else
ifp->if_opackets++;
idx = (idx + 1) % RTK_TX_QLEN;
}
/* No changes made to the TX ring, so no flush needed */
if (idx != sc->rtk_ldata.rtk_tx_considx) {
sc->rtk_ldata.rtk_tx_considx = idx;
if (idx != sc->rtk_ldata.rtk_txq_considx) {
sc->rtk_ldata.rtk_txq_considx = idx;
ifp->if_flags &= ~IFF_OACTIVE;
ifp->if_timer = 0;
}
@ -1341,7 +1351,7 @@ re_txeof(struct rtk_softc *sc)
* interrupt that will cause us to re-enter this routine.
* This is done in case the transmitter has gone idle.
*/
if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT)
if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT(sc))
CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
return;
@@ -1495,10 +1505,11 @@ static int
re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
{
bus_dmamap_t map;
int error, i, curidx;
int error, i, startidx, curidx;
struct m_tag *mtag;
struct rtk_desc *d;
u_int32_t cmdstat, rtk_flags;
struct rtk_txq *txq;
if (sc->rtk_ldata.rtk_tx_free <= 4)
return EFBIG;
@@ -1525,7 +1536,8 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
rtk_flags |= RTK_TDESC_CMD_UDPCSUM;
}
map = sc->rtk_ldata.rtk_tx_dmamap[*idx];
txq = &sc->rtk_ldata.rtk_txq[*idx];
map = txq->txq_dmamap;
error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
if (error) {
@@ -1553,14 +1565,14 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
* reception.)
*/
i = 0;
curidx = *idx;
curidx = startidx = sc->rtk_ldata.rtk_tx_nextfree;
while (1) {
d = &sc->rtk_ldata.rtk_tx_list[curidx];
if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN) {
if (le32toh(d->rtk_cmdstat) & RTK_TDESC_STAT_OWN) {
while (i > 0) {
sc->rtk_ldata.rtk_tx_list[
(curidx + RTK_TX_DESC_CNT - i) %
RTK_TX_DESC_CNT].rtk_cmdstat = 0;
(curidx + RTK_TX_DESC_CNT(sc) - i) %
RTK_TX_DESC_CNT(sc)].rtk_cmdstat = 0;
i--;
}
error = ENOBUFS;
@@ -1576,26 +1588,18 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
cmdstat |= RTK_TDESC_CMD_SOF;
else
cmdstat |= RTK_TDESC_CMD_OWN;
if (curidx == (RTK_RX_DESC_CNT - 1))
if (curidx == (RTK_TX_DESC_CNT(sc) - 1))
cmdstat |= RTK_TDESC_CMD_EOR;
d->rtk_cmdstat = htole32(cmdstat | rtk_flags);
i++;
if (i == map->dm_nsegs)
break;
RTK_DESC_INC(curidx);
RTK_TX_DESC_INC(sc, curidx);
}
d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);
/*
* Insure that the map for this transmission
* is placed at the array index of the last descriptor
* in this chain.
*/
sc->rtk_ldata.rtk_tx_dmamap[*idx] =
sc->rtk_ldata.rtk_tx_dmamap[curidx];
sc->rtk_ldata.rtk_tx_dmamap[curidx] = map;
sc->rtk_ldata.rtk_tx_mbuf[curidx] = m;
txq->txq_mbuf = m;
sc->rtk_ldata.rtk_tx_free -= map->dm_nsegs;
/*
@@ -1605,7 +1609,7 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
*/
if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m)) != NULL) {
sc->rtk_ldata.rtk_tx_list[*idx].rtk_vlanctl =
sc->rtk_ldata.rtk_tx_list[startidx].rtk_vlanctl =
htole32(htons(VLAN_TAG_VALUE(mtag)) |
RTK_TDESC_VLANCTL_TAG);
}
@@ -1614,12 +1618,14 @@ re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
sc->rtk_ldata.rtk_tx_list[curidx].rtk_cmdstat |=
htole32(RTK_TDESC_CMD_OWN);
if (*idx != curidx)
sc->rtk_ldata.rtk_tx_list[*idx].rtk_cmdstat |=
if (startidx != curidx)
sc->rtk_ldata.rtk_tx_list[startidx].rtk_cmdstat |=
htole32(RTK_TDESC_CMD_OWN);
RTK_DESC_INC(curidx);
*idx = curidx;
txq->txq_descidx = curidx;
RTK_TX_DESC_INC(sc, curidx);
sc->rtk_ldata.rtk_tx_nextfree = curidx;
*idx = (*idx + 1) % RTK_TX_QLEN;
return 0;
@@ -1642,8 +1648,8 @@ re_start(struct ifnet *ifp)
sc = ifp->if_softc;
idx = sc->rtk_ldata.rtk_tx_prodidx;
while (sc->rtk_ldata.rtk_tx_mbuf[idx] == NULL) {
idx = sc->rtk_ldata.rtk_txq_prodidx;
while (sc->rtk_ldata.rtk_txq[idx].txq_mbuf == NULL) {
int error;
IF_DEQUEUE(&ifp->if_snd, m_head);
@@ -1652,7 +1658,7 @@ re_start(struct ifnet *ifp)
error = re_encap(sc, m_head, &idx);
if (error == EFBIG &&
sc->rtk_ldata.rtk_tx_free == RTK_TX_DESC_CNT) {
sc->rtk_ldata.rtk_tx_free == RTK_TX_DESC_CNT(sc)) {
ifp->if_oerrors++;
m_freem(m_head);
continue;
@@ -1679,7 +1685,7 @@ re_start(struct ifnet *ifp)
0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
sc->rtk_ldata.rtk_tx_prodidx = idx;
sc->rtk_ldata.rtk_txq_prodidx = idx;
/*
* RealTek put the TX poll request register in a different
@@ -2016,12 +2022,12 @@ re_stop(struct ifnet *ifp, int disable)
}
/* Free the TX list buffers. */
for (i = 0; i < RTK_TX_DESC_CNT; i++) {
if (sc->rtk_ldata.rtk_tx_mbuf[i] != NULL) {
for (i = 0; i < RTK_TX_QLEN; i++) {
if (sc->rtk_ldata.rtk_txq[i].txq_mbuf != NULL) {
bus_dmamap_unload(sc->sc_dmat,
sc->rtk_ldata.rtk_tx_dmamap[i]);
m_freem(sc->rtk_ldata.rtk_tx_mbuf[i]);
sc->rtk_ldata.rtk_tx_mbuf[i] = NULL;
sc->rtk_ldata.rtk_txq[i].txq_dmamap);
m_freem(sc->rtk_ldata.rtk_txq[i].txq_mbuf);
sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
}
}

rtl81x9reg.h

@@ -1,4 +1,4 @@
/* $NetBSD: rtl81x9reg.h,v 1.11 2005/03/12 08:01:51 yamt Exp $ */
/* $NetBSD: rtl81x9reg.h,v 1.12 2005/03/23 20:23:08 yamt Exp $ */
/*
* Copyright (c) 1997, 1998
@@ -553,12 +553,11 @@ struct rtk_stats {
};
#define RTK_RX_DESC_CNT 64
#define RTK_TX_DESC_CNT 64
#define RTK_TX_DESC_CNT_8139 64
#define RTK_TX_DESC_CNT_8169 1024
#define RTK_RX_LIST_SZ (RTK_RX_DESC_CNT * sizeof(struct rtk_desc))
#define RTK_TX_LIST_SZ (RTK_TX_DESC_CNT * sizeof(struct rtk_desc))
#define RTK_RING_ALIGN 256
#define RTK_IFQ_MAXLEN 512
#define RTK_DESC_INC(x) (x = (x + 1) % RTK_TX_DESC_CNT)
#define RTK_OWN(x) (le32toh((x)->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
#define RTK_RXBYTES(x) (le32toh((x)->rtk_cmdstat) & sc->rtk_rxlenmask)
#define RTK_PKTSZ(x) ((x)/* >> 3*/)

rtl81x9var.h

@@ -1,4 +1,4 @@
/* $NetBSD: rtl81x9var.h,v 1.17 2005/02/04 02:10:37 perry Exp $ */
/* $NetBSD: rtl81x9var.h,v 1.18 2005/03/23 20:23:08 yamt Exp $ */
/*
* Copyright (c) 1997, 1998
@@ -81,6 +81,8 @@ struct rtk_mii_frame {
#define RTK_ISCPLUS(x) ((x)->rtk_type == RTK_8139CPLUS || \
(x)->rtk_type == RTK_8169)
#define RTK_TX_QLEN 64
/*
* The 8139C+ and 8160 gigE chips support descriptor-based TX
* and RX. In fact, they even support TCP large send. Descriptors
@@ -89,22 +91,28 @@ struct rtk_mii_frame {
*/
struct rtk_list_data {
struct mbuf *rtk_tx_mbuf[RTK_TX_DESC_CNT];
struct rtk_txq {
struct mbuf *txq_mbuf;
bus_dmamap_t txq_dmamap;
int txq_descidx;
} rtk_txq[RTK_TX_QLEN];
int rtk_txq_considx;
int rtk_txq_prodidx;
bus_dmamap_t rtk_tx_list_map;
struct rtk_desc *rtk_tx_list;
bus_dma_segment_t rtk_tx_listseg;
int rtk_tx_free; /* # of free descriptors */
int rtk_tx_nextfree; /* next descriptor to use */
int rtk_tx_desc_cnt; /* # of descriptors */
int rtk_tx_listnseg;
struct mbuf *rtk_rx_mbuf[RTK_RX_DESC_CNT];
int rtk_tx_prodidx;
int rtk_rx_prodidx;
int rtk_tx_considx;
int rtk_tx_free;
bus_dmamap_t rtk_tx_dmamap[RTK_TX_DESC_CNT];
bus_dmamap_t rtk_rx_dmamap[RTK_RX_DESC_CNT];
bus_dmamap_t rtk_rx_list_map;
struct rtk_desc *rtk_rx_list;
bus_dma_segment_t rtk_rx_listseg;
int rtk_rx_prodidx;
int rtk_rx_listnseg;
bus_dmamap_t rtk_tx_list_map;
struct rtk_desc *rtk_tx_list;
bus_dma_segment_t rtk_tx_listseg;
int rtk_tx_listnseg;
};
struct rtk_tx_desc {
SIMPLEQ_ENTRY(rtk_tx_desc) txd_q;
@@ -154,6 +162,15 @@ struct rtk_softc {
#endif
};
#define RTK_TX_DESC_CNT(sc) \
((sc)->rtk_ldata.rtk_tx_desc_cnt)
#define RTK_TX_LIST_SZ(sc) \
(RTK_TX_DESC_CNT(sc) * sizeof(struct rtk_desc))
#define RTK_TX_DESC_INC(sc, x) \
((x) = ((x) + 1) % RTK_TX_DESC_CNT(sc))
#define RTK_RX_DESC_INC(sc, x) \
((x) = ((x) + 1) % RTK_RX_DESC_CNT)
#define RTK_ATTACHED 0x00000001 /* attach has succeeded */
#define RTK_ENABLED 0x00000002 /* chip is enabled */