support {extended, advanced} receive descriptors for wm(4).

Support the following two receive descriptor formats:
    - extended descriptor (used by 82574 only)
    - advanced descriptor (used by 82575 and newer)

SPH (split header buffer) is not supported yet. A sketch of the descriptor-format selection follows.
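
For reference, a minimal sketch (illustrative only, not code from this
commit) of the per-chip selection the change introduces; the hypothetical
helper below just mirrors the logic added to wm_alloc_rx_descs():

/*
 * Hypothetical helper: 82574 -> extended, 82575 and newer -> advanced,
 * everything else -> legacy wiseman descriptors.
 */
static size_t
wm_rxdesc_size_for_chip(struct wm_softc *sc)
{
	if (sc->sc_type == WM_T_82574)
		return sizeof(ext_rxdesc_t);		/* extended */
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return sizeof(nq_rxdesc_t);		/* advanced */
	else
		return sizeof(wiseman_rxdesc_t);	/* legacy */
}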

ok by msaitoh@n.o.
knakahara 2017-01-16 00:09:06 +00:00
parent c2ffce666f
commit bab4f3a363
2 changed files with 538 additions and 85 deletions

if_wm.c

@ -1,4 +1,4 @@
/* $NetBSD: if_wm.c,v 1.465 2017/01/10 08:57:39 msaitoh Exp $ */
/* $NetBSD: if_wm.c,v 1.466 2017/01/16 00:09:06 knakahara Exp $ */
/*
* Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
@ -84,7 +84,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.465 2017/01/10 08:57:39 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.466 2017/01/16 00:09:06 knakahara Exp $");
#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
@ -215,8 +215,14 @@ typedef union txdescs {
nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;
typedef union rxdescs {
wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
#define WM_CDRXOFF(x) (sizeof(wiseman_rxdesc_t) * x)
#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
/*
* Software state for transmit jobs.
@ -356,15 +362,19 @@ struct wm_rxqueue {
struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
/* Software state for the receive descriptors. */
wiseman_rxdesc_t *rxq_descs;
struct wm_rxsoft rxq_soft[WM_NRXDESC];
/* RX control data structures. */
struct wm_rxsoft rxq_soft[WM_NRXDESC];
int rxq_ndesc; /* must be a power of two */
size_t rxq_descsize; /* RX descriptor size */
rxdescs_t *rxq_descs_u;
bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
bus_dma_segment_t rxq_desc_seg; /* control data segment */
int rxq_desc_rseg; /* real number of control segments */
size_t rxq_desc_size; /* control data size */
#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
#define rxq_descs rxq_descs_u->sctxu_rxdescs
#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
bus_addr_t rxq_rdt_reg; /* offset of RDT register */
@ -578,7 +588,7 @@ do { \
(reg) + sc->sc_flashreg_offset, (data))
#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
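/*
 * With per-queue descriptor sizes, the DMA offset of Rx descriptor x is
 * now rxq_descsize * x rather than a fixed sizeof(wiseman_rxdesc_t) * x,
 * so WM_CDRXOFF()/WM_CDRXADDR() work unchanged for all three formats.
 */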
#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x) \
@ -1516,7 +1526,7 @@ wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
struct wm_softc *sc = rxq->rxq_sc;
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
}
static inline void
@ -1524,7 +1534,6 @@ wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
struct wm_softc *sc = rxq->rxq_sc;
struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
struct mbuf *m = rxs->rxs_mbuf;
/*
@ -1543,13 +1552,29 @@ wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
*/
m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
wm_set_dma_addr(&rxd->wrx_addr,
rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
rxd->wrx_len = 0;
rxd->wrx_cksum = 0;
rxd->wrx_status = 0;
rxd->wrx_errors = 0;
rxd->wrx_special = 0;
if (sc->sc_type == WM_T_82574) {
ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
rxd->erx_data.erxd_addr =
htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
rxd->erx_data.erxd_dd = 0;
} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
rxd->nqrx_data.nrxd_paddr =
htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
/* Currently, split header is not supported. */
rxd->nqrx_data.nrxd_haddr = 0;
} else {
wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
wm_set_dma_addr(&rxd->wrx_addr,
rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
rxd->wrx_len = 0;
rxd->wrx_cksum = 0;
rxd->wrx_status = 0;
rxd->wrx_errors = 0;
rxd->wrx_special = 0;
}
wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
@ -3721,6 +3746,12 @@ wm_initialize_hardware_bits(struct wm_softc *sc)
reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
CSR_WRITE(sc, WMREG_RFCTL, reg);
break;
case WM_T_82574:
/* Use extended Rx descriptors. */
reg = CSR_READ(sc, WMREG_RFCTL);
reg |= WMREG_RFCTL_EXSTEN;
CSR_WRITE(sc, WMREG_RFCTL, reg);
break;
default:
break;
}
@ -5172,6 +5203,12 @@ wm_init_locked(struct ifnet *ifp)
sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
| RCTL_MO(sc->sc_mchash_type);
/*
* The 82574 uses the one-buffer extended Rx descriptor format.
*/
if (sc->sc_type == WM_T_82574)
sc->sc_rctl |= RCTL_DTYP_ONEBUF;
/*
* The I350 has a bug where it always strips the CRC whether
* asked to or not. So ask for stripped CRC here and cope in rxeof
@ -5558,6 +5595,7 @@ static int
wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
int error;
size_t rxq_descs_size;
/*
* Allocate the control data structures, and create and load the
@ -5567,8 +5605,16 @@ wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
* memory. So must Rx descriptors. We simplify by allocating
* both sets within the same 4G segment.
*/
rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
rxq->rxq_ndesc = WM_NRXDESC;
if (sc->sc_type == WM_T_82574)
rxq->rxq_descsize = sizeof(ext_rxdesc_t);
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
rxq->rxq_descsize = sizeof(nq_rxdesc_t);
else
rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
1, &rxq->rxq_desc_rseg, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
@ -5578,15 +5624,15 @@ wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
}
if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
rxq->rxq_desc_rseg, rxq->rxq_desc_size,
(void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
rxq->rxq_desc_rseg, rxq_descs_size,
(void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to map RX control data, error = %d\n", error);
goto fail_1;
}
if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to create RX control data DMA map, error = %d\n",
error);
@ -5594,7 +5640,7 @@ wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
}
if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to load RX control data DMA map, error = %d\n",
error);
@ -5606,8 +5652,8 @@ wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
fail_3:
bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
fail_2:
bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
rxq->rxq_desc_size);
bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
rxq_descs_size);
fail_1:
bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
fail_0:
@ -5620,8 +5666,8 @@ wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
rxq->rxq_desc_size);
bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
rxq->rxq_descsize * rxq->rxq_ndesc);
bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
}
@ -5675,7 +5721,7 @@ wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
int i, error;
/* Create the receive buffer DMA maps. */
for (i = 0; i < WM_NRXDESC; i++) {
for (i = 0; i < rxq->rxq_ndesc; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
MCLBYTES, 0, 0,
&rxq->rxq_soft[i].rxs_dmamap)) != 0) {
@ -5690,7 +5736,7 @@ wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
return 0;
fail:
for (i = 0; i < WM_NRXDESC; i++) {
for (i = 0; i < rxq->rxq_ndesc; i++) {
if (rxq->rxq_soft[i].rxs_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
rxq->rxq_soft[i].rxs_dmamap);
@ -5703,7 +5749,7 @@ wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
int i;
for (i = 0; i < WM_NRXDESC; i++) {
for (i = 0; i < rxq->rxq_ndesc; i++) {
if (rxq->rxq_soft[i].rxs_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
rxq->rxq_soft[i].rxs_dmamap);
@ -5993,7 +6039,7 @@ wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
CSR_WRITE(sc, WMREG_OLD_RDLEN0,
sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
rxq->rxq_descsize * rxq->rxq_ndesc);
CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
@ -6009,12 +6055,14 @@ wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
| (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
| RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
@ -6042,7 +6090,7 @@ wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
KASSERT(mutex_owned(rxq->rxq_lock));
for (i = 0; i < WM_NRXDESC; i++) {
for (i = 0; i < rxq->rxq_ndesc; i++) {
rxs = &rxq->rxq_soft[i];
if (rxs->rxs_mbuf == NULL) {
if ((error = wm_add_rxbuf(rxq, i)) != 0) {
@ -7411,6 +7459,224 @@ wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
return processed;
}
static inline uint32_t
wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
else
return rxq->rxq_descs[idx].wrx_status;
}
static inline uint32_t
wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
else
return rxq->rxq_descs[idx].wrx_errors;
}
static inline uint16_t
wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
else
return rxq->rxq_descs[idx].wrx_special;
}
static inline int
wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
else
return rxq->rxq_descs[idx].wrx_len;
}
#ifdef WM_DEBUG
static inline uint32_t
wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
else
return 0;
}
static inline uint8_t
wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
{
struct wm_softc *sc = rxq->rxq_sc;
if (sc->sc_type == WM_T_82574)
return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
else
return 0;
}
#endif /* WM_DEBUG */
static inline bool
wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
{
if (sc->sc_type == WM_T_82574)
return (status & ext_bit) != 0;
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return (status & nq_bit) != 0;
else
return (status & legacy_bit) != 0;
}
static inline bool
wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
{
if (sc->sc_type == WM_T_82574)
return (error & ext_bit) != 0;
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
return (error & nq_bit) != 0;
else
return (error & legacy_bit) != 0;
}
static inline bool
wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
{
if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
return true;
else
return false;
}
static inline bool
wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
{
struct wm_softc *sc = rxq->rxq_sc;
/* XXXX missing error bit for newqueue? */
if (wm_rxdesc_is_set_error(sc, errors,
WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
NQRXC_ERROR_RXE)) {
if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
log(LOG_WARNING, "%s: symbol error\n",
device_xname(sc->sc_dev));
else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
log(LOG_WARNING, "%s: receive sequence error\n",
device_xname(sc->sc_dev));
else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
log(LOG_WARNING, "%s: CRC error\n",
device_xname(sc->sc_dev));
return true;
}
return false;
}
static inline bool
wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
{
struct wm_softc *sc = rxq->rxq_sc;
if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
NQRXC_STATUS_DD)) {
/* We have processed all of the receive descriptors. */
struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
if (sc->sc_type == WM_T_82574) {
rxq->rxq_ext_descs[idx].erx_data.erxd_addr =
htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
+ sc->sc_align_tweak);
} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
rxq->rxq_nq_descs[idx].nqrx_data.nrxd_paddr =
htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
+ sc->sc_align_tweak);
}
wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
return false;
}
return true;
}
static inline bool
wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
struct mbuf *m)
{
struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
}
return true;
}
static inline void
wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
uint32_t errors, struct mbuf *m)
{
struct wm_softc *sc = rxq->rxq_sc;
if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
if (wm_rxdesc_is_set_status(sc, status,
WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
WM_Q_EVCNT_INCR(rxq, rxipsum);
m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
if (wm_rxdesc_is_set_error(sc, errors,
WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
m->m_pkthdr.csum_flags |=
M_CSUM_IPv4_BAD;
}
if (wm_rxdesc_is_set_status(sc, status,
WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
/*
* Note: we don't know if this was TCP or UDP,
* so we just set both bits, and expect the
* upper layers to deal.
*/
WM_Q_EVCNT_INCR(rxq, rxtusum);
m->m_pkthdr.csum_flags |=
M_CSUM_TCPv4 | M_CSUM_UDPv4 |
M_CSUM_TCPv6 | M_CSUM_UDPv6;
if (wm_rxdesc_is_set_error(sc, errors,
WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
m->m_pkthdr.csum_flags |=
M_CSUM_TCP_UDP_BAD;
}
}
}
/*
* wm_rxeof:
*
@ -7425,8 +7691,10 @@ wm_rxeof(struct wm_rxqueue *rxq)
struct mbuf *m;
int i, len;
int count = 0;
uint8_t status, errors;
uint32_t status, errors;
uint16_t vlantag;
uint32_t rsshash __debugused;
uint8_t rsstype __debugused;
KASSERT(mutex_owned(rxq->rxq_lock));
@ -7436,19 +7704,19 @@ wm_rxeof(struct wm_rxqueue *rxq)
DPRINTF(WM_DEBUG_RX,
("%s: RX: checking descriptor %d\n",
device_xname(sc->sc_dev), i));
wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
status = rxq->rxq_descs[i].wrx_status;
errors = rxq->rxq_descs[i].wrx_errors;
len = le16toh(rxq->rxq_descs[i].wrx_len);
vlantag = rxq->rxq_descs[i].wrx_special;
status = wm_rxdesc_get_status(rxq, i);
errors = wm_rxdesc_get_errors(rxq, i);
len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
vlantag = wm_rxdesc_get_vlantag(rxq, i);
#ifdef WM_DEBUG
rsshash = wm_rxdesc_get_rsshash(rxq, i);
rsstype = wm_rxdesc_get_rsstype(rxq, i);
#endif
if ((status & WRX_ST_DD) == 0) {
/* We have processed all of the receive descriptors. */
wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
if (!wm_rxdesc_dd(rxq, i, status))
break;
}
count++;
if (__predict_false(rxq->rxq_discard)) {
@ -7456,7 +7724,7 @@ wm_rxeof(struct wm_rxqueue *rxq)
("%s: RX: discarding contents of descriptor %d\n",
device_xname(sc->sc_dev), i));
wm_init_rxdesc(rxq, i);
if (status & WRX_ST_EOP) {
if (wm_rxdesc_is_eop(rxq, status)) {
/* Reset our state. */
DPRINTF(WM_DEBUG_RX,
("%s: RX: resetting rxdiscard -> 0\n",
@ -7485,7 +7753,7 @@ wm_rxeof(struct wm_rxqueue *rxq)
bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
wm_init_rxdesc(rxq, i);
if ((status & WRX_ST_EOP) == 0)
if (!wm_rxdesc_is_eop(rxq, status))
rxq->rxq_discard = 1;
if (rxq->rxq_head != NULL)
m_freem(rxq->rxq_head);
@ -7504,7 +7772,7 @@ wm_rxeof(struct wm_rxqueue *rxq)
device_xname(sc->sc_dev), m->m_data, len));
/* If this is not the end of the packet, keep looking. */
if ((status & WRX_ST_EOP) == 0) {
if (!wm_rxdesc_is_eop(rxq, status)) {
WM_RXCHAIN_LINK(rxq, m);
DPRINTF(WM_DEBUG_RX,
("%s: RX: not yet EOP, rxlen -> %d\n",
@ -7547,17 +7815,7 @@ wm_rxeof(struct wm_rxqueue *rxq)
device_xname(sc->sc_dev), len));
/* If an error occurred, update stats and drop the packet. */
if (errors &
(WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
if (errors & WRX_ER_SE)
log(LOG_WARNING, "%s: symbol error\n",
device_xname(sc->sc_dev));
else if (errors & WRX_ER_SEQ)
log(LOG_WARNING, "%s: receive sequence error\n",
device_xname(sc->sc_dev));
else if (errors & WRX_ER_CE)
log(LOG_WARNING, "%s: CRC error\n",
device_xname(sc->sc_dev));
if (wm_rxdesc_has_errors(rxq, errors)) {
m_freem(m);
continue;
}
@ -7565,40 +7823,24 @@ wm_rxeof(struct wm_rxqueue *rxq)
/* No errors. Receive the packet. */
m_set_rcvif(m, ifp);
m->m_pkthdr.len = len;
/*
* TODO
* should save rsshash and rsstype in this mbuf.
*/
DPRINTF(WM_DEBUG_RX,
("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
device_xname(sc->sc_dev), rsstype, rsshash));
/*
* If VLANs are enabled, VLAN packets have been unwrapped
* for us. Associate the tag with the packet.
*/
/* XXXX should check for i350 and i354 */
if ((status & WRX_ST_VP) != 0) {
VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
}
if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
continue;
/* Set up checksum info for this packet. */
if ((status & WRX_ST_IXSM) == 0) {
if (status & WRX_ST_IPCS) {
WM_Q_EVCNT_INCR(rxq, rxipsum);
m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
if (errors & WRX_ER_IPE)
m->m_pkthdr.csum_flags |=
M_CSUM_IPv4_BAD;
}
if (status & WRX_ST_TCPCS) {
/*
* Note: we don't know if this was TCP or UDP,
* so we just set both bits, and expect the
* upper layers to deal.
*/
WM_Q_EVCNT_INCR(rxq, rxtusum);
m->m_pkthdr.csum_flags |=
M_CSUM_TCPv4 | M_CSUM_UDPv4 |
M_CSUM_TCPv6 | M_CSUM_UDPv6;
if (errors & WRX_ER_TCPE)
m->m_pkthdr.csum_flags |=
M_CSUM_TCP_UDP_BAD;
}
}
wm_rxdesc_ensure_checksum(rxq, status, errors, m);
mutex_exit(rxq->rxq_lock);

if_wmreg.h

@ -1,4 +1,4 @@
/* $NetBSD: if_wmreg.h,v 1.94 2016/12/13 10:01:44 msaitoh Exp $ */
/* $NetBSD: if_wmreg.h,v 1.95 2017/01/16 00:09:06 knakahara Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -125,6 +125,211 @@ typedef struct wiseman_rxdesc {
#define WRX_VLAN_CFI (1U << 12) /* Canonical Form Indicator */
#define WRX_VLAN_PRI(x) (((x) >> 13) & 7)/* VLAN priority field */
/* extended RX descriptor for 82574 */
typedef union ext_rxdesc {
struct {
uint64_t erxd_addr; /* Packet Buffer Address */
uint64_t erxd_dd; /* 63:1 reserved, 0 DD */
} erx_data;
struct {
uint32_t erxc_mrq; /*
* 31:13 reserved
* 12:8 Rx queue associated with the packet
* 7:4 reserved, 3:0 RSS Type
*/
uint32_t erxc_rsshash; /* RSS Hash or {Fragment Checksum, IP identification} */
uint32_t erxc_err_stat; /* 31:20 Extended Error, 19:0 Extended Status */
uint16_t erxc_pktlen; /* PKT_LEN */
uint16_t erxc_vlan; /* VLAN Tag */
} erx_ctx;
} __packed ext_rxdesc_t;
#define EXTRXD_DD_MASK __BIT(0)
/*
* erxc_rsshash is used in the following two ways:
* (1) Fragment Checksum and IP identification
*     - Fragment Checksum is valid
*       when RXCSUM.PCSD is cleared and the RXCSUM.IPPCSE bit is set
*     - IP identification is valid
*       when RXCSUM.PCSD is cleared and the RXCSUM.IPPCSE bit is set
* (2) RSS Hash
*     when the RXCSUM.PCSD bit is set
*/
#define EXTRXC_IP_ID_MASK __BITS(15,0)
#define EXTRXC_FRAG_CSUM_MASK __BITS(31,16)
#define EXTRXC_IP_ID(rsshash) __SHIFTOUT(rsshash,EXTRXC_IP_ID_MASK)
#define EXTRXC_FRAG_CSUM(rsshash) __SHIFTOUT(rsshash,EXTRXC_FRAG_CSUM_MASK)
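/*
 * Decoding sketch (illustrative, not part of this commit): which view of
 * erxc_rsshash applies depends on how RXCSUM was programmed:
 *
 *	uint32_t rsshash = rxd->erx_ctx.erxc_rsshash;
 *	if (pcsd_set) {			// RXCSUM.PCSD set: RSS hash
 *		hash = rsshash;
 *	} else {			// PCSD clear, IPPCSE set
 *		ipid = EXTRXC_IP_ID(rsshash);		// bits 15:0
 *		csum = EXTRXC_FRAG_CSUM(rsshash);	// bits 31:16
 *	}
 */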
/* macros for erxc_mrq */
#define EXTRXC_RSS_TYPE_MASK __BITS(3,0)
/* __BITS(7,4) is reserved */
#define EXTRXC_QUEUE_MASK __BITS(12,8)
/* __BITS(31,13) is reserved */
#define EXTRXC_RSS_TYPE(mrq) __SHIFTOUT(mrq,EXTRXC_RSS_TYPE_MASK)
#define EXTRXC_QUEUE(mrq) __SHIFTOUT(mrq,EXTRXC_QUEUE_MASK)
#define EXTRXC_RSS_TYPE_NONE 0x0 /* No hash computation done. */
#define EXTRXC_RSS_TYPE_TCP_IPV4 0x1
#define EXTRXC_RSS_TYPE_IPV4 0x2
#define EXTRXC_RSS_TYPE_TCP_IPV6 0x3
#define EXTRXC_RSS_TYPE_IPV6_EX 0x4
#define EXTRXC_RSS_TYPE_IPV6 0x5
/*0x6:0xF is reserved. */
#define EXTRXC_STATUS_MASK __BITS(19,0)
#define EXTRXC_ERROR_MASK __BITS(31,20)
#define EXTRXC_STATUS(err_stat) __SHIFTOUT(err_stat,EXTRXC_STATUS_MASK)
#define EXTRXC_ERROR(err_stat) __SHIFTOUT(err_stat,EXTRXC_ERROR_MASK)
/* 3:0 is reserved. */
#define EXTRXC_ERROR_CE __BIT(4) /* The same as WRX_ER_CE. */
#define EXTRXC_ERROR_SE __BIT(5) /* The same as WRX_ER_SE. */
#define EXTRXC_ERROR_SEQ __BIT(6) /* The same as WRX_ER_SEQ. */
/* 7 is reserved. */
#define EXTRXC_ERROR_CXE __BIT(8) /* The same as WRX_ER_CXE. */
#define EXTRXC_ERROR_TCPE __BIT(9) /* The same as WRX_ER_TCPE. */
#define EXTRXC_ERROR_IPE __BIT(10) /* The same as WRX_ER_IPE. */
#define EXTRXC_ERROR_RXE __BIT(11) /* The same as WRX_ER_RXE. */
#define EXTRXC_STATUS_DD __BIT(0) /* The same as WRX_ST_DD. */
#define EXTRXC_STATUS_EOP __BIT(1) /* The same as WRX_ST_EOP. */
/* 2 is reserved. */
#define EXTRXC_STATUS_VP __BIT(3) /* The same as WRX_ST_VP. */
#define EXTRXC_STATUS_UDPCS __BIT(4) /* UDP checksum calculated on packet. */
#define EXTRXC_STATUS_TCPCS __BIT(5) /* The same as WRX_ST_TCPCS. */
#define EXTRXC_STATUS_IPCS __BIT(6) /* The same as WRX_ST_IPCS. */
/* 7 is reserved. */
#define EXTRXC_STATUS_TST __BIT(8) /* Time stamp taken. */
#define EXTRXC_STATUS_IPIDV __BIT(9) /* IP identification valid. */
#define EXTRXC_STATUS_UDPV __BIT(10) /* Valid UDP XSUM. */
/* 14:11 is reserved. */
#define EXTRXC_STATUS_ACK __BIT(15) /* ACK packet indication. */
#define EXTRXC_STATUS_PKTTYPE_MASK __BITS(19,16)
#define EXTRXC_STATUS_PKTTYPE(status) __SHIFTOUT(status,EXTRXC_STATUS_PKTTYPE_MASK)
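/*
 * Lifecycle sketch (illustrative): software programs the erx_data view and
 * hands the descriptor to hardware; on completion the hardware writes back
 * over the same 16 bytes in the erx_ctx layout, which is why every
 * descriptor is re-initialized in wm_init_rxdesc() before reuse:
 *
 *	rxd->erx_data.erxd_addr = htole64(bufpa);	// buffer to fill
 *	rxd->erx_data.erxd_dd = 0;			// clear stale DD
 *	...
 *	uint32_t es = rxd->erx_ctx.erxc_err_stat;	// after POSTREAD sync
 *	if (EXTRXC_STATUS(es) & EXTRXC_STATUS_DD)	// hardware is done
 *		len = rxd->erx_ctx.erxc_pktlen;
 */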
/* advanced RX descriptor for 82575 and newer */
typedef union nq_rxdesc {
struct {
uint64_t nrxd_paddr; /* 63:1 Packet Buffer Address, 0 A0/NSE */
uint64_t nrxd_haddr; /* 63:1 Header Buffer Address, 0 DD */
} nqrx_data;
struct {
uint32_t nrxc_misc; /*
* 31: SPH, 30:21 HDR_LEN[9:0],
* 20:19 HDR_LEN[11:10], 18:17 RSV,
* 16:4 Packet Type, 3:0 RSS Type
*/
uint32_t nrxc_rsshash; /* RSS Hash or {Fragment Checksum, IP identification} */
uint32_t nrxc_err_stat; /* 31:20 Extended Error, 19:0 Extended Status */
uint16_t nrxc_pktlen; /* PKT_LEN */
uint16_t nrxc_vlan; /* VLAN Tag */
} nqrx_ctx;
} __packed nq_rxdesc_t;
/* for nrxd_paddr macros */
#define NQRXD_A0_MASK __BIT(0)
#define NQRXD_NSE_MASK __BIT(0)
#define NQRXD_ADDR_MASK __BITS(63,1)
/* for nrxd_haddr macros */
#define NQRXD_DD_MASK __BIT(0)
/*
* nrxc_rsshash is used in the following two ways:
* (1) Fragment Checksum and IP identification
*     - Fragment Checksum is valid
*       when RXCSUM.PCSD is cleared and the RXCSUM.IPPCSE bit is set
*     - IP identification is valid
*       when RXCSUM.PCSD is cleared and the RXCSUM.IPPCSE bit is set
* (2) RSS Hash
*     when the RXCSUM.PCSD bit is set
*/
#define NQRXC_IP_ID_MASK __BITS(15,0)
#define NQRXC_FRAG_CSUM_MASK __BITS(31,16)
#define NQRXC_IP_ID(rsshash) __SHIFTOUT(rsshash,NQRXC_IP_ID_MASK)
#define NQRXC_FRAG_CSUM(rsshash) __SHIFTOUT(rsshash,NQRXC_FRAG_CSUM_MASK)
/* macros for nrxc_misc */
#define NQRXC_RSS_TYPE_MASK __BITS(3,0)
#define NQRXC_PKT_TYPE_ID_MASK __BITS(11,4)
#define NQRXC_PKT_TYPE_ETQF_INDEX_MASK __BITS(11,4)
#define NQRXC_PKT_TYPE_ETQF_VALID_MASK __BIT(15)
#define NQRXC_PKT_TYPE_VLAN_MASK __BIT(16)
#define NQRXC_PKT_TYPE_MASK __BITS(16,4)
/* __BITS(18,17) is reserved */
#define NQRXC_HDRLEN_HIGH_MASK __BITS(20,19)
#define NQRXC_HDRLEN_LOW_MASK __BITS(30,21)
#define NQRXC_SPH_MASK __BIT(31)
#define NQRXC_RSS_TYPE(misc) __SHIFTOUT(misc,NQRXC_RSS_TYPE_MASK)
#define NQRXC_PKT_TYPE_ID(pkttype) \
__SHIFTOUT(pkttype,NQRXC_PKT_TYPE_ID_MASK)
#define NQRXC_PKT_TYPE(misc) __SHIFTOUT(misc,NQRXC_PKT_TYPE_MASK)
#define NQRXC_PKT_TYPE_ETQF_INDEX(pkttype) \
__SHIFTOUT(pkttype,NQRXC_PKT_TYPE_ETQF_INDEX_MASK)
#define NQRXC_PKT_TYPE_ETQF_VALID NQRXC_PKT_TYPE_ETQF_VALID_MASK
#define NQRXC_PKT_TYPE_VLAN NQRXC_PKT_TYPE_VLAN_MASK
#define NQRXC_HEADER_LEN(misc) (__SHIFTOUT(misc,NQRXC_HDRLEN_LOW_MASK) \
| __SHIFTOUT(misc,NQRXC_HDRLEN_HIGH_MASK) << 10)
#define NQRXC_SPH NQRXC_SPH_MASK
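/*
 * Worked example (illustrative): HDR_LEN is stored split, bits 30:21 hold
 * HDR_LEN[9:0] and bits 20:19 hold HDR_LEN[11:10].  For misc = 0x00680000
 * (bits 22, 21 and 19 set):
 *	__SHIFTOUT(misc, NQRXC_HDRLEN_LOW_MASK)  == 3
 *	__SHIFTOUT(misc, NQRXC_HDRLEN_HIGH_MASK) == 1
 *	NQRXC_HEADER_LEN(misc) == 3 | (1 << 10) == 1027
 */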
#define NQRXC_RSS_TYPE_NONE 0x0 /* No hash computation done. */
#define NQRXC_RSS_TYPE_TCP_IPV4 0x1
#define NQRXC_RSS_TYPE_IPV4 0x2
#define NQRXC_RSS_TYPE_TCP_IPV6 0x3
#define NQRXC_RSS_TYPE_IPV6_EX 0x4
#define NQRXC_RSS_TYPE_IPV6 0x5
#define NQRXC_RSS_TYPE_TCP_IPV6_EX 0x6
#define NQRXC_RSS_TYPE_UDP_IPV4 0x7
#define NQRXC_RSS_TYPE_UDP_IPV6 0x8
#define NQRXC_RSS_TYPE_UDP_IPV6_EX 0x9
/*0xA:0xF is reserved. */
#define NQRXC_PKT_TYPE_IPV4 __BIT(0)
#define NQRXC_PKT_TYPE_IPV4E __BIT(1)
#define NQRXC_PKT_TYPE_IPV6 __BIT(2)
#define NQRXC_PKT_TYPE_IPV6E __BIT(3)
#define NQRXC_PKT_TYPE_TCP __BIT(4)
#define NQRXC_PKT_TYPE_UDP __BIT(5)
#define NQRXC_PKT_TYPE_SCTP __BIT(6)
#define NQRXC_PKT_TYPE_NFS __BIT(7)
#define NQRXC_STATUS_MASK __BITS(19,0)
#define NQRXC_ERROR_MASK __BITS(31,20)
#define NQRXC_STATUS(err_stat) __SHIFTOUT(err_stat,NQRXC_STATUS_MASK)
#define NQRXC_ERROR(err_stat) __SHIFTOUT(err_stat,NQRXC_ERROR_MASK)
/* 2:0 is reserved. */
#define NQRXC_ERROR_HB0 __BIT(3) /* Header Buffer Overflow. */
/* 6:4 is reserved. */
/* 8:7 is reserved. */
#define NQRXC_ERROR_L4E __BIT(9) /* L4 error indication. */
#define NQRXC_ERROR_IPE __BIT(10) /* The same as WRX_ER_IPE. */
#define NQRXC_ERROR_RXE __BIT(11) /* The same as WRX_ER_RXE. */
/* XXX Where are the WRX_ER_CE, WRX_ER_SE, WRX_ER_SEQ, WRX_ER_CXE equivalents? */
#define NQRXC_STATUS_DD __BIT(0) /* The same as WRX_ST_DD. */
#define NQRXC_STATUS_EOP __BIT(1) /* The same as WRX_ST_EOP. */
/* 2 is reserved */
#define NQRXC_STATUS_VP __BIT(3) /* The same as WRX_ST_VP. */
#define NQRXC_STATUS_UDPCS __BIT(4) /* UDP checksum or IP payload checksum. */
/* XXX in the I210 spec this bit is the same as WRX_ST_BPDU (described there only as "???") */
#define NQRXC_STATUS_L4I __BIT(5) /* L4 integrity check was done. */
#define NQRXC_STATUS_IPCS __BIT(6) /* The same as WRX_ST_IPCS. */
#define NQRXC_STATUS_PIF __BIT(7) /* The same as WRX_ST_PIF. */
/* 8 is reserved */
#define NQRXC_STATUS_VEXT __BIT(9) /* First VLAN found on a double VLAN packet. */
#define NQRXC_STATUS_UDPV __BIT(10) /* The packet contains a valid checksum field in a first fragment UDP IPv4 packet. */
#define NQRXC_STATUS_LLINT __BIT(11) /* The packet caused an immediate interrupt. */
#define NQRXC_STATUS_STRIPCRC __BIT(12) /* Ethernet CRC is stripped. */
/* 14:13 is reserved */
#define NQRXC_STATUS_TSIP __BIT(15) /* Timestamp in packet. */
#define NQRXC_STATUS_TS __BIT(16) /* Time stamped packet. */
/* 17 is reserved */
#define NQRXC_STATUS_LB __BIT(18) /* Sent by a local virtual machine (VM to VM switch indication). */
#define NQRXC_STATUS_MC __BIT(19) /* Packet received from Manageability Controller */
/* "MBC" in i350 spec */
/*
* The Wiseman transmit descriptor.
*
@ -539,6 +744,11 @@ struct livengood_tcpip_ctxdesc {
#define RCTL_RDMTS_1_4 RCTL_RDMTS(1)
#define RCTL_RDMTS_1_8 RCTL_RDMTS(2)
#define RCTL_RDMTS_MASK RCTL_RDMTS(3)
#define RCTL_DTYP_MASK __BITS(11,10) /* descriptor type. 82574 only */
#define RCTL_DTYP(x) __SHIFTIN(x,RCTL_DTYP_MASK)
#define RCTL_DTYP_ONEBUF RCTL_DTYP(0) /* use one buffer (no header split). */
#define RCTL_DTYP_SPH RCTL_DTYP(1) /* split header buffer. */
/* RCTL_DTYP(2) and RCTL_DTYP(3) are reserved. */
#define RCTL_MO(x) ((x) << 12) /* multicast offset */
#define RCTL_BAM (1U << 15) /* broadcast accept mode */
#define RCTL_RDMTS_HEX __BIT(16)
@ -943,6 +1153,7 @@ struct livengood_tcpip_ctxdesc {
#define WMREG_RFCTL_NFSRDIS __BIT(7) /* NFS Read Disable */
#define WMREG_RFCTL_ACKDIS __BIT(12) /* ACK Accelerate Disable */
#define WMREG_RFCTL_ACKD_DIS __BIT(13) /* ACK data Disable */
#define WMREG_RFCTL_EXSTEN __BIT(15) /* Extended status Enable. 82574 only. */
#define WMREG_RFCTL_IPV6EXDIS __BIT(16) /* IPv6 Extension Header Disable */
#define WMREG_RFCTL_NEWIPV6EXDIS __BIT(17) /* New IPv6 Extension Header */