Pull up the following, requested by msaitoh in ticket #1792:

	sys/dev/pci/ixgbe/ix_txrx.c		1.105-1.116 via patch
	sys/dev/pci/ixgbe/ixgbe.c		1.345-1.346,1.349 via patch
	sys/dev/pci/ixgbe/ixgbe.h		1.94-1.98
	sys/dev/pci/ixgbe/ixgbe_type.h		1.62
	sys/dev/pci/ixgbe/ixv.c			1.193,1.195-1.196

- Clear the WTHRESH bit field before writing it.
- Optimize ixgbe_txeof().
- Use kmem_zalloc() instead of malloc(..., M_ZERO).
- Add QPRDC (Queue Packet Receive Drop Count) into iqdrops.
- Move the assignment of TXD (no functional change).
- ixv(4): Remove unused IFF_OACTIVE.
- Don't include the Flow Director related members, to reduce the size of
  struct tx_ring. On amd64 and aarch64 the real size is not changed
  because of the alignment.
- The descriptor ring size and the alignment are tested in the attach
  function, so it's not required to use roundup2(size, DBA_ALIGN).
- Use #ifdef LRO more, to reduce the size of struct rx_ring.
- Change "me" from 32 bit to 8 bit, because the maximum is 128. This
  doesn't change the real size of ix_queue, tx_ring and rx_ring because
  of the alignment.
- The RSC (hardware receive side coalescing) feature has been disabled
  all along, so enclose the code with #ifdef RSC.
- Remove unused code.
- Modify for readability.
- Fix comments and whitespace.
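The WTHRESH change is the usual read-modify-write fix for a multi-bit register field: if the field is not masked off first, whatever value is already in TXDCTL is ORed together with the new threshold. A minimal sketch of the pattern, using the mask and shift this pullup introduces (the helper function itself is hypothetical, not part of the driver):

```c
#include <stdint.h>

#define IXGBE_TXDCTL_WTHRESH_MASK	0x007f0000
#define IXGBE_TXDCTL_WTHRESH_SHIFT	16
#define IXGBE_TX_WTHRESH		8	/* burst writeback */

/* Hypothetical helper: update only the WTHRESH field of TXDCTL. */
static uint32_t
txdctl_set_wthresh(uint32_t txdctl, uint32_t wthresh)
{
	txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;	/* clear stale field bits */
	txdctl |= wthresh << IXGBE_TXDCTL_WTHRESH_SHIFT;
	return txdctl;
}
```

Without the `&=` step, a previously programmed WTHRESH could leak bits into the new value, which is exactly what the ixgbe.c and ixv.c hunks below correct.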
parent d4b93fded3
commit 582444be6f
Index: sys/dev/pci/ixgbe/ix_txrx.c
@@ -1,4 +1,4 @@
-/* $NetBSD: ix_txrx.c,v 1.54.2.14 2023/10/18 14:05:27 martin Exp $ */
+/* $NetBSD: ix_txrx.c,v 1.54.2.15 2024/02/03 12:13:32 martin Exp $ */
 
 /******************************************************************************
 
@@ -64,13 +64,14 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.54.2.14 2023/10/18 14:05:27 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.54.2.15 2024/02/03 12:13:32 martin Exp $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
 #include "ixgbe.h"
 
+#ifdef RSC
 /*
  * HW RSC control:
  *  this feature only works with
@@ -84,7 +85,9 @@ __KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.54.2.14 2023/10/18 14:05:27 martin Exp
  * to enable.
  */
 static bool ixgbe_rsc_enable = FALSE;
+#endif
 
+#ifdef IXGBE_FDIR
 /*
  * For Flow Director: this is the
  * number of TX packets we sample
@@ -95,6 +98,7 @@ static bool ixgbe_rsc_enable = FALSE;
  * setting this to 0.
  */
 static int atr_sample_rate = 20;
+#endif
 
 #define IXGBE_M_ADJ(sc, rxr, mp) \
 	if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \
@@ -122,8 +126,9 @@ static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
 static int	ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t,
 		    struct ixgbe_dma_alloc *, int);
 static void	ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *);
-static void	ixgbe_setup_hw_rsc(struct rx_ring *);
+#ifdef RSC
+static void	ixgbe_setup_hw_rsc(struct rx_ring *);
+#endif
 
 /************************************************************************
  * ixgbe_legacy_start_locked - Transmit entry point
@@ -406,7 +411,7 @@ ixgbe_drain_all(struct ixgbe_softc *sc)
 static int
 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
 {
-	struct ixgbe_softc *sc = txr->sc;
+	struct ixgbe_softc	*sc = txr->sc;
 	struct ixgbe_tx_buf	*txbuf;
 	union ixgbe_adv_tx_desc *txd = NULL;
 	struct ifnet		*ifp = sc->ifp;
@@ -487,8 +492,8 @@ retry:
 	}
 
 	/*
-	 * Set up the appropriate offload context
-	 * this will consume the first descriptor
+	 * Set up the appropriate offload context if requested,
+	 * this may consume one TX descriptor.
	 */
 	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
 	if (__predict_false(error)) {
@@ -625,14 +630,8 @@ ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
 		goto fail;
 	}
 
-	txr->tx_buffers =
-	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
-	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (txr->tx_buffers == NULL) {
-		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
-		error = ENOMEM;
-		goto fail;
-	}
+	txr->tx_buffers = kmem_zalloc(sizeof(struct ixgbe_tx_buf) *
+	    sc->num_tx_desc, KM_SLEEP);
 
 	/* Create the descriptor buffer dma maps */
 	txbuf = txr->tx_buffers;
@@ -722,9 +721,11 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
 		txbuf->eop = NULL;
 	}
 
+#ifdef IXGBE_FDIR
 	/* Set the rate at which we sample packets */
 	if (sc->feat_en & IXGBE_FEATURE_FDIR)
 		txr->atr_sample = atr_sample_rate;
+#endif
 
 	/* Set number of descriptors available */
 	txr->tx_avail = sc->num_tx_desc;
@@ -761,7 +762,7 @@ ixgbe_free_transmit_structures(struct ixgbe_softc *sc)
 		ixgbe_dma_free(sc, &txr->txdma);
 		IXGBE_TX_LOCK_DESTROY(txr);
 	}
-	free(sc->tx_rings, M_DEVBUF);
+	kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
 } /* ixgbe_free_transmit_structures */
 
 /************************************************************************
@@ -772,7 +773,7 @@ ixgbe_free_transmit_structures(struct ixgbe_softc *sc)
 static void
 ixgbe_free_transmit_buffers(struct tx_ring *txr)
 {
-	struct ixgbe_softc *sc = txr->sc;
+	struct ixgbe_softc	*sc = txr->sc;
 	struct ixgbe_tx_buf	*tx_buffer;
 	int			i;
 
@@ -809,7 +810,8 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr)
 		pcq_destroy(txr->txr_interq);
 	}
 	if (txr->tx_buffers != NULL) {
-		free(txr->tx_buffers, M_DEVBUF);
+		kmem_free(txr->tx_buffers,
+		    sizeof(struct ixgbe_tx_buf) * sc->num_tx_desc);
 		txr->tx_buffers = NULL;
 	}
 	if (txr->txtag != NULL) {
@@ -861,9 +863,6 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
 	/* Indicate the whole packet as payload when not doing TSO */
 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
 
-	/* Now ready a context descriptor */
-	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
-
 	/*
 	 * In advanced descriptors the vlan tag must
 	 * be placed into the context descriptor. Hence
@@ -966,6 +965,9 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
 no_offloads:
 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
+	/* Now ready a context descriptor */
+	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
+
 	/* Now copy bits into descriptor */
 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
@@ -1115,6 +1117,7 @@ ixgbe_txeof(struct tx_ring *txr)
 	union ixgbe_adv_tx_desc *txd;
 	u32			work, processed = 0;
 	u32			limit = sc->tx_process_limit;
+	u16			avail;
 
 	KASSERT(mutex_owned(&txr->tx_mtx));
 
@@ -1158,6 +1161,7 @@ ixgbe_txeof(struct tx_ring *txr)
 	buf = &txr->tx_buffers[work];
 	txd = &txr->tx_base[work];
 	work -= txr->num_desc; /* The distance to ring end */
+	avail = txr->tx_avail;
 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 	    BUS_DMASYNC_POSTREAD);
 
@@ -1179,8 +1183,7 @@ ixgbe_txeof(struct tx_ring *txr)
 			buf->m_head = NULL;
 		}
 		buf->eop = NULL;
-		txr->txr_no_space = false;
-		++txr->tx_avail;
+		++avail;
 
 		/* We clean the range if multi segment */
 		while (txd != eop) {
@@ -1205,13 +1208,11 @@ ixgbe_txeof(struct tx_ring *txr)
 				m_freem(buf->m_head);
 				buf->m_head = NULL;
 			}
-			++txr->tx_avail;
+			++avail;
 			buf->eop = NULL;
 
 		}
-		++txr->packets;
 		++processed;
-		++ifp->if_opackets;
 
 		/* Try the next packet */
 		++txd;
@@ -1231,6 +1232,12 @@ ixgbe_txeof(struct tx_ring *txr)
 
 	work += txr->num_desc;
 	txr->next_to_clean = work;
+	if (processed) {
+		txr->tx_avail = avail;
+		txr->txr_no_space = false;
+		txr->packets += processed;
+		ifp->if_opackets += processed;
+	}
 
 	/*
 	 * Queue Hang detection, we know there's
@@ -1255,6 +1262,7 @@ ixgbe_txeof(struct tx_ring *txr)
 	return ((limit > 0) ? false : true);
 } /* ixgbe_txeof */
 
+#ifdef RSC
 /************************************************************************
  * ixgbe_rsc_count
  *
@@ -1331,6 +1339,7 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr)
 
 	rxr->hw_rsc = TRUE;
 } /* ixgbe_setup_hw_rsc */
+#endif
 
 /************************************************************************
  * ixgbe_refresh_mbufs
@@ -1425,13 +1434,7 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
 	int bsize, error;
 
 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
-	rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
-	    M_NOWAIT | M_ZERO);
-	if (rxr->rx_buffers == NULL) {
-		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
-		error = ENOMEM;
-		goto fail;
-	}
+	rxr->rx_buffers = kmem_zalloc(bsize, KM_SLEEP);
 
 	error = ixgbe_dma_tag_create(
 	    /* parent */ sc->osdep.dmat,
@@ -1509,8 +1512,8 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 	slot = netmap_reset(na, NR_RX, rxr->me, 0);
 #endif /* DEV_NETMAP */
 
-	rsize = roundup2(sc->num_rx_desc *
-	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+	rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
+	KASSERT((rsize % DBA_ALIGN) == 0);
 	bzero((void *)rxr->rx_base, rsize);
 	/* Cache the size */
 	rxr->mbuf_sz = sc->rx_mbuf_sz;
@@ -1519,10 +1522,10 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 	ixgbe_free_receive_ring(rxr);
 
 	/* Now replenish the mbufs */
-	for (int j = 0; j != rxr->num_desc; ++j) {
+	for (int i = 0; i < rxr->num_desc; i++) {
 		struct mbuf *mp;
 
-		rxbuf = &rxr->rx_buffers[j];
+		rxbuf = &rxr->rx_buffers[i];
 
 #ifdef DEV_NETMAP
 		/*
@@ -1533,14 +1536,14 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 		 * an mbuf, so end the block with a continue;
 		 */
 		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
-			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
+			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], i);
 			uint64_t paddr;
 			void *addr;
 
 			addr = PNMB(na, slot + sj, &paddr);
 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
 			/* Update descriptor and the cached value */
-			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
+			rxr->rx_base[i].read.pkt_addr = htole64(paddr);
 			rxbuf->addr = htole64(paddr);
 			continue;
 		}
@@ -1572,7 +1575,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
 		/* Update the descriptor and the cached value */
-		rxr->rx_base[j].read.pkt_addr =
+		rxr->rx_base[i].read.pkt_addr =
 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
 	}
@@ -1580,7 +1583,9 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 	/* Setup our descriptor indices */
 	rxr->next_to_check = 0;
 	rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */
+#ifdef LRO
 	rxr->lro_enabled = FALSE;
+#endif
 	rxr->discard_multidesc = false;
 	IXGBE_EVC_STORE(&rxr->rx_copies, 0);
 #if 0 /* NetBSD */
@@ -1597,10 +1602,15 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 	/*
 	 * Now set up the LRO interface
 	 */
+#ifdef RSC
 	if (ixgbe_rsc_enable)
 		ixgbe_setup_hw_rsc(rxr);
+#endif
 #ifdef LRO
-	else if (ifp->if_capenable & IFCAP_LRO) {
+#ifdef RSC
	else
+#endif
+	if (ifp->if_capenable & IFCAP_LRO) {
 		device_t dev = sc->dev;
 		int err = tcp_lro_init(lro);
 		if (err) {
@@ -1677,7 +1687,7 @@ ixgbe_free_receive_structures(struct ixgbe_softc *sc)
 		IXGBE_RX_LOCK_DESTROY(rxr);
 	}
 
-	free(sc->rx_rings, M_DEVBUF);
+	kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
 } /* ixgbe_free_receive_structures */
 
 
@@ -1704,7 +1714,8 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
 	}
 
 	if (rxr->rx_buffers != NULL) {
-		free(rxr->rx_buffers, M_DEVBUF);
+		kmem_free(rxr->rx_buffers,
+		    sizeof(struct ixgbe_rx_buf) * rxr->num_desc);
 		rxr->rx_buffers = NULL;
 	}
 }
@@ -1867,7 +1878,10 @@ ixgbe_rxeof(struct ix_queue *que)
 
 	struct mbuf		*sendmp, *mp;
 	struct mbuf		*newmp;
-	u32			rsc, ptype;
+#ifdef RSC
+	u32			rsc;
+#endif
+	u32			ptype;
 	u16			len;
 	u16			vtag = 0;
 	bool			eop;
@@ -1902,7 +1916,9 @@ ixgbe_rxeof(struct ix_queue *que)
 		loopcount++;
 		sendmp = newmp = NULL;
 		nbuf = NULL;
+#ifdef RSC
 		rsc = 0;
+#endif
 		cur->wb.upper.status_error = 0;
 		rbuf = &rxr->rx_buffers[i];
 		mp = rbuf->buf;
@@ -1988,6 +2004,7 @@ ixgbe_rxeof(struct ix_queue *que)
 			 * Figure out the next descriptor
 			 * of this frame.
 			 */
+#ifdef RSC
 			if (rxr->hw_rsc == TRUE) {
 				rsc = ixgbe_rsc_count(cur);
 				rxr->rsc_num += (rsc - 1);
@@ -1995,7 +2012,9 @@ ixgbe_rxeof(struct ix_queue *que)
 			if (rsc) { /* Get hardware index */
 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 				    IXGBE_RXDADV_NEXTP_SHIFT);
-			} else { /* Just sequential */
+			} else
+#endif
+			{ /* Just sequential */
 				nextp = i + 1;
 				if (nextp == sc->num_rx_desc)
 					nextp = 0;
@@ -2345,35 +2364,20 @@ ixgbe_allocate_queues(struct ixgbe_softc *sc)
 	int txconf = 0, rxconf = 0;
 
 	/* First, allocate the top level queue structs */
-	sc->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
-	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (sc->queues == NULL) {
-		aprint_error_dev(dev, "Unable to allocate queue memory\n");
-		error = ENOMEM;
-		goto fail;
-	}
+	sc->queues = kmem_zalloc(sizeof(struct ix_queue) * sc->num_queues,
+	    KM_SLEEP);
 
 	/* Second, allocate the TX ring struct memory */
-	sc->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
-	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (sc->tx_rings == NULL) {
-		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
-		error = ENOMEM;
-		goto tx_fail;
-	}
+	sc->tx_rings = kmem_zalloc(sizeof(struct tx_ring) * sc->num_queues,
+	    KM_SLEEP);
 
 	/* Third, allocate the RX ring */
-	sc->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
-	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (sc->rx_rings == NULL) {
-		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
-		error = ENOMEM;
-		goto rx_fail;
-	}
+	sc->rx_rings = kmem_zalloc(sizeof(struct rx_ring) * sc->num_queues,
+	    KM_SLEEP);
 
 	/* For the ring itself */
-	tsize = roundup2(sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
-	    DBA_ALIGN);
+	tsize = sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc);
+	KASSERT((tsize % DBA_ALIGN) == 0);
 
 	/*
 	 * Now set up the TX queues, txconf is needed to handle the
@@ -2429,8 +2433,8 @@ ixgbe_allocate_queues(struct ixgbe_softc *sc)
 	/*
 	 * Next the RX queues...
 	 */
-	rsize = roundup2(sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
-	    DBA_ALIGN);
+	rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
+	KASSERT((rsize % DBA_ALIGN) == 0);
 	for (int i = 0; i < sc->num_queues; i++, rxconf++) {
 		rxr = &sc->rx_rings[i];
 		/* Set up some basics */
@@ -2488,12 +2492,9 @@ err_rx_desc:
 err_tx_desc:
 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
 		ixgbe_dma_free(sc, &txr->txdma);
-	free(sc->rx_rings, M_DEVBUF);
-rx_fail:
-	free(sc->tx_rings, M_DEVBUF);
-tx_fail:
-	free(sc->queues, M_DEVBUF);
-fail:
+	kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
+	kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
+	kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
 	return (error);
 } /* ixgbe_allocate_queues */
 
@@ -2515,5 +2516,5 @@ ixgbe_free_queues(struct ixgbe_softc *sc)
 		que = &sc->queues[i];
 		mutex_destroy(&que->dc_mtx);
 	}
-	free(sc->queues, M_DEVBUF);
+	kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
 } /* ixgbe_free_queues */
Index: sys/dev/pci/ixgbe/ixgbe.c
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe.c,v 1.199.2.29 2023/10/18 14:05:27 martin Exp $ */
+/* $NetBSD: ixgbe.c,v 1.199.2.30 2024/02/03 12:13:32 martin Exp $ */
 
 /******************************************************************************
 
@@ -64,7 +64,7 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.199.2.29 2023/10/18 14:05:27 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.199.2.30 2024/02/03 12:13:32 martin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -720,7 +720,7 @@ ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
 
 		txr->txr_no_space = false;
 
-		/* Disable Head Writeback */
+		/* Disable relax ordering */
 		/*
 		 * Note: for X550 series devices, these registers are actually
 		 * prefixed with TPH_ instead of DCA_, but the addresses and
@@ -1591,7 +1591,7 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
 	struct ixgbe_hw *hw = &sc->hw;
 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
 	u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
-	u64 total, total_missed_rx = 0;
+	u64 total, total_missed_rx = 0, total_qprdc = 0;
 	uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
 	unsigned int queue_counters;
 	int i;
@@ -1610,13 +1610,18 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
 		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
 		IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
 		if (hw->mac.type >= ixgbe_mac_82599EB) {
+			uint32_t qprdc;
+
 			IXGBE_EVC_ADD(&stats->qbrc[i],
 			    IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) +
 			    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32));
 			IXGBE_EVC_ADD(&stats->qbtc[i],
 			    IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) +
 			    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32));
-			IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
+			/* QPRDC will be added to iqdrops. */
+			qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+			IXGBE_EVC_ADD(&stats->qprdc[i], qprdc);
+			total_qprdc += qprdc;
 		} else {
 			/* 82598 */
 			IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]);
@@ -1750,7 +1755,7 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
 	ifp->if_collisions = 0;
 
 	/* Rx Errors */
-	ifp->if_iqdrops += total_missed_rx;
+	ifp->if_iqdrops += total_missed_rx + total_qprdc;
 
 	/*
 	 * Aggregate following types of errors as RX errors:
@@ -3567,7 +3572,7 @@ static int
 ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
     const struct pci_attach_args *pa)
 {
-	pcireg_t memtype, csr;
+	pcireg_t	memtype, csr;
 	device_t	dev = sc->dev;
 	bus_addr_t	addr;
 	int		flags;
@@ -4119,6 +4124,7 @@ ixgbe_init_locked(struct ixgbe_softc *sc)
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		/* Set WTHRESH to 8, burst writeback */
+		txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
 		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
 		/*
 		 * When the internal queue falls below PTHRESH (32),
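For context, the statistics hunk above reads each queue's Packet Receive Drop Count and folds the total into if_iqdrops alongside the missed-packet count. A simplified sketch of the accumulation (IXGBE_READ_REG and IXGBE_QPRDC are the driver's own macros; the standalone function is an illustration, not driver code):

```c
/* Sketch: total the per-queue RX drop counters (82599 and newer). */
static uint64_t
sum_qprdc(struct ixgbe_hw *hw, unsigned int queue_counters)
{
	uint64_t total_qprdc = 0;

	for (unsigned int i = 0; i < queue_counters; i++)
		total_qprdc += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

	return total_qprdc;
}
```

After this change, if_iqdrops reflects both the missed-packet counters and these per-queue drops, so queue-level overflow is no longer invisible to netstat.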
Index: sys/dev/pci/ixgbe/ixgbe.h
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe.h,v 1.56.2.12 2023/10/18 14:05:27 martin Exp $ */
+/* $NetBSD: ixgbe.h,v 1.56.2.13 2024/02/03 12:13:32 martin Exp $ */
 
 /******************************************************************************
   SPDX-License-Identifier: BSD-3-Clause
@@ -325,7 +325,7 @@ struct ix_queue {
 	struct ixgbe_softc *sc;
 	u32		msix; /* This queue's MSI-X vector */
 	u32		eitr_setting;
-	u32		me;
+	u8		me;
 	struct resource	*res;
 	int		busy;
 	struct tx_ring	*txr;
@@ -357,7 +357,7 @@ struct ix_queue {
 struct tx_ring {
 	struct ixgbe_softc	*sc;
 	kmutex_t		tx_mtx;
-	u32			me;
+	u8			me;
 	u32			tail;
 	int			busy;
 	union ixgbe_adv_tx_desc	*tx_base;
@@ -376,9 +376,11 @@ struct tx_ring {
 	void			*txr_si;
 	bool			txr_no_space; /* Like IFF_OACTIVE */
 
+#ifdef IXGBE_FDIR
 	/* Flow Director */
 	u16			atr_sample;
 	u16			atr_count;
+#endif
 
 	u64			bytes;	/* Used for AIM */
 	u64			packets;
@@ -405,15 +407,17 @@ struct tx_ring {
 struct rx_ring {
 	struct ixgbe_softc	*sc;
 	kmutex_t		rx_mtx;
-	u32			me;
+	u8			me;
 	u32			tail;
 	union ixgbe_adv_rx_desc	*rx_base;
 	struct ixgbe_dma_alloc	rxdma;
 #ifdef LRO
 	struct lro_ctrl		lro;
-#endif /* LRO */
 	bool			lro_enabled;
+#endif /* LRO */
+#ifdef RSC
 	bool			hw_rsc;
+#endif
 	bool			vtag_strip;
 	bool			discard_multidesc;
 	u16			next_to_refresh;
@@ -435,10 +439,9 @@ struct rx_ring {
 	struct evcnt		rx_bytes;
 	struct evcnt		rx_discarded;
 	struct evcnt		no_mbuf;
+#ifdef RSC
 	u64			rsc_num;
-
-	/* Flow Director */
-	u64			flm;
+#endif
 };
 
 struct ixgbe_vf {
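As the commit message notes, shrinking `me` to u8 and fencing off the Flow Director and RSC members does not change the structures' real sizes on amd64 or aarch64, because alignment padding absorbs the difference. A small illustration of the effect (hypothetical structs, not from the source):

```c
/* A u8 followed by a u32 member still occupies a 4-byte slot:
 * the compiler pads 3 bytes so 'tail' stays 4-byte aligned. */
struct demo_narrow { unsigned char me; unsigned int tail; };
struct demo_wide   { unsigned int  me; unsigned int tail; };

/* Both are 8 bytes on typical ABIs; this fails to compile
 * if the sizes ever differ. */
typedef char demo_same_size
    [sizeof(struct demo_narrow) == sizeof(struct demo_wide) ? 1 : -1];
```

The narrowing still pays off later: adjacent small members added in the future can share the slot that was previously a whole u32.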
Index: sys/dev/pci/ixgbe/ixgbe_type.h
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe_type.h,v 1.41.2.11 2023/10/18 14:05:27 martin Exp $ */
+/* $NetBSD: ixgbe_type.h,v 1.41.2.12 2024/02/03 12:13:33 martin Exp $ */
 
 /******************************************************************************
   SPDX-License-Identifier: BSD-3-Clause
@@ -2625,6 +2625,7 @@ enum {
 /* Transmit Config masks */
 #define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
 #define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_MASK	0x007f0000
 #define IXGBE_TXDCTL_WTHRESH_SHIFT	16 /* shift to WTHRESH bits */
 /* Enable short packet padding to 64 bytes */
 #define IXGBE_TX_PAD_ENABLE		0x00000400
Index: sys/dev/pci/ixgbe/ixv.c
@@ -1,4 +1,4 @@
-/* $NetBSD: ixv.c,v 1.125.2.24 2023/10/18 14:05:27 martin Exp $ */
+/* $NetBSD: ixv.c,v 1.125.2.25 2024/02/03 12:13:33 martin Exp $ */
 
 /******************************************************************************
 
@@ -35,7 +35,7 @@
 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.125.2.24 2023/10/18 14:05:27 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.125.2.25 2024/02/03 12:13:33 martin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -829,7 +829,6 @@ ixv_init_locked(struct ixgbe_softc *sc)
 
 	/* Inform the stack we're ready */
 	ifp->if_flags |= IFF_RUNNING;
-	ifp->if_flags &= ~IFF_OACTIVE;
 
 	/* And now turn on interrupts */
 	ixv_enable_intr(sc);
@@ -1478,7 +1477,7 @@ ixv_stop_locked(void *arg)
 	ixv_disable_intr(sc);
 
 	/* Tell the stack that the interface is no longer active */
-	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+	ifp->if_flags &= ~IFF_RUNNING;
 
 	hw->mac.ops.reset_hw(hw);
 	sc->hw.adapter_stopped = FALSE;
@@ -1499,8 +1498,8 @@ static int
 ixv_allocate_pci_resources(struct ixgbe_softc *sc,
     const struct pci_attach_args *pa)
 {
-	pcireg_t memtype, csr;
-	device_t dev = sc->dev;
+	pcireg_t	memtype, csr;
+	device_t	dev = sc->dev;
 	bus_addr_t	addr;
 	int		flags;
 
@@ -1700,6 +1699,7 @@ ixv_initialize_transmit_units(struct ixgbe_softc *sc)
 
 		/* Set WTHRESH to 8, burst writeback */
 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
 		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
 