Separate the buffer management code into 'mvxpbm.c' from if_mvxpe.c.

Buffer management (e.g. filling the Rx descriptors/buffers) is done by hardware on
ARMADA XP/380, and by software on ARMADA 370. Hardware BM support is not yet
implemented, so all devices use the software management mode at this time.
hsuenaga 2015-06-03 03:55:47 +00:00
parent 9400e05df8
commit ea5bc4c33e
8 changed files with 853 additions and 510 deletions
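As a hedged sketch only, the snippet below shows how the split-out mvxpbm API ends up being consumed by if_mvxpe.c, pieced together from the calls visible in the hunks that follow (mvxpbm_device(), mvxpbm_chunk_size(), mvxpbm_alloc(), mvxpbm_dmamap_sync(), mvxpbm_init_mbuf_hdr(), mvxpbm_free_chunk()). The wrapper function name, error values, and exact argument types are assumptions for illustration; the real prototypes live in the new mvxpbmvar.h, which is not shown in this diff.

/*
 * Sketch (not committed code): typical use of the new mvxpbm API by the
 * port driver.  Only calls that appear in the if_mvxpe.c hunks below are
 * used; the helper name and return values are hypothetical.
 */
#include <dev/marvell/mvxpbmvar.h>
#include <dev/marvell/if_mvxpevar.h>

static int
mvxpe_bm_usage_sketch(struct mvxpe_softc *sc, struct marvell_attach_args *mva)
{
	struct mvxpbm_chunk *chunk;

	/* attach time: look up the shared (software-emulated) BM instance */
	sc->sc_bm = mvxpbm_device(mva);
	if (sc->sc_bm == NULL)
		return ENODEV;
	aprint_normal_dev(sc->sc_dev, "chunk size %zu bytes\n",
	    mvxpbm_chunk_size(sc->sc_bm));

	/* Rx refill path: take a chunk and make it DMA-visible */
	chunk = mvxpbm_alloc(sc->sc_bm);
	if (chunk == NULL)
		return ENOBUFS;
	mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
	/* ... then r->bufptr = chunk->buf_pa and hand it to the NIC ... */

	/* Rx completion path: wrap the chunk in an mbuf, or return it to the pool */
	if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
		mvxpbm_free_chunk(chunk);
		return ENOBUFS;
	}
	return 0;
}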


@ -1,4 +1,4 @@
# $NetBSD: files.marvell,v 1.15 2015/06/03 03:34:38 hsuenaga Exp $ # $NetBSD: files.marvell,v 1.16 2015/06/03 03:55:47 hsuenaga Exp $
# #
# Configuration info for Marvell System on Chip support # Configuration info for Marvell System on Chip support
# #
@ -58,6 +58,9 @@ attach mvsata at mvsoc with mvsata_mbus
# Gigabit Ethernet Controller Interface # Gigabit Ethernet Controller Interface
attach mvgbec at mvsoc with mvgbec_mbus attach mvgbec at mvsoc with mvgbec_mbus
# ARMADA XP Buffer Manager
attach mvxpbm at mvsoc with mvxpbm_mbus
# ARMADA XP Gigabit Ethernet Controller Interface # ARMADA XP Gigabit Ethernet Controller Interface
attach mvxpe at mvsoc with mvxpe_mbus attach mvxpe at mvsoc with mvxpe_mbus


@ -1,4 +1,4 @@
/* $NetBSD: mvsoc.c,v 1.21 2015/06/03 03:04:21 hsuenaga Exp $ */ /* $NetBSD: mvsoc.c,v 1.22 2015/06/03 03:55:47 hsuenaga Exp $ */
/* /*
* Copyright (c) 2007, 2008, 2013, 2014 KIYOHARA Takashi * Copyright (c) 2007, 2008, 2013, 2014 KIYOHARA Takashi
* All rights reserved. * All rights reserved.
@ -26,7 +26,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvsoc.c,v 1.21 2015/06/03 03:04:21 hsuenaga Exp $"); __KERNEL_RCSID(0, "$NetBSD: mvsoc.c,v 1.22 2015/06/03 03:55:47 hsuenaga Exp $");
#include "opt_cputypes.h" #include "opt_cputypes.h"
#include "opt_mvsoc.h" #include "opt_mvsoc.h"
@ -685,6 +685,7 @@ static const struct mvsoc_periph {
{ ARMADAXP(MV78130), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADAXP(MV78130), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
{ ARMADAXP(MV78130), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADAXP(MV78130), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADAXP(MV78130), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADAXP(MV78130), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADAXP(MV78130), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
{ ARMADAXP(MV78130), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX }, { ARMADAXP(MV78130), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX },
#else #else
@ -716,6 +717,7 @@ static const struct mvsoc_periph {
{ ARMADAXP(MV78160), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI }, { ARMADAXP(MV78160), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI },
{ ARMADAXP(MV78160), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADAXP(MV78160), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADAXP(MV78160), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADAXP(MV78160), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADAXP(MV78160), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
{ ARMADAXP(MV78160), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADAXP(MV78160), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
{ ARMADAXP(MV78160), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX }, { ARMADAXP(MV78160), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX },
@ -751,6 +753,7 @@ static const struct mvsoc_periph {
{ ARMADAXP(MV78230), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI }, { ARMADAXP(MV78230), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI },
{ ARMADAXP(MV78230), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADAXP(MV78230), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADAXP(MV78230), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADAXP(MV78230), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADAXP(MV78230), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
{ ARMADAXP(MV78230), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADAXP(MV78230), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
{ ARMADAXP(MV78230), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX }, { ARMADAXP(MV78230), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX },
@ -784,6 +787,7 @@ static const struct mvsoc_periph {
{ ARMADAXP(MV78260), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI }, { ARMADAXP(MV78260), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI },
{ ARMADAXP(MV78260), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADAXP(MV78260), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADAXP(MV78260), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADAXP(MV78260), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADAXP(MV78260), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
{ ARMADAXP(MV78260), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADAXP(MV78260), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
{ ARMADAXP(MV78260), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX }, { ARMADAXP(MV78260), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX },
@ -820,6 +824,7 @@ static const struct mvsoc_periph {
{ ARMADAXP(MV78460), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI }, { ARMADAXP(MV78460), "mvspi", 0, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI },
{ ARMADAXP(MV78460), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADAXP(MV78460), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADAXP(MV78460), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADAXP(MV78460), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADAXP(MV78460), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
{ ARMADAXP(MV78460), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADAXP(MV78460), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
{ ARMADAXP(MV78460), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX }, { ARMADAXP(MV78460), "mvxpe", 2, ARMADAXP_GBE2_BASE,ARMADAXP_IRQ_GBE2_TH_RXTX },
@ -849,6 +854,7 @@ static const struct mvsoc_periph {
{ ARMADA370(MV6710), "mvspi", 1, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI }, { ARMADA370(MV6710), "mvspi", 1, ARMADAXP_SPI_BASE,ARMADAXP_IRQ_SPI },
{ ARMADA370(MV6710), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO }, { ARMADA370(MV6710), "mvsdio", 0, ARMADAXP_SDIO_BASE,ARMADAXP_IRQ_SDIO },
#if NMVXPE > 0 #if NMVXPE > 0
{ ARMADA370(MV6710), "mvxpbm", 0, MVA_OFFSET_DEFAULT,IRQ_DEFAULT },
{ ARMADA370(MV6710), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX }, { ARMADA370(MV6710), "mvxpe", 0, ARMADAXP_GBE0_BASE,ARMADAXP_IRQ_GBE0_TH_RXTX },
{ ARMADA370(MV6710), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX }, { ARMADA370(MV6710), "mvxpe", 1, ARMADAXP_GBE1_BASE,ARMADAXP_IRQ_GBE1_TH_RXTX },
#else #else


@ -1,7 +1,11 @@
# $NetBSD: files.armada,v 1.1 2015/06/03 03:34:38 hsuenaga Exp $ # $NetBSD: files.armada,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $
# Configuration info for Marvell ARMADA integrated peripherals # Configuration info for Marvell ARMADA integrated peripherals
# ARMADA XP Buffer Manger
device mvxpbm { [port = -1 ], [irq = -1] }
file dev/marvell/mvxpbm.c
# ARMADA XP Gigabit Ethernet Controller Interface # ARMADA XP Gigabit Ethernet Controller Interface
define mvxpe { [port = -1 ], [irq = -1] } define mvxpe { [port = -1 ], [irq = -1] }
device mvxpe: ether, ifnet, arp, mii device mvxpe: mvxpbm, ether, ifnet, arp, mii
file dev/marvell/if_mvxpe.c mvxpe needs-flag file dev/marvell/if_mvxpe.c mvxpe needs-flag


@ -1,4 +1,4 @@
/* $NetBSD: if_mvxpe.c,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $ */ /* $NetBSD: if_mvxpe.c,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $ */
/* /*
* Copyright (c) 2015 Internet Initiative Japan Inc. * Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved. * All rights reserved.
@ -25,7 +25,7 @@
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $"); __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $");
#include "opt_multiprocessor.h" #include "opt_multiprocessor.h"
@ -58,6 +58,7 @@ __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $"
#include <dev/marvell/marvellreg.h> #include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h> #include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpbmvar.h>
#include <dev/marvell/if_mvxpereg.h> #include <dev/marvell/if_mvxpereg.h>
#include <dev/marvell/if_mvxpevar.h> #include <dev/marvell/if_mvxpevar.h>
@ -136,30 +137,20 @@ STATIC void mvxpe_linkup(struct mvxpe_softc *);
STATIC void mvxpe_linkdown(struct mvxpe_softc *); STATIC void mvxpe_linkdown(struct mvxpe_softc *);
STATIC void mvxpe_linkreset(struct mvxpe_softc *); STATIC void mvxpe_linkreset(struct mvxpe_softc *);
/* Packet Buffer Manager(BM) */
STATIC int mvxpe_bm_init(struct mvxpe_softc *);
STATIC int mvxpe_bm_init_mbuf_hdr(struct mvxpe_bm_chunk *);
STATIC struct mvxpe_bm_chunk *mvxpe_bm_alloc(struct mvxpe_softc *);
STATIC void mvxpe_bm_free_mbuf(struct mbuf *, void *, size_t, void *);
STATIC void mvxpe_bm_free_chunk(struct mvxpe_bm_chunk *);
STATIC void mvxpe_bm_sync(struct mvxpe_bm_chunk *, size_t, int);
STATIC void mvxpe_bm_lock(struct mvxpe_softc *);
STATIC void mvxpe_bm_unlock(struct mvxpe_softc *);
/* Tx Subroutines */ /* Tx Subroutines */
STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *); STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int); STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
STATIC void mvxpe_tx_set_csumflag(struct ifnet *, STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
struct mvxpe_tx_desc *, struct mbuf *); struct mvxpe_tx_desc *, struct mbuf *);
STATIC void mvxpe_tx_complete(struct mvxpe_softc *); STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_tx_queue_del(struct mvxpe_softc *, int); STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);
/* Rx Subroutines */ /* Rx Subroutines */
STATIC void mvxpe_rx(struct mvxpe_softc *); STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int); STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, int *); STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
STATIC void mvxpe_rx_reload(struct mvxpe_softc *); STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue_reload(struct mvxpe_softc *, int); STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int); STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_set_csumflag(struct ifnet *, STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
struct mvxpe_rx_desc *, struct mbuf *); struct mvxpe_rx_desc *, struct mbuf *);
@ -330,14 +321,19 @@ mvxpe_attach(device_t parent, device_t self, void *aux)
aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version); aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
/* /*
* Software based Buffer Manager(BM) subsystem. * Buffer Manager(BM) subsystem.
* Try to allocate special memory chunks for Rx packets.
* Some version of SoC has hardware based BM(not supported yet)
*/ */
if (mvxpe_bm_init(sc) != 0) { sc->sc_bm = mvxpbm_device(mva);
aprint_error_dev(self, "BM pool allocation failure\n"); if (sc->sc_bm == NULL) {
aprint_error_dev(self, "no Buffer Manager.\n");
goto fail; goto fail;
} }
aprint_normal_dev(self,
"Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
aprint_normal_dev(sc->sc_dev,
"%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
mvxpbm_buf_size(sc->sc_bm) / 1024,
mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));
/* /*
* make sure DMA engines are in reset state * make sure DMA engines are in reset state
@ -526,9 +522,9 @@ fail:
STATIC int STATIC int
mvxpe_evcnt_attach(struct mvxpe_softc *sc) mvxpe_evcnt_attach(struct mvxpe_softc *sc)
{ {
#ifdef MVXPE_EVENT_COUNTERS
int q; int q;
#ifdef MVXPE_EVENT_COUNTERS
/* Master Interrupt Handler */ /* Master Interrupt Handler */
evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR, evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
NULL, device_xname(sc->sc_dev), "RxTxTH Intr."); NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
@ -1103,8 +1099,8 @@ mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
rx->rx_queue_len = rx_default_queue_len[q]; rx->rx_queue_len = rx_default_queue_len[q];
if (rx->rx_queue_len > MVXPE_RX_RING_CNT) if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
rx->rx_queue_len = MVXPE_RX_RING_CNT; rx->rx_queue_len = MVXPE_RX_RING_CNT;
rx->rx_queue_th_received = rx->rx_queue_len / 4; rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
rx->rx_queue_th_free = rx->rx_queue_len / 2; rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */ rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
/* Tx handle */ /* Tx handle */
@ -1113,8 +1109,9 @@ mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i; MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
MVXPE_TX_MBUF(sc, q, i) = NULL; MVXPE_TX_MBUF(sc, q, i) = NULL;
/* Tx handle needs DMA map for busdma_load_mbuf() */ /* Tx handle needs DMA map for busdma_load_mbuf() */
if (bus_dmamap_create(sc->sc_dmat, sc->sc_bm.bm_chunk_size, if (bus_dmamap_create(sc->sc_dmat,
MVXPE_TX_SEGLIMIT, sc->sc_bm.bm_chunk_size, 0, mvxpbm_chunk_size(sc->sc_bm),
MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
&MVXPE_TX_MAP(sc, q, i))) { &MVXPE_TX_MAP(sc, q, i))) {
aprint_error_dev(sc->sc_dev, aprint_error_dev(sc->sc_dev,
@ -1126,8 +1123,8 @@ mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
tx->tx_queue_len = tx_default_queue_len[q]; tx->tx_queue_len = tx_default_queue_len[q];
if (tx->tx_queue_len > MVXPE_TX_RING_CNT) if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
tx->tx_queue_len = MVXPE_TX_RING_CNT; tx->tx_queue_len = MVXPE_TX_RING_CNT;
tx->tx_free_cnt = tx->tx_queue_len; tx->tx_used = 0;
tx->tx_queue_th_free = tx->tx_queue_len / 2; tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
} }
STATIC void STATIC void
@ -1144,7 +1141,7 @@ mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
for (i = 0; i < MVXPE_RX_RING_CNT; i++) { for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
if (MVXPE_RX_PKTBUF(sc, q, i) == NULL) if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
continue; continue;
mvxpe_bm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i)); mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
MVXPE_RX_PKTBUF(sc, q, i) = NULL; MVXPE_RX_PKTBUF(sc, q, i) = NULL;
} }
rx->rx_dma = rx->rx_cpu = 0; rx->rx_dma = rx->rx_cpu = 0;
@ -1158,7 +1155,7 @@ mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
MVXPE_TX_MBUF(sc, q, i) = NULL; MVXPE_TX_MBUF(sc, q, i) = NULL;
} }
tx->tx_dma = tx->tx_cpu = 0; tx->tx_dma = tx->tx_cpu = 0;
tx->tx_free_cnt = tx->tx_queue_len; tx->tx_used = 0;
} }
STATIC void STATIC void
@ -1218,18 +1215,23 @@ mvxpe_rx_queue_init(struct ifnet *ifp, int q)
MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q)); MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
/* Rx buffer size and descriptor ring size */ /* Rx buffer size and descriptor ring size */
reg = MVXPE_PRXDQS_BUFFERSIZE(sc->sc_bm.bm_chunk_size >> 3); reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT); reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg); MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n", DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
q, MVXPE_READ(sc, MVXPE_PRXDQS(q))); q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
/* Rx packet offset address */ /* Rx packet offset address */
reg = MVXPE_PRXC_PACKETOFFSET(sc->sc_bm.bm_chunk_packet_offset >> 3); reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
MVXPE_WRITE(sc, MVXPE_PRXC(q), reg); MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n", DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
q, MVXPE_READ(sc, MVXPE_PRXC(q))); q, MVXPE_READ(sc, MVXPE_PRXC(q)));
/* Rx DMA SNOOP */
reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);
/* if DMA is not working, register is not updated */ /* if DMA is not working, register is not updated */
KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q)); KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
return 0; return 0;
@ -1412,58 +1414,51 @@ mvxpe_rxtxth_intr(void *arg)
{ {
struct mvxpe_softc *sc = arg; struct mvxpe_softc *sc = arg;
struct ifnet *ifp = &sc->sc_ethercom.ec_if; struct ifnet *ifp = &sc->sc_ethercom.ec_if;
uint32_t ic, datum = 0; uint32_t ic, queues, datum = 0;
int claimed = 0;
DPRINTSC(sc, 2, "got RXTX_TH_Intr\n"); DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
mvxpe_sc_lock(sc); mvxpe_sc_lock(sc);
for (;;) { ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
ic = MVXPE_READ(sc, MVXPE_PRXTXTIC); if (ic == 0)
if (ic == 0) return 0;
break; MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic); datum = datum ^ ic;
datum = datum ^ ic;
claimed = 1;
DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic); DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
/* route maintance interrupt first */ /* ack maintance interrupt first */
if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) { if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n"); DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
} }
if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) { if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
DPRINTIFNET(ifp, 2, "PTXTXTIC: +PMISCICSUMMARY\n"); DPRINTIFNET(ifp, 2, "PTXTXTIC: +PMISCICSUMMARY\n");
mvxpe_misc_intr(sc); mvxpe_misc_intr(sc);
} }
if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) { if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
DPRINTIFNET(ifp, 2, "PTXTXTIC: +PRXTXICSUMMARY\n"); DPRINTIFNET(ifp, 2, "PTXTXTIC: +PRXTXICSUMMARY\n");
mvxpe_rxtx_intr(sc); mvxpe_rxtx_intr(sc);
} }
if (!(ifp->if_flags & IFF_RUNNING)) if (!(ifp->if_flags & IFF_RUNNING))
break; return 1;
/* RxTx interrupt */ /* RxTxTH interrupt */
if (ic & (MVXPE_PRXTXTI_RBICTAPQ_MASK)) { queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n"); if (queues) {
mvxpe_rx(sc); DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
} mvxpe_rx(sc, queues);
}
if (ic & MVXPE_PRXTXTI_TBTCQ_MASK) { queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n"); if (queues) {
mvxpe_tx_complete(sc); DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
} mvxpe_tx_complete(sc, queues);
}
if (ic & MVXPE_PRXTXTI_RDTAQ_MASK) { queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n"); if (queues) {
mvxpe_rx_reload(sc); DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
} mvxpe_rx_refill(sc, queues);
/* don' loop here. we are using interrupt coalescing */
break;
} }
mvxpe_sc_unlock(sc); mvxpe_sc_unlock(sc);
@ -1472,7 +1467,7 @@ mvxpe_rxtxth_intr(void *arg)
rnd_add_uint32(&sc->sc_rnd_source, datum); rnd_add_uint32(&sc->sc_rnd_source, datum);
return claimed; return 1;
} }
STATIC int STATIC int
@ -1689,8 +1684,8 @@ mvxpe_start(struct ifnet *ifp)
break; break;
} }
mvxpe_tx_unlockq(sc, q); mvxpe_tx_unlockq(sc, q);
KASSERT(sc->sc_tx_ring[q].tx_free_cnt >= 0); KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
KASSERT(sc->sc_tx_ring[q].tx_free_cnt <= KASSERT(sc->sc_tx_ring[q].tx_used <=
sc->sc_tx_ring[q].tx_queue_len); sc->sc_tx_ring[q].tx_queue_len);
DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n"); DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
sc->sc_tx_pending++; sc->sc_tx_pending++;
@ -1763,7 +1758,7 @@ mvxpe_init(struct ifnet *ifp)
for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
mvxpe_rx_lockq(sc, q); mvxpe_rx_lockq(sc, q);
mvxpe_rx_queue_enable(ifp, q); mvxpe_rx_queue_enable(ifp, q);
mvxpe_rx_queue_reload(sc, q); mvxpe_rx_queue_refill(sc, q);
mvxpe_rx_unlockq(sc, q); mvxpe_rx_unlockq(sc, q);
mvxpe_tx_lockq(sc, q); mvxpe_tx_lockq(sc, q);
@ -1876,7 +1871,7 @@ mvxpe_stop(struct ifnet *ifp, int disable)
mvxpe_rx_lockq(sc, q); mvxpe_rx_lockq(sc, q);
mvxpe_tx_lockq(sc, q); mvxpe_tx_lockq(sc, q);
/* Disable Rx packet buffer reloading */ /* Disable Rx packet buffer refill request */
reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
reg |= MVXPE_PRXDQTH_NODT(0); reg |= MVXPE_PRXDQTH_NODT(0);
MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg); MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
@ -1912,7 +1907,7 @@ mvxpe_watchdog(struct ifnet *ifp)
* Reclaim first as there is a possibility of losing Tx completion * Reclaim first as there is a possibility of losing Tx completion
* interrupts. * interrupts.
*/ */
mvxpe_tx_complete(sc); mvxpe_tx_complete(sc, 0xff);
for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
@ -1989,7 +1984,6 @@ mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
*/ */
STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc) STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc)
{ {
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
int linkup; /* bool */ int linkup; /* bool */
KASSERT_SC_MTX(sc); KASSERT_SC_MTX(sc);
@ -2002,7 +1996,10 @@ STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc)
if (sc->sc_linkstate == linkup) if (sc->sc_linkstate == linkup)
return; return;
log(LOG_CRIT, "%s: link %s\n", ifp->if_xname, linkup ? "up" : "down"); #ifdef DEBUG
log(LOG_DEBUG,
"%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
#endif
if (linkup) if (linkup)
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
else else
@ -2081,264 +2078,6 @@ mvxpe_linkreset(struct mvxpe_softc *sc)
mvxpe_linkup(sc); mvxpe_linkup(sc);
} }
/*
* Packet Buffer Manager(BM)
*/
STATIC int
mvxpe_bm_init(struct mvxpe_softc *sc)
{
struct mvxpe_bm_softc *bm = &sc->sc_bm;
bus_dma_segment_t segs;
char *kva, *ptr, *ptr_next, *ptr_data;
char *bm_buf_end;
paddr_t bm_buf_pa;
uint32_t align, pad;
size_t bm_buf_size;
int nsegs, error;
error = 0;
memset(bm, 0, sizeof(*bm));
bm->bm_dmat = sc->sc_dmat;
bm->bm_chunk_count = 0;
bm->bm_chunk_size = MVXPE_BM_SIZE;
bm->bm_chunk_header_size = sizeof(struct mvxpe_bm_chunk);
bm->bm_chunk_packet_offset = 0;
mutex_init(&bm->bm_mtx, MUTEX_DEFAULT, IPL_NET);
LIST_INIT(&bm->bm_free);
LIST_INIT(&bm->bm_inuse);
/*
* adjust bm_chunk_size, bm_chunk_header_size, bm_slotsize
* to satisfy alignemnt restrictions.
*
* <---------------- bm_slotsize [oct.] ------------------>
* <--- bm_chunk_size[oct.] ---->
* <--- header_size[oct] ---> <-- MBXPE_BM_SIZE[oct.] ----->
* +-----------------+--------+---------+-----------------+--+
* | bm_chunk hdr |pad |pkt_off | packet data | |
* +-----------------+--------+---------+-----------------+--+
* ^ ^ ^ ^
* | | | |
* ptr ptr_data DMA here ptr_next
*
* Restrictions:
* - ptr must be aligned to MVXPE_BM_ADDR_ALIGN
* - data must be aligned to MVXPE_RXBUF_ALIGN
* - data size X must be multiple of 8.
*/
/* assume start of buffer at 0x0000.0000 */
ptr = (char *)0;
/* align start of packet data */
ptr_data = ptr + bm->bm_chunk_header_size;
align = (unsigned long)ptr_data & MVXPE_RXBUF_MASK;
if (align != 0) {
pad = MVXPE_RXBUF_ALIGN - align;
bm->bm_chunk_header_size += pad;
DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
}
/* align size of packet data */
ptr_data = ptr + bm->bm_chunk_header_size;
ptr_next = ptr_data + MVXPE_BM_SIZE;
align = (unsigned long)ptr_next & MVXPE_BM_ADDR_MASK;
if (align != 0) {
pad = MVXPE_BM_ADDR_ALIGN - align;
ptr_next += pad;
DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
}
bm->bm_slotsize = ptr_next - ptr;
bm->bm_chunk_size = ptr_next - ptr_data;
KASSERT((bm->bm_chunk_size % 8) == 0);
/* align total buffer size to page boundary */
bm_buf_size = bm->bm_slotsize * MVXPE_BM_SLOTS;
align = (unsigned long)bm_buf_size & (PAGE_SIZE - 1);
if (align != 0) {
pad = PAGE_SIZE - align;
bm_buf_size += pad;
DPRINTSC(sc, 1,
"expand buffer to fit page boundary, %u bytes\n", pad);
}
/*
* get the aligned buffer from busdma(9) framework
*/
if (bus_dmamem_alloc(bm->bm_dmat, bm_buf_size, PAGE_SIZE, 0,
&segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
return ENOBUFS;
}
if (bus_dmamem_map(bm->bm_dmat, &segs, nsegs, bm_buf_size,
(void **)&kva, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev,
"can't map dma buffers (%zu bytes)\n", bm_buf_size);
error = ENOBUFS;
goto fail1;
}
KASSERT(((unsigned long)kva & MVXPE_BM_ADDR_MASK) == 0);
if (bus_dmamap_create(bm->bm_dmat, bm_buf_size, 1, bm_buf_size, 0,
BUS_DMA_NOWAIT, &bm->bm_map)) {
aprint_error_dev(sc->sc_dev, "can't create dma map\n");
error = ENOBUFS;
goto fail2;
}
if (bus_dmamap_load(bm->bm_dmat, bm->bm_map,
kva, bm_buf_size, NULL, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev, "can't load dma map\n");
error = ENOBUFS;
goto fail3;
}
bm->bm_buf = (void *)kva;
bm_buf_end = (void *)(kva + bm_buf_size);
bm_buf_pa = segs.ds_addr;
DPRINTSC(sc, 1, "memory pool at %p\n", bm->bm_buf);
/* slice the buffer */
mvxpe_bm_lock(sc);
for (ptr = bm->bm_buf; ptr + bm->bm_slotsize <= bm_buf_end;
ptr += bm->bm_slotsize) {
struct mvxpe_bm_chunk *chunk;
/* initialzie chunk */
ptr_data = ptr + bm->bm_chunk_header_size;
chunk = (struct mvxpe_bm_chunk *)ptr;
chunk->m = NULL;
chunk->sc = sc;
chunk->off = (ptr - bm->bm_buf);
chunk->pa = (paddr_t)(bm_buf_pa + chunk->off);
chunk->buf_off = (ptr_data - bm->bm_buf);
chunk->buf_pa = (paddr_t)(bm_buf_pa + chunk->buf_off);
chunk->buf_va = (vaddr_t)(bm->bm_buf + chunk->buf_off);
chunk->buf_size = bm->bm_chunk_size;
/* add to array */
bm->bm_slots[bm->bm_chunk_count++] = chunk;
/* add to free list (for software management) */
LIST_INSERT_HEAD(&bm->bm_free, chunk, link);
mvxpe_bm_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
}
mvxpe_bm_unlock(sc);
aprint_normal_dev(sc->sc_dev,
"%zu bytes packet buffer, %zu bytes * %zu entries allocated.\n",
bm_buf_size, bm->bm_chunk_size, bm->bm_chunk_count);
return 0;
fail3:
bus_dmamap_destroy(bm->bm_dmat, bm->bm_map);
fail2:
bus_dmamem_unmap(bm->bm_dmat, kva, bm_buf_size);
fail1:
bus_dmamem_free(bm->bm_dmat, &segs, nsegs);
return error;
}
STATIC int
mvxpe_bm_init_mbuf_hdr(struct mvxpe_bm_chunk *chunk)
{
struct mvxpe_softc *sc = chunk->sc;
KASSERT(chunk->m == NULL);
/* add mbuf header */
MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
if (chunk->m == NULL) {
aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
return ENOBUFS;
}
MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
mvxpe_bm_free_mbuf, chunk);
chunk->m->m_flags |= M_EXT_RW;
chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
if (sc->sc_bm.bm_chunk_packet_offset)
m_adj(chunk->m, sc->sc_bm.bm_chunk_packet_offset);
return 0;
}
STATIC struct mvxpe_bm_chunk *
mvxpe_bm_alloc(struct mvxpe_softc *sc)
{
struct mvxpe_bm_chunk *chunk;
struct mvxpe_bm_softc *bm = &sc->sc_bm;
mvxpe_bm_lock(sc);
chunk = LIST_FIRST(&bm->bm_free);
if (chunk == NULL) {
mvxpe_bm_unlock(sc);
return NULL;
}
LIST_REMOVE(chunk, link);
LIST_INSERT_HEAD(&bm->bm_inuse, chunk, link);
mvxpe_bm_unlock(sc);
return chunk;
}
STATIC void
mvxpe_bm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
struct mvxpe_bm_chunk *chunk = (struct mvxpe_bm_chunk *)arg;
int s;
KASSERT(m != NULL);
KASSERT(arg != NULL);
DPRINTFN(3, "free packet %p\n", m);
if (m->m_flags & M_PKTHDR)
m_tag_delete_chain((m), NULL);
chunk->m = NULL;
s = splvm();
pool_cache_put(mb_cache, m);
splx(s);
return mvxpe_bm_free_chunk(chunk);
}
STATIC void
mvxpe_bm_free_chunk(struct mvxpe_bm_chunk *chunk)
{
struct mvxpe_softc *sc = chunk->sc;
struct mvxpe_bm_softc *bm = &sc->sc_bm;
DPRINTFN(3, "bm chunk free\n");
mvxpe_bm_lock(sc);
LIST_REMOVE(chunk, link);
LIST_INSERT_HEAD(&bm->bm_free, chunk, link);
mvxpe_bm_unlock(sc);
}
STATIC void
mvxpe_bm_sync(struct mvxpe_bm_chunk *chunk, size_t size, int ops)
{
struct mvxpe_softc *sc = (struct mvxpe_softc *)chunk->sc;
struct mvxpe_bm_softc *bm = &sc->sc_bm;
KASSERT(size <= chunk->buf_size);
if (size == 0)
size = chunk->buf_size;
bus_dmamap_sync(bm->bm_dmat, bm->bm_map, chunk->buf_off, size, ops);
}
STATIC void
mvxpe_bm_lock(struct mvxpe_softc *sc)
{
mutex_enter(&sc->sc_bm.bm_mtx);
}
STATIC void
mvxpe_bm_unlock(struct mvxpe_softc *sc)
{
mutex_exit(&sc->sc_bm.bm_mtx);
}
/* /*
* Tx Subroutines * Tx Subroutines
*/ */
@ -2364,9 +2103,9 @@ mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
int start, used; int start, used;
int i; int i;
KASSERT(mutex_owned(&tx->tx_ring_mtx)); KASSERT_TX_MTX(sc, q);
KASSERT(tx->tx_free_cnt >= 0); KASSERT(tx->tx_used >= 0);
KASSERT(tx->tx_free_cnt <= tx->tx_queue_len); KASSERT(tx->tx_used <= tx->tx_queue_len);
/* load mbuf using dmamap of 1st descriptor */ /* load mbuf using dmamap of 1st descriptor */
if (bus_dmamap_load_mbuf(sc->sc_dmat, if (bus_dmamap_load_mbuf(sc->sc_dmat,
@ -2376,7 +2115,7 @@ mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
} }
txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs; txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs; txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
if (txnsegs <= 0 || txnsegs > tx->tx_free_cnt) { if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
/* we have no enough descriptors or mbuf is broken */ /* we have no enough descriptors or mbuf is broken */
bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu)); bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
m_freem(m); m_freem(m);
@ -2411,7 +2150,7 @@ mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
t->bufptr = txsegs[i].ds_addr; t->bufptr = txsegs[i].ds_addr;
t->bytecnt = txsegs[i].ds_len; t->bytecnt = txsegs[i].ds_len;
tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1); tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
tx->tx_free_cnt--; tx->tx_used++;
used++; used++;
} }
/* t is last descriptor here */ /* t is last descriptor here */
@ -2450,8 +2189,8 @@ mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
"PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q))); "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC)); DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
DPRINTIFNET(ifp, 2, DPRINTIFNET(ifp, 2,
"Tx: tx_cpu = %d, tx_dma = %d, tx_free_cnt = %d\n", "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
tx->tx_cpu, tx->tx_dma, tx->tx_free_cnt); tx->tx_cpu, tx->tx_dma, tx->tx_used);
return 0; return 0;
} }
@ -2459,61 +2198,72 @@ STATIC void
mvxpe_tx_set_csumflag(struct ifnet *ifp, mvxpe_tx_set_csumflag(struct ifnet *ifp,
struct mvxpe_tx_desc *t, struct mbuf *m) struct mvxpe_tx_desc *t, struct mbuf *m)
{ {
struct ether_header *eh;
int csum_flags; int csum_flags;
uint32_t iphl = 0, ipoff = 0; uint32_t iphl = 0, ipoff = 0;
csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags; csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
if (csum_flags & (M_CSUM_IPv4| M_CSUM_TCPv4|M_CSUM_UDPv4)) { eh = mtod(m, struct ether_header *);
switch (htons(eh->ether_type)) {
case ETHERTYPE_IP:
case ETHERTYPE_IPV6:
ipoff = ETHER_HDR_LEN;
break;
case ETHERTYPE_VLAN:
ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
break;
}
if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
ipoff = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); t->command |= MVXPE_TX_CMD_L3_IP4;
} }
else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) { else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
ipoff = M_CSUM_DATA_IPv6_OFFSET(m->m_pkthdr.csum_data); t->command |= MVXPE_TX_CMD_L3_IP6;
} }
else { else {
t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
return; return;
} }
/* L3 */ /* L3 */
if (csum_flags & M_CSUM_IPv4) { if (csum_flags & M_CSUM_IPv4) {
t->command |= MVXPE_TX_CMD_L3_IP4;
t->command |= MVXPE_TX_CMD_IP4_CHECKSUM; t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
} }
/* L4 */ /* L4 */
if (csum_flags & M_CSUM_TCPv4) { if ((csum_flags &
t->command |= MVXPE_TX_CMD_L3_IP4; (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) {
t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
}
else if (csum_flags & M_CSUM_TCPv4) {
t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
t->command |= MVXPE_TX_CMD_L4_TCP; t->command |= MVXPE_TX_CMD_L4_TCP;
} }
else if (csum_flags & M_CSUM_UDPv4) { else if (csum_flags & M_CSUM_UDPv4) {
t->command |= MVXPE_TX_CMD_L3_IP4; t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
t->command |= MVXPE_TX_CMD_L4_UDP; t->command |= MVXPE_TX_CMD_L4_UDP;
} }
else if (csum_flags & M_CSUM_TCPv6) { else if (csum_flags & M_CSUM_TCPv6) {
t->command |= MVXPE_TX_CMD_L3_IP6; t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
t->command |= MVXPE_TX_CMD_L4_TCP; t->command |= MVXPE_TX_CMD_L4_TCP;
} }
else if (csum_flags & M_CSUM_UDPv6) { else if (csum_flags & M_CSUM_UDPv6) {
t->command |= MVXPE_TX_CMD_L3_IP6; t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
t->command |= MVXPE_TX_CMD_L4_UDP; t->command |= MVXPE_TX_CMD_L4_UDP;
} }
/*
* NetBSD's networking stack is not request H/W csum on fragmented
* packets.
*/
t->l4ichk = 0; t->l4ichk = 0;
t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
t->command |= MVXPE_TX_CMD_W_IP_HEADER_LEN(iphl >> 2); t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
t->command |= MVXPE_TX_CMD_W_L3_OFFSET(ipoff);
} }
STATIC void STATIC void
mvxpe_tx_complete(struct mvxpe_softc *sc) mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
{ {
struct ifnet *ifp = &sc->sc_ethercom.ec_if; struct ifnet *ifp = &sc->sc_ethercom.ec_if;
int q; int q;
@ -2522,10 +2272,11 @@ mvxpe_tx_complete(struct mvxpe_softc *sc)
KASSERT_SC_MTX(sc); KASSERT_SC_MTX(sc);
/* XXX: check queue bit array */
for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
if (!MVXPE_IS_QUEUE_BUSY(queues, q))
continue;
mvxpe_tx_lockq(sc, q); mvxpe_tx_lockq(sc, q);
mvxpe_tx_queue_del(sc, q); mvxpe_tx_queue_complete(sc, q);
mvxpe_tx_unlockq(sc, q); mvxpe_tx_unlockq(sc, q);
} }
KASSERT(sc->sc_tx_pending >= 0); KASSERT(sc->sc_tx_pending >= 0);
@ -2534,7 +2285,7 @@ mvxpe_tx_complete(struct mvxpe_softc *sc)
} }
STATIC void STATIC void
mvxpe_tx_queue_del(struct mvxpe_softc *sc, int q) mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
{ {
struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
struct mvxpe_tx_desc *t; struct mvxpe_tx_desc *t;
@ -2585,14 +2336,14 @@ mvxpe_tx_queue_del(struct mvxpe_softc *sc, int q)
else else
KASSERT((t->flags & MVXPE_TX_CMD_F) == 0); KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
tx->tx_dma = tx_counter_adv(tx->tx_dma, 1); tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
tx->tx_free_cnt++; tx->tx_used--;
if (error) if (error)
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
else else
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
} }
KASSERT(tx->tx_free_cnt >= 0); KASSERT(tx->tx_used >= 0);
KASSERT(tx->tx_free_cnt <= tx->tx_queue_len); KASSERT(tx->tx_used <= tx->tx_queue_len);
while (ndesc > 255) { while (ndesc > 255) {
ptxsu = MVXPE_PTXSU_NORB(255); ptxsu = MVXPE_PTXSU_NORB(255);
MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
@ -2603,22 +2354,22 @@ mvxpe_tx_queue_del(struct mvxpe_softc *sc, int q)
MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
} }
DPRINTSC(sc, 2, DPRINTSC(sc, 2,
"Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_free_cnt = %d\n", "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
q, tx->tx_cpu, tx->tx_dma, tx->tx_free_cnt); q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
} }
/* /*
* Rx Subroutines * Rx Subroutines
*/ */
STATIC void STATIC void
mvxpe_rx(struct mvxpe_softc *sc) mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
{ {
int q, npkt; int q, npkt;
KASSERT_SC_MTX(sc); KASSERT_SC_MTX(sc);
while ( (npkt = mvxpe_rx_queue_select(sc, &q))) { while ( (npkt = mvxpe_rx_queue_select(sc, queues, &q))) {
/* mutex is held by rx_queue_sel */ /* mutex is held by rx_queue_select */
mvxpe_rx_queue(sc, q, npkt); mvxpe_rx_queue(sc, q, npkt);
mvxpe_rx_unlockq(sc, q); mvxpe_rx_unlockq(sc, q);
} }
@ -2630,7 +2381,7 @@ mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
struct ifnet *ifp = &sc->sc_ethercom.ec_if; struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
struct mvxpe_rx_desc *r; struct mvxpe_rx_desc *r;
struct mvxpe_bm_chunk *chunk; struct mvxpbm_chunk *chunk;
struct mbuf *m; struct mbuf *m;
uint32_t prxsu; uint32_t prxsu;
int error = 0; int error = 0;
@ -2646,7 +2397,7 @@ mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma); chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL; MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
r = MVXPE_RX_DESC(sc, q, rx->rx_dma); r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
mvxpe_bm_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD); mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
/* check errors */ /* check errors */
if (r->status & MVXPE_RX_ES) { if (r->status & MVXPE_RX_ES) {
@ -2686,7 +2437,10 @@ mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
} }
/* extract packet buffer */ /* extract packet buffer */
mvxpe_bm_init_mbuf_hdr(chunk); if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
error = 1;
goto rx_done;
}
m = chunk->m; m = chunk->m;
m->m_pkthdr.rcvif = ifp; m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
@ -2699,7 +2453,7 @@ mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
rx_done: rx_done:
if (chunk) { if (chunk) {
/* rx error. just return the chunk to BM. */ /* rx error. just return the chunk to BM. */
mvxpe_bm_free_chunk(chunk); mvxpbm_free_chunk(chunk);
} }
if (error) if (error)
MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]); MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
@ -2733,7 +2487,7 @@ rx_done:
} }
STATIC int STATIC int
mvxpe_rx_queue_select(struct mvxpe_softc *sc, int *queue) mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
{ {
uint32_t prxs, npkt; uint32_t prxs, npkt;
int q; int q;
@ -2743,6 +2497,9 @@ mvxpe_rx_queue_select(struct mvxpe_softc *sc, int *queue)
DPRINTSC(sc, 2, "selecting rx queue\n"); DPRINTSC(sc, 2, "selecting rx queue\n");
for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) { for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
if (!MVXPE_IS_QUEUE_BUSY(queues, q))
continue;
prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
npkt = MVXPE_PRXS_GET_ODC(prxs); npkt = MVXPE_PRXS_GET_ODC(prxs);
if (npkt == 0) if (npkt == 0)
@ -2760,7 +2517,7 @@ mvxpe_rx_queue_select(struct mvxpe_softc *sc, int *queue)
} }
STATIC void STATIC void
mvxpe_rx_reload(struct mvxpe_softc *sc) mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
{ {
int q; int q;
@ -2768,37 +2525,38 @@ mvxpe_rx_reload(struct mvxpe_softc *sc)
/* XXX: check rx bit array */ /* XXX: check rx bit array */
for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
if (!MVXPE_IS_QUEUE_BUSY(queues, q))
continue;
mvxpe_rx_lockq(sc, q); mvxpe_rx_lockq(sc, q);
mvxpe_rx_queue_refill(sc, q);
mvxpe_rx_queue_reload(sc, q);
mvxpe_rx_unlockq(sc, q); mvxpe_rx_unlockq(sc, q);
} }
} }
STATIC void STATIC void
mvxpe_rx_queue_reload(struct mvxpe_softc *sc, int q) mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
{ {
struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
uint32_t prxs, prxsu, ndesc; uint32_t prxs, prxsu, ndesc;
int idx, reload = 0; int idx, refill = 0;
int npkt; int npkt;
KASSERT_RX_MTX(sc, q); KASSERT_RX_MTX(sc, q);
prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs); ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
reload = rx->rx_queue_len - ndesc; refill = rx->rx_queue_len - ndesc;
if (reload <= 0) if (refill <= 0)
return; return;
DPRINTPRXS(2, q); DPRINTPRXS(2, q);
DPRINTSC(sc, 2, "%d buffers to reload.\n", reload); DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
idx = rx->rx_cpu; idx = rx->rx_cpu;
for (npkt = 0; npkt < reload; npkt++) for (npkt = 0; npkt < refill; npkt++)
if (mvxpe_rx_queue_add(sc, q) != 0) if (mvxpe_rx_queue_add(sc, q) != 0)
break; break;
DPRINTSC(sc, 2, "queue %d, %d buffer reloaded.\n", q, npkt); DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
if (npkt == 0) if (npkt == 0)
return; return;
@ -2823,12 +2581,12 @@ mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
{ {
struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
struct mvxpe_rx_desc *r; struct mvxpe_rx_desc *r;
struct mvxpe_bm_chunk *chunk = NULL; struct mvxpbm_chunk *chunk = NULL;
KASSERT_RX_MTX(sc, q); KASSERT_RX_MTX(sc, q);
/* Allocate the packet buffer */ /* Allocate the packet buffer */
chunk = mvxpe_bm_alloc(sc); chunk = mvxpbm_alloc(sc->sc_bm);
if (chunk == NULL) { if (chunk == NULL) {
DPRINTSC(sc, 1, "BM chunk allocation failed.\n"); DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
return ENOBUFS; return ENOBUFS;
@ -2837,7 +2595,7 @@ mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
/* Add the packet to descritor */ /* Add the packet to descritor */
KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL); KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk; MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
mvxpe_bm_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD); mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
r = MVXPE_RX_DESC(sc, q, rx->rx_cpu); r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
r->bufptr = chunk->buf_pa; r->bufptr = chunk->buf_pa;
@ -3139,8 +2897,10 @@ sysctl_set_queue_length(SYSCTLFN_ARGS)
case MVXPE_SYSCTL_RX: case MVXPE_SYSCTL_RX:
mvxpe_rx_lockq(sc, arg->queue); mvxpe_rx_lockq(sc, arg->queue);
rx->rx_queue_len = val; rx->rx_queue_len = val;
rx->rx_queue_th_received = rx->rx_queue_len / 4; rx->rx_queue_th_received =
rx->rx_queue_th_free = rx->rx_queue_len / 2; rx->rx_queue_len / MVXPE_RXTH_RATIO;
rx->rx_queue_th_free =
rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free); reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
@ -3151,7 +2911,8 @@ sysctl_set_queue_length(SYSCTLFN_ARGS)
case MVXPE_SYSCTL_TX: case MVXPE_SYSCTL_TX:
mvxpe_tx_lockq(sc, arg->queue); mvxpe_tx_lockq(sc, arg->queue);
tx->tx_queue_len = val; tx->tx_queue_len = val;
tx->tx_queue_th_free = tx->tx_queue_len / 2; tx->tx_queue_th_free =
tx->tx_queue_len / MVXPE_TXTH_RATIO;
reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT); reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);


@ -1,4 +1,4 @@
/* $NetBSD: if_mvxpereg.h,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $ */ /* $NetBSD: if_mvxpereg.h,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $ */
/* /*
* Copyright (c) 2015 Internet Initiative Japan Inc. * Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved. * All rights reserved.
@ -705,12 +705,15 @@
/* Port RX_TX Threshold Interrupt Cause/Mask (MVXPE_PRXTXTIC/MVXPE_PRXTXTIM) */ /* Port RX_TX Threshold Interrupt Cause/Mask (MVXPE_PRXTXTIC/MVXPE_PRXTXTIM) */
#define MVXPE_PRXTXTI_TBTCQ(q) (1 << ((q) + 0)) #define MVXPE_PRXTXTI_TBTCQ(q) (1 << ((q) + 0))
#define MVXPE_PRXTXTI_TBTCQ_MASK (0xff << 0) #define MVXPE_PRXTXTI_TBTCQ_MASK (0xff << 0)
#define MVXPE_PRXTXTI_GET_TBTCQ(reg) (((reg) >> 0) & 0xff)
/* Tx Buffer Threshold Cross Queue*/ /* Tx Buffer Threshold Cross Queue*/
#define MVXPE_PRXTXTI_RBICTAPQ(q) (1 << ((q) + 8)) #define MVXPE_PRXTXTI_RBICTAPQ(q) (1 << ((q) + 8))
#define MVXPE_PRXTXTI_RBICTAPQ_MASK (0xff << 8) #define MVXPE_PRXTXTI_RBICTAPQ_MASK (0xff << 8)
#define MVXPE_PRXTXTI_GET_RBICTAPQ(reg) (((reg) >> 8) & 0xff)
/* Rx Buffer Int. Coaleasing Th. Pri. Alrt Q */ /* Rx Buffer Int. Coaleasing Th. Pri. Alrt Q */
#define MVXPE_PRXTXTI_RDTAQ(q) (1 << ((q) + 16)) #define MVXPE_PRXTXTI_RDTAQ(q) (1 << ((q) + 16))
#define MVXPE_PRXTXTI_RDTAQ_MASK (0xff << 16) #define MVXPE_PRXTXTI_RDTAQ_MASK (0xff << 16)
#define MVXPE_PRXTXTI_GET_RDTAQ(reg) (((reg) >> 16) & 0xff)
/* Rx Descriptor Threshold Alert Queue*/ /* Rx Descriptor Threshold Alert Queue*/
#define MVXPE_PRXTXTI_PRXTXICSUMMARY (1 << 29) /* PRXTXI summary */ #define MVXPE_PRXTXTI_PRXTXICSUMMARY (1 << 29) /* PRXTXI summary */
#define MVXPE_PRXTXTI_PTXERRORSUMMARY (1 << 30) /* PTEXERROR summary */ #define MVXPE_PRXTXTI_PTXERRORSUMMARY (1 << 30) /* PTEXERROR summary */
@ -719,10 +722,13 @@
/* Port RX_TX Interrupt Cause/Mask (MVXPE_PRXTXIC/MVXPE_PRXTXIM) */ /* Port RX_TX Interrupt Cause/Mask (MVXPE_PRXTXIC/MVXPE_PRXTXIM) */
#define MVXPE_PRXTXI_TBRQ(q) (1 << ((q) + 0)) #define MVXPE_PRXTXI_TBRQ(q) (1 << ((q) + 0))
#define MVXPE_PRXTXI_TBRQ_MASK (0xff << 0) #define MVXPE_PRXTXI_TBRQ_MASK (0xff << 0)
#define MVXPE_PRXTXI_GET_TBRQ(reg) (((reg) >> 0) & 0xff)
#define MVXPE_PRXTXI_RPQ(q) (1 << ((q) + 8)) #define MVXPE_PRXTXI_RPQ(q) (1 << ((q) + 8))
#define MVXPE_PRXTXI_RPQ_MASK (0xff << 8) #define MVXPE_PRXTXI_RPQ_MASK (0xff << 8)
#define MVXPE_PRXTXI_GET_RPQ(reg) (((reg) >> 8) & 0xff)
#define MVXPE_PRXTXI_RREQ(q) (1 << ((q) + 16)) #define MVXPE_PRXTXI_RREQ(q) (1 << ((q) + 16))
#define MVXPE_PRXTXI_RREQ_MASK (0xff << 16) #define MVXPE_PRXTXI_RREQ_MASK (0xff << 16)
#define MVXPE_PRXTXI_GET_RREQ(reg) (((reg) >> 16) & 0xff)
#define MVXPE_PRXTXI_PRXTXTHICSUMMARY (1 << 29) #define MVXPE_PRXTXI_PRXTXTHICSUMMARY (1 << 29)
#define MVXPE_PRXTXI_PTXERRORSUMMARY (1 << 30) #define MVXPE_PRXTXI_PTXERRORSUMMARY (1 << 30)
#define MVXPE_PRXTXI_PMISCICSUMMARY (1 << 31) #define MVXPE_PRXTXI_PMISCICSUMMARY (1 << 31)
@ -854,18 +860,16 @@ struct mvxpe_rx_desc {
#define MVXPE_TX_CMD_L (1 << 20) /* Last buffer */ #define MVXPE_TX_CMD_L (1 << 20) /* Last buffer */
#define MVXPE_TX_CMD_PADDING (1 << 19) /* Pad short frame */ #define MVXPE_TX_CMD_PADDING (1 << 19) /* Pad short frame */
#define MVXPE_TX_CMD_IP4_CHECKSUM (1 << 18) /* Do IPv4 Checksum */ #define MVXPE_TX_CMD_IP4_CHECKSUM (1 << 18) /* Do IPv4 Checksum */
#define MVXPE_TX_CMD_L3_TYPE (1 << 17) /* L3 Type 0:IP4, 1:IP6 */
#define MVXPE_TX_CMD_L3_IP4 (0 << 17) #define MVXPE_TX_CMD_L3_IP4 (0 << 17)
#define MVXPE_TX_CMD_L3_IP6 (0 << 17) #define MVXPE_TX_CMD_L3_IP6 (1 << 17)
#define MVXPE_TX_CMD_L4_TYPE (1 << 16) /* L4 Type 0:TCP, 1:UDP */
#define MVXPE_TX_CMD_L4_TCP (0 << 16) #define MVXPE_TX_CMD_L4_TCP (0 << 16)
#define MVXPE_TX_CMD_L4_UDP (1 << 16) #define MVXPE_TX_CMD_L4_UDP (1 << 16)
/* bit 15:13 reserved */ /* bit 15:13 reserved */
#define MVXPE_TX_CMD_IP_HEADER_LEN_MASK (0x1f << 8) /* IP header len >> 2 */ #define MVXPE_TX_CMD_IP_HEADER_LEN_MASK (0x1f << 8) /* IP header len >> 2 */
#define MVXPE_TX_CMD_W_IP_HEADER_LEN(v) (((v) & 0x1f) << 8) #define MVXPE_TX_CMD_IP_HEADER_LEN(v) (((v) & 0x1f) << 8)
/* bit 7 reserved */ /* bit 7 reserved */
#define MVXPE_TX_CMD_L3_OFFSET_MASK (0x7f << 0) /* offset of L3 hdr. */ #define MVXPE_TX_CMD_L3_OFFSET_MASK (0x7f << 0) /* offset of L3 hdr. */
#define MVXPE_TX_CMD_W_L3_OFFSET(v) (((v) & 0x7f) << 0) #define MVXPE_TX_CMD_L3_OFFSET(v) (((v) & 0x7f) << 0)
/* /*
* Transmit pakcet extra attributes * Transmit pakcet extra attributes


@ -1,4 +1,4 @@
/* $NetBSD: if_mvxpevar.h,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $ */ /* $NetBSD: if_mvxpevar.h,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $ */
/* /*
* Copyright (c) 2015 Internet Initiative Japan Inc. * Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved. * All rights reserved.
@ -26,12 +26,8 @@
*/ */
#ifndef _IF_MVXPEVAR_H_ #ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_ #define _IF_MVXPEVAR_H_
/* #include <net/if.h>
* Comple options #include <dev/marvell/mvxpbmvar.h>
* XXX: use kernel config
*/
#define MVXPE_DEBUG 0
#define MVXPE_EVENT_COUNTERS
/* /*
* Limit of packet sizes. * Limit of packet sizes.
@ -48,23 +44,28 @@
* *
* XXX: packet classifier is not implement yet * XXX: packet classifier is not implement yet
*/ */
#define MVXPE_RX_QUEUE_LIMIT_0 8 #define MVXPE_RX_QUEUE_LIMIT_0 8
#define MVXPE_RX_QUEUE_LIMIT_1 8 #define MVXPE_RX_QUEUE_LIMIT_1 8
#define MVXPE_RX_QUEUE_LIMIT_2 8 #define MVXPE_RX_QUEUE_LIMIT_2 8
#define MVXPE_RX_QUEUE_LIMIT_3 8 #define MVXPE_RX_QUEUE_LIMIT_3 8
#define MVXPE_RX_QUEUE_LIMIT_4 8 #define MVXPE_RX_QUEUE_LIMIT_4 8
#define MVXPE_RX_QUEUE_LIMIT_5 8 #define MVXPE_RX_QUEUE_LIMIT_5 8
#define MVXPE_RX_QUEUE_LIMIT_6 8 #define MVXPE_RX_QUEUE_LIMIT_6 8
#define MVXPE_RX_QUEUE_LIMIT_7 256 #define MVXPE_RX_QUEUE_LIMIT_7 IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_0 256 #define MVXPE_TX_QUEUE_LIMIT_0 IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_1 8 #define MVXPE_TX_QUEUE_LIMIT_1 8
#define MVXPE_TX_QUEUE_LIMIT_2 8 #define MVXPE_TX_QUEUE_LIMIT_2 8
#define MVXPE_TX_QUEUE_LIMIT_3 8 #define MVXPE_TX_QUEUE_LIMIT_3 8
#define MVXPE_TX_QUEUE_LIMIT_4 8 #define MVXPE_TX_QUEUE_LIMIT_4 8
#define MVXPE_TX_QUEUE_LIMIT_5 8 #define MVXPE_TX_QUEUE_LIMIT_5 8
#define MVXPE_TX_QUEUE_LIMIT_6 8 #define MVXPE_TX_QUEUE_LIMIT_6 8
#define MVXPE_TX_QUEUE_LIMIT_7 8 #define MVXPE_TX_QUEUE_LIMIT_7 8
/* interrupt is triggered when corossing (queuelen / RATIO) */
#define MVXPE_RXTH_RATIO 8
#define MVXPE_RXTH_REFILL_RATIO 2
#define MVXPE_TXTH_RATIO 8
/* /*
* Device Register access * Device Register access
@ -85,6 +86,9 @@
#define MVXPE_IS_LINKUP(sc) \ #define MVXPE_IS_LINKUP(sc) \
(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP) (MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)
#define MVXPE_IS_QUEUE_BUSY(queues, q) \
((((queues) >> (q)) & 0x1))
/* /*
* EEE: Lower Power Idle config * EEE: Lower Power Idle config
* Default timer is duration of MTU sized frame transmission. * Default timer is duration of MTU sized frame transmission.
@ -101,10 +105,10 @@
* the ethernet device has 8 rx/tx DMA queues. each of queue has its own * the ethernet device has 8 rx/tx DMA queues. each of queue has its own
* decriptor list. descriptors are simply index by counter inside the device. * decriptor list. descriptors are simply index by counter inside the device.
*/ */
#define MVXPE_TX_RING_CNT 256 #define MVXPE_TX_RING_CNT IFQ_MAXLEN
#define MVXPE_TX_RING_MSK (MVXPE_TX_RING_CNT - 1) #define MVXPE_TX_RING_MSK (MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x) (((x) + 1) & MVXPE_TX_RING_MSK) #define MVXPE_TX_RING_NEXT(x) (((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT 256 #define MVXPE_RX_RING_CNT IFQ_MAXLEN
#define MVXPE_RX_RING_MSK (MVXPE_RX_RING_CNT - 1) #define MVXPE_RX_RING_MSK (MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x) (((x) + 1) & MVXPE_RX_RING_MSK) #define MVXPE_RX_RING_NEXT(x) (((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT 32 #define MVXPE_TX_SEGLIMIT 32
@ -118,7 +122,7 @@ struct mvxpe_rx_ring {
struct mvxpe_rx_handle { struct mvxpe_rx_handle {
struct mvxpe_rx_desc *rxdesc_va; struct mvxpe_rx_desc *rxdesc_va;
off_t rxdesc_off; /* from rx_descriptors[0] */ off_t rxdesc_off; /* from rx_descriptors[0] */
struct mvxpe_bm_chunk *chunk; struct mvxpbm_chunk *chunk;
} rx_handle[MVXPE_RX_RING_CNT]; } rx_handle[MVXPE_RX_RING_CNT];
/* locks */ /* locks */
@ -152,7 +156,7 @@ struct mvxpe_tx_ring {
kmutex_t tx_ring_mtx; kmutex_t tx_ring_mtx;
/* Index */ /* Index */
int tx_free_cnt; int tx_used;
int tx_dma; int tx_dma;
int tx_cpu; int tx_cpu;
@ -183,14 +187,6 @@ rx_counter_adv(int ctr, int n)
return ctr; return ctr;
} }
/*
* Buffer alignement
*/
#define MVXPE_RXBUF_ALIGN 32 /* Cache line size */
#define MVXPE_RXBUF_MASK (MVXPE_RXBUF_ALIGN - 1)
#define MVXPE_BM_ADDR_ALIGN 32
#define MVXPE_BM_ADDR_MASK (MVXPE_BM_ADDR_ALIGN - 1)
/* /*
* Timeout control * Timeout control
*/ */
@ -388,67 +384,6 @@ struct mvxpe_sysctl_mib {
uint64_t counter; uint64_t counter;
}; };
/*
* Packet Buffer Header
*
* this chunks may be managed by H/W Buffer Manger(BM) device,
* but there is no device driver yet.
*
* +----------------+ bm_buf
* |chunk header | |
* +----------------+ | | |chunk->buf_off
* |mbuf (M_EXT set)|<--------|struct mbuf *m | V
* +----------------+ +----------------+ chunk->buf_va/buf_pa
* | m_ext.ext_buf|-------->|packet buffer | |
* +----------------+ | | |chunk->buf_size
* | | V
* +----------------+
* |chunk header |
* |.... |
*/
#define MVXPE_BM_SLOTS \
(MVXPE_RX_RING_CNT * (MVXPE_QUEUE_SIZE + 1))
#define MVXPE_BM_SIZE \
(MVXPE_MRU + MVXPE_HWHEADER_SIZE)
struct mvxpe_bm_chunk {
struct mbuf *m; /* back pointer to mbuf header */
void *sc; /* back pointer to softc */
off_t off; /* offset of chunk */
paddr_t pa; /* physical address of chunk */
off_t buf_off; /* offset of packet from sc_bm_buf */
paddr_t buf_pa; /* physical address of packet */
vaddr_t buf_va; /* virtual addres of packet */
size_t buf_size; /* size of buffer (exclude hdr) */
LIST_ENTRY(mvxpe_bm_chunk) link;
/* followed by packet buffer */
};
struct mvxpe_bm_softc {
bus_dma_tag_t bm_dmat;
bus_dmamap_t bm_map;
kmutex_t bm_mtx;
/* DMA MAP for entire buffer */
char *bm_buf;
/* memory chunk properties */
size_t bm_slotsize; /* size of bm_slots include header */
size_t bm_chunk_count; /* number of chunks */
size_t bm_chunk_size; /* size of packet buffer */
off_t bm_chunk_header_size; /* size of hader + padding */
off_t bm_chunk_packet_offset; /* allocate m_leading_space */
struct mvxpe_bm_chunk *bm_slots[MVXPE_BM_SLOTS];
/* for software based management */
LIST_HEAD(__mvxpe_bm_freehead, mvxpe_bm_chunk) bm_free;
LIST_HEAD(__mvxpe_bm_inusehead, mvxpe_bm_chunk) bm_inuse;
} sc_bm;
#define BM_SYNC_ALL 0
/* /*
* Ethernet Device main context * Ethernet Device main context
*/ */
@ -495,9 +430,8 @@ struct mvxpe_softc {
/* /*
* Software Buffer Manager * Software Buffer Manager
* XXX: to be writtten the independent device driver.
*/ */
struct mvxpe_bm_softc sc_bm; struct mvxpbm_softc *sc_bm;
/* /*
* Maintance clock * Maintance clock

sys/dev/marvell/mvxpbm.c (new file, 492 lines)

@ -0,0 +1,492 @@
/* $NetBSD: mvxpbm.c,v 1.1 2015/06/03 03:55:47 hsuenaga Exp $ */
/*
* Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvxpbm.c,v 1.1 2015/06/03 03:55:47 hsuenaga Exp $");
#include "opt_multiprocessor.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include "mvxpbmvar.h"
#ifdef DEBUG
#define STATIC /* nothing */
#define DPRINTF(fmt, ...) \
do { \
if (mvxpbm_debug >= 1) { \
printf("%s: ", __func__); \
printf((fmt), ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTFN(level , fmt, ...) \
do { \
if (mvxpbm_debug >= (level)) { \
printf("%s: ", __func__); \
printf((fmt), ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
do { \
if (mvxpbm_debug >= (level)) { \
device_printf((dev), \
"%s: "fmt , __func__, ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
do { \
device_t dev = (sc)->sc_dev; \
if (mvxpbm_debug >= (level)) { \
device_printf(dev, \
"%s: " fmt, __func__, ##__VA_ARGS__); \
} \
} while (/*CONSTCOND*/0)
#else
#define STATIC static
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#endif
/* autoconf(9) */
STATIC int mvxpbm_match(device_t, cfdata_t, void *);
STATIC void mvxpbm_attach(device_t, device_t, void *);
STATIC int mvxpbm_evcnt_attach(struct mvxpbm_softc *);
CFATTACH_DECL_NEW(mvxpbm_mbus, sizeof(struct mvxpbm_softc),
mvxpbm_match, mvxpbm_attach, NULL, NULL);
/* DMA buffers */
STATIC int mvxpbm_alloc_buffer(struct mvxpbm_softc *);
/* mbuf subroutines */
STATIC void mvxpbm_free_mbuf(struct mbuf *, void *, size_t, void *);
/* singleton device instance */
static struct mvxpbm_softc sc_emul;
static struct mvxpbm_softc *sc0;
/* debug level */
#ifdef DEBUG
static int mvxpbm_debug = 0;
#endif
/*
* autoconf(9)
*/
STATIC int
mvxpbm_match(device_t parent, cfdata_t match, void *aux)
{
struct marvell_attach_args *mva = aux;
if (strcmp(mva->mva_name, match->cf_name) != 0)
return 0;
if (mva->mva_unit > MVXPBM_UNIT_MAX)
return 0;
if (sc0 != NULL)
return 0;
if (mva->mva_offset != MVA_OFFSET_DEFAULT) {
/* Hardware BM is not supported yet. */
return 0;
}
return 1;
}
STATIC void
mvxpbm_attach(device_t parent, device_t self, void *aux)
{
struct marvell_attach_args *mva = aux;
struct mvxpbm_softc *sc = device_private(self);
aprint_naive("\n");
aprint_normal(": Marvell ARMADA Buffer Manager\n");
memset(sc, 0, sizeof(*sc));
sc->sc_dev = self;
sc->sc_iot = mva->mva_iot;
sc->sc_dmat = mva->mva_dmat;
if (mva->mva_offset == MVA_OFFSET_DEFAULT) {
aprint_normal_dev(sc->sc_dev, "Software emulation.\n");
sc->sc_emul = 1;
}
mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
LIST_INIT(&sc->sc_free);
LIST_INIT(&sc->sc_inuse);
/* DMA buffers */
if (mvxpbm_alloc_buffer(sc) != 0)
return;
/* event counters */
mvxpbm_evcnt_attach(sc);
sc0 = sc;
return;
}
STATIC int
mvxpbm_evcnt_attach(struct mvxpbm_softc *sc)
{
return 0;
}
/*
* DMA buffers
*/
STATIC int
mvxpbm_alloc_buffer(struct mvxpbm_softc *sc)
{
bus_dma_segment_t segs;
char *kva, *ptr, *ptr_next, *ptr_data;
char *bm_buf_end;
uint32_t align, pad;
int nsegs;
int error;
/*
* set default buffer sizes. these will be changed to satisfy
* alignment restrictions.
*/
sc->sc_chunk_count = 0;
sc->sc_chunk_size = MVXPBM_PACKET_SIZE;
sc->sc_chunk_header_size = sizeof(struct mvxpbm_chunk);
sc->sc_chunk_packet_offset = 64;
/*
* adjust sc_chunk_size, sc_chunk_header_size and sc_slotsize
* to satisfy alignment restrictions.
*
* <----------------- sc_slotsize [oct.] ------------------->
*                            <---- sc_chunk_size [oct.] ---->
* <- sc_chunk_header_size --->
* +-----------------+--------+---------+-----------------+--+
* | bm_chunk hdr    |pad     |pkt_off  | packet data     |  |
* +-----------------+--------+---------+-----------------+--+
* ^                          ^         ^                    ^
* |                          |         |                    |
* ptr                        ptr_data  DMA here             ptr_next
*
* Restrictions:
* - total buffer size must be a multiple of MVXPBM_BUF_ALIGN
* - ptr must be aligned to MVXPBM_CHUNK_ALIGN
* - ptr_data must be aligned to MVXPBM_DATA_ALIGN
* - sc_chunk_size must be a multiple of MVXPBM_DATA_UNIT (8 bytes)
*/
/* start calculation from 0x0000.0000 */
ptr = (char *)0;
/* align start of packet data */
ptr_data = ptr + sc->sc_chunk_header_size;
align = (unsigned long)ptr_data & MVXPBM_DATA_MASK;
if (align != 0) {
pad = MVXPBM_DATA_ALIGN - align;
sc->sc_chunk_header_size += pad;
DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
}
/* align size of packet data */
ptr_data = ptr + sc->sc_chunk_header_size;
ptr_next = ptr_data + MVXPBM_PACKET_SIZE;
align = (unsigned long)ptr_next & MVXPBM_CHUNK_MASK;
if (align != 0) {
pad = MVXPBM_CHUNK_ALIGN - align;
ptr_next += pad;
DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
}
sc->sc_slotsize = ptr_next - ptr;
sc->sc_chunk_size = ptr_next - ptr_data;
KASSERT((sc->sc_chunk_size % MVXPBM_DATA_UNIT) == 0);
/* align total buffer size to Mbus window boundary */
sc->sc_buf_size = sc->sc_slotsize * MVXPBM_NUM_SLOTS;
align = (unsigned long)sc->sc_buf_size & MVXPBM_BUF_MASK;
if (align != 0) {
pad = MVXPBM_BUF_ALIGN - align;
sc->sc_buf_size += pad;
DPRINTSC(sc, 1,
"expand buffer to fit page boundary, %u bytes\n", pad);
}
/*
* get the aligned buffer from busdma(9) framework
*/
if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_buf_size, MVXPBM_BUF_ALIGN, 0,
&segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
return ENOBUFS;
}
if (bus_dmamem_map(sc->sc_dmat, &segs, nsegs, sc->sc_buf_size,
(void **)&kva, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev,
"can't map dma buffers (%zu bytes)\n", sc->sc_buf_size);
error = ENOBUFS;
goto fail1;
}
if (bus_dmamap_create(sc->sc_dmat, sc->sc_buf_size, 1, sc->sc_buf_size,
0, BUS_DMA_NOWAIT, &sc->sc_buf_map)) {
aprint_error_dev(sc->sc_dev, "can't create dma map\n");
error = ENOBUFS;
goto fail2;
}
if (bus_dmamap_load(sc->sc_dmat, sc->sc_buf_map,
kva, sc->sc_buf_size, NULL, BUS_DMA_NOWAIT)) {
aprint_error_dev(sc->sc_dev, "can't load dma map\n");
error = ENOBUFS;
goto fail3;
}
sc->sc_buf = (void *)kva;
sc->sc_buf_pa = segs.ds_addr;
bm_buf_end = (void *)(kva + sc->sc_buf_size);
DPRINTSC(sc, 1, "memory pool at %p\n", sc->sc_buf);
/* slice the buffer */
mvxpbm_lock(sc);
for (ptr = sc->sc_buf; ptr + sc->sc_slotsize <= bm_buf_end;
ptr += sc->sc_slotsize) {
struct mvxpbm_chunk *chunk;
/* initialize chunk */
ptr_data = ptr + sc->sc_chunk_header_size;
chunk = (struct mvxpbm_chunk *)ptr;
chunk->m = NULL;
chunk->sc = sc;
chunk->off = (ptr - sc->sc_buf);
chunk->pa = (paddr_t)(sc->sc_buf_pa + chunk->off);
chunk->buf_off = (ptr_data - sc->sc_buf);
chunk->buf_pa = (paddr_t)(sc->sc_buf_pa + chunk->buf_off);
chunk->buf_va = (vaddr_t)(sc->sc_buf + chunk->buf_off);
chunk->buf_size = sc->sc_chunk_size;
/* add to free list (for software management) */
LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
sc->sc_chunk_count++;
DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
}
mvxpbm_unlock(sc);
return 0;
fail3:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_buf_map);
fail2:
bus_dmamem_unmap(sc->sc_dmat, kva, sc->sc_buf_size);
fail1:
bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
return error;
}
/*
* mbuf subroutines
*/
STATIC void
mvxpbm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
struct mvxpbm_chunk *chunk = (struct mvxpbm_chunk *)arg;
int s;
KASSERT(m != NULL);
KASSERT(arg != NULL);
DPRINTFN(3, "free packet %p\n", m);
if (m->m_flags & M_PKTHDR)
m_tag_delete_chain((m), NULL);
chunk->m = NULL;
s = splvm();
pool_cache_put(mb_cache, m);
splx(s);
return mvxpbm_free_chunk(chunk);
}
/*
* Exported APIs
*/
/* get mvxpbm device context */
struct mvxpbm_softc *
mvxpbm_device(struct marvell_attach_args *mva)
{
struct mvxpbm_softc *sc;
if (sc0 != NULL)
return sc0;
if (mva == NULL)
return NULL;
/* allocate software emulation context */
sc = &sc_emul;
memset(sc, 0, sizeof(*sc));
sc->sc_emul = 1;
sc->sc_iot = mva->mva_iot;
sc->sc_dmat = mva->mva_dmat;
mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
LIST_INIT(&sc->sc_free);
LIST_INIT(&sc->sc_inuse);
if (mvxpbm_alloc_buffer(sc) != 0)
return NULL;
mvxpbm_evcnt_attach(sc);
sc0 = sc;
return sc0;
}
/* allocate new memory chunk */
struct mvxpbm_chunk *
mvxpbm_alloc(struct mvxpbm_softc *sc)
{
struct mvxpbm_chunk *chunk;
mvxpbm_lock(sc);
chunk = LIST_FIRST(&sc->sc_free);
if (chunk == NULL) {
mvxpbm_unlock(sc);
return NULL;
}
LIST_REMOVE(chunk, link);
LIST_INSERT_HEAD(&sc->sc_inuse, chunk, link);
mvxpbm_unlock(sc);
return chunk;
}
/* free memory chunk */
void
mvxpbm_free_chunk(struct mvxpbm_chunk *chunk)
{
struct mvxpbm_softc *sc = chunk->sc;
KASSERT(chunk->m == NULL);
DPRINTFN(3, "bm chunk free\n");
mvxpbm_lock(sc);
LIST_REMOVE(chunk, link);
LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
mvxpbm_unlock(sc);
}
/* prepare mbuf header after Rx */
int
mvxpbm_init_mbuf_hdr(struct mvxpbm_chunk *chunk)
{
struct mvxpbm_softc *sc = chunk->sc;
KASSERT(chunk->m == NULL);
/* add new mbuf header */
MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
if (chunk->m == NULL) {
aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
return ENOBUFS;
}
MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
mvxpbm_free_mbuf, chunk);
chunk->m->m_flags |= M_EXT_RW;
chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
if (sc->sc_chunk_packet_offset)
m_adj(chunk->m, sc->sc_chunk_packet_offset);
return 0;
}
/* sync DMA segments */
void
mvxpbm_dmamap_sync(struct mvxpbm_chunk *chunk, size_t size, int ops)
{
struct mvxpbm_softc *sc = chunk->sc;
KASSERT(size <= chunk->buf_size);
if (size == 0)
size = chunk->buf_size;
bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_map, chunk->buf_off, size, ops);
}
/* lock */
void
mvxpbm_lock(struct mvxpbm_softc *sc)
{
mutex_enter(&sc->sc_mtx);
}
void
mvxpbm_unlock(struct mvxpbm_softc *sc)
{
mutex_exit(&sc->sc_mtx);
}
/* get params */
const char *
mvxpbm_xname(struct mvxpbm_softc *sc)
{
if (sc->sc_emul) {
return "software_bm";
}
return device_xname(sc->sc_dev);
}
size_t
mvxpbm_chunk_size(struct mvxpbm_softc *sc)
{
return sc->sc_chunk_size;
}
uint32_t
mvxpbm_chunk_count(struct mvxpbm_softc *sc)
{
return sc->sc_chunk_count;
}
off_t
mvxpbm_packet_offset(struct mvxpbm_softc *sc)
{
return sc->sc_chunk_packet_offset;
}
paddr_t
mvxpbm_buf_pbase(struct mvxpbm_softc *sc)
{
return sc->sc_buf_pa;
}
size_t
mvxpbm_buf_size(struct mvxpbm_softc *sc)
{
return sc->sc_buf_size;
}
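As a cross-check of the layout arithmetic in mvxpbm_alloc_buffer(), the same padding steps can be reproduced in a stand-alone program. This is an illustrative sketch only: the constants mirror mvxpbmvar.h, and the 64-byte header size is an assumed stand-in for sizeof(struct mvxpbm_chunk).

/* Illustrative recomputation of the slot/chunk layout; user-space, not kernel code. */
#include <stdio.h>

#define MVXPBM_PACKET_SIZE	2000
#define MVXPBM_CHUNK_ALIGN	32
#define MVXPBM_CHUNK_MASK	(MVXPBM_CHUNK_ALIGN - 1)
#define MVXPBM_DATA_ALIGN	32
#define MVXPBM_DATA_MASK	(MVXPBM_DATA_ALIGN - 1)

int
main(void)
{
	size_t header = 64;	/* assumed sizeof(struct mvxpbm_chunk) */
	size_t align, slot, chunk;

	/* pad the header so packet data starts on a MVXPBM_DATA_ALIGN boundary */
	align = header & MVXPBM_DATA_MASK;
	if (align != 0)
		header += MVXPBM_DATA_ALIGN - align;

	/* pad the slot so the next chunk header lands on a MVXPBM_CHUNK_ALIGN boundary */
	slot = header + MVXPBM_PACKET_SIZE;
	align = slot & MVXPBM_CHUNK_MASK;
	if (align != 0)
		slot += MVXPBM_CHUNK_ALIGN - align;
	chunk = slot - header;

	/* with a 64-byte header this prints: header 64, chunk 2016, slot 2080 */
	printf("header %zu, chunk %zu, slot %zu\n", header, chunk, slot);
	return 0;
}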

sys/dev/marvell/mvxpbmvar.h Normal file

@ -0,0 +1,139 @@
/* $NetBSD: mvxpbmvar.h,v 1.1 2015/06/03 03:55:47 hsuenaga Exp $ */
/*
* Copyright (c) 2015 Internet Initiative Japan Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MVXPBMVAR_H_
#define _MVXPBMVAR_H_
#include <dev/marvell/marvellvar.h>
/*
* Maximum number of units.
*/
#define MVXPBM_UNIT_MAX 1
/*
* Buffer alignment
*/
#define MVXPBM_NUM_SLOTS 2048 /* minimum number of slots */
#define MVXPBM_PACKET_SIZE 2000 /* minimum packet size */
#define MVXPBM_BUF_ALIGN 65536 /* Mbus window size granularity */
#define MVXPBM_BUF_MASK (MVXPBM_BUF_ALIGN - 1)
#define MVXPBM_CHUNK_ALIGN 32 /* Cache line size */
#define MVXPBM_CHUNK_MASK (MVXPBM_CHUNK_ALIGN - 1)
#define MVXPBM_DATA_ALIGN 32 /* Cache line size */
#define MVXPBM_DATA_MASK (MVXPBM_DATA_ALIGN - 1)
#define MVXPBM_DATA_UNIT 8
/*
* Packet Buffer Header
*
* these chunks may be managed by the H/W Buffer Manager (BM) device,
* but that driver is not implemented yet.
*
*                           +----------------+ sc_buf
*                           |chunk header    |    |
* +----------------+        |                |    |chunk->buf_off
* |mbuf (M_EXT set)|<-------|struct mbuf *m  |    V
* +----------------+        +----------------+ chunk->buf_va/buf_pa
* | m_ext.ext_buf  |------->|packet buffer   |    |
* +----------------+        |                |    |chunk->buf_size
*                           |                |    V
*                           +----------------+
*                           |chunk header    |
*                           |....            |
*/
struct mvxpbm_chunk {
struct mbuf *m; /* back pointer to mbuf header */
void *sc; /* back pointer to softc */
off_t off; /* offset of chunk */
paddr_t pa; /* physical address of chunk */
off_t buf_off; /* offset of packet from sc_buf */
paddr_t buf_pa; /* physical address of packet */
vaddr_t buf_va; /* virtual address of packet */
size_t buf_size; /* size of buffer (excludes header) */
LIST_ENTRY(mvxpbm_chunk) link;
/* followed by packet buffer */
};
struct mvxpbm_softc {
device_t sc_dev;
bus_dma_tag_t sc_dmat;
bus_space_tag_t sc_iot;
kmutex_t sc_mtx;
/* software emulated */
int sc_emul;
/* DMA MAP for entire buffer */
bus_dmamap_t sc_buf_map;
char *sc_buf;
paddr_t sc_buf_pa;
size_t sc_buf_size;
/* memory chunk properties */
size_t sc_slotsize; /* size of one slot, including header */
uint32_t sc_chunk_count; /* number of chunks */
size_t sc_chunk_size; /* size of packet buffer */
size_t sc_chunk_header_size; /* size of header + padding */
off_t sc_chunk_packet_offset; /* leading space reserved before packet data */
/* for software based management */
LIST_HEAD(__mvxpbm_freehead, mvxpbm_chunk) sc_free;
LIST_HEAD(__mvxpbm_inusehead, mvxpbm_chunk) sc_inuse;
};
#define BM_SYNC_ALL 0
/* get mvxpbm device context */
struct mvxpbm_softc *mvxpbm_device(struct marvell_attach_args *);
/* allocate new memory chunk */
struct mvxpbm_chunk *mvxpbm_alloc(struct mvxpbm_softc *);
/* free memory chunk */
void mvxpbm_free_chunk(struct mvxpbm_chunk *);
/* prepare mbuf header after Rx */
int mvxpbm_init_mbuf_hdr(struct mvxpbm_chunk *);
/* sync DMA segments */
void mvxpbm_dmamap_sync(struct mvxpbm_chunk *, size_t, int);
/* lock */
void mvxpbm_lock(struct mvxpbm_softc *);
void mvxpbm_unlock(struct mvxpbm_softc *);
/* get params */
const char *mvxpbm_xname(struct mvxpbm_softc *);
size_t mvxpbm_chunk_size(struct mvxpbm_softc *);
uint32_t mvxpbm_chunk_count(struct mvxpbm_softc *);
off_t mvxpbm_packet_offset(struct mvxpbm_softc *);
paddr_t mvxpbm_buf_pbase(struct mvxpbm_softc *);
size_t mvxpbm_buf_size(struct mvxpbm_softc *);
#endif /* _MVXPBMVAR_H_ */
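As a usage note, a driver consuming the API above would refill an Rx descriptor roughly as in the sketch below. This is a hedged example, not code from if_mvxpe.c: the function name mvxpe_rx_refill_one() and the desc_bufptr argument are illustrative.

/*
 * Hypothetical Rx-refill helper built only from the mvxpbm API declared above.
 * Assumes the usual kernel headers (sys/mbuf.h, sys/bus.h) are already included.
 */
static int
mvxpe_rx_refill_one(struct mvxpbm_softc *bm, uint64_t *desc_bufptr)
{
	struct mvxpbm_chunk *chunk;

	chunk = mvxpbm_alloc(bm);		/* pop a chunk from the free list */
	if (chunk == NULL)
		return ENOBUFS;			/* pool exhausted */

	if (mvxpbm_init_mbuf_hdr(chunk) != 0) {	/* wrap the buffer in an external mbuf */
		mvxpbm_free_chunk(chunk);
		return ENOBUFS;
	}

	/* point the descriptor at the packet buffer, then make it device-visible */
	*desc_bufptr = (uint64_t)chunk->buf_pa;
	mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
	return 0;
}

On the free side no explicit driver call is needed: when the stack releases the mbuf, its external-storage destructor (mvxpbm_free_mbuf(), registered via MEXTADD() in mvxpbm_init_mbuf_hdr()) returns the chunk to the free list through mvxpbm_free_chunk().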