/*
 * Import (d5e3d70afd) from Mark Kettenis of OpenBSD.  Outstanding issues
 * with this driver:
 *  - Checksum offload is unsupported.
 *  - There is a significant amount of code duplication from sk(4).
 *  - There remain some 'magic numbers'.
 *  - Performance is not heavily tested, and likely to be lower than the
 *    chip is capable of in some cases.  Syncing some of the aforementioned
 *    'magic numbers' with the Marvell FreeBSD driver should help here.
 * Tested on a motherboard with Marvell 88E8053 ethernet, under
 * NetBSD/i386 and NetBSD/amd64.
 */
/* $NetBSD: if_msk.c,v 1.1 2006/09/09 16:17:50 riz Exp $ */
|
|
/* $OpenBSD: if_msk.c,v 1.11 2006/08/17 22:07:40 brad Exp $ */
|
|
|
|
/*
|
|
* Copyright (c) 1997, 1998, 1999, 2000
|
|
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer.
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
* must display the following acknowledgement:
|
|
* This product includes software developed by Bill Paul.
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
* may be used to endorse or promote products derived from this software
|
|
* without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
|
* THE POSSIBILITY OF SUCH DAMAGE.
|
|
*
|
|
* $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
|
|
*/
|
|
|
|
/*
|
|
* Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
|
|
*
|
|
* Permission to use, copy, modify, and distribute this software for any
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
* copyright notice and this permission notice appear in all copies.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
*/
|
|
|
|
#include "bpfilter.h"
|
|
#include "rnd.h"
|
|
|
|
#include <sys/param.h>
|
|
#include <sys/systm.h>
|
|
#include <sys/sockio.h>
|
|
#include <sys/mbuf.h>
|
|
#include <sys/malloc.h>
|
|
#include <sys/kernel.h>
|
|
#include <sys/socket.h>
|
|
#include <sys/device.h>
|
|
#include <sys/queue.h>
|
|
#include <sys/callout.h>
|
|
#include <sys/sysctl.h>
|
|
#include <sys/endian.h>
|
|
#ifdef __NetBSD__
|
|
#define letoh16 htole16
|
|
#define letoh32 htole32
|
|
#endif
|
|
|
|
#include <net/if.h>
|
|
#include <net/if_dl.h>
|
|
#include <net/if_types.h>
|
|
|
|
#include <net/if_media.h>
|
|
|
|
#if NBPFILTER > 0
|
|
#include <net/bpf.h>
|
|
#endif
|
|
#if NRND > 0
|
|
#include <sys/rnd.h>
|
|
#endif
|
|
|
|
#include <dev/mii/mii.h>
|
|
#include <dev/mii/miivar.h>
|
|
#include <dev/mii/brgphyreg.h>
|
|
|
|
#include <dev/pci/pcireg.h>
|
|
#include <dev/pci/pcivar.h>
|
|
#include <dev/pci/pcidevs.h>
|
|
|
|
#include <dev/pci/if_skreg.h>
|
|
#include <dev/pci/if_mskvar.h>
|
|
|
|
/*
 * Autoconfiguration glue: probe/attach for the controller (mskc) and
 * the per-port MAC instances (msk) it hosts.
 */
int mskc_probe(struct device *, struct cfdata *, void *);
void mskc_attach(struct device *, struct device *self, void *aux);
void mskc_shutdown(void *);
int msk_probe(struct device *, struct cfdata *, void *);
void msk_attach(struct device *, struct device *self, void *aux);
int mskcprint(void *, const char *);

/* Interrupt handling and packet I/O. */
int msk_intr(void *);
void msk_intr_yukon(struct sk_if_softc *);
__inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
void msk_txeof(struct sk_if_softc *);
int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
void msk_start(struct ifnet *);
int msk_ioctl(struct ifnet *, u_long, caddr_t);
int msk_init(struct ifnet *);
void msk_init_yukon(struct sk_if_softc *);
void msk_stop(struct ifnet *, int);
void msk_watchdog(struct ifnet *);
int msk_ifmedia_upd(struct ifnet *);
void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void msk_reset(struct sk_softc *);

/* Receive buffer and jumbo-frame pool management. */
int msk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
int msk_alloc_jumbo_mem(struct sk_if_softc *);
void *msk_jalloc(struct sk_if_softc *);
void msk_jfree(struct mbuf *, caddr_t, size_t, void *);
int msk_init_rx_ring(struct sk_if_softc *);
int msk_init_tx_ring(struct sk_if_softc *);

void msk_update_int_mod(struct sk_softc *);

/* MII bus accessors for the integrated Marvell PHY. */
int msk_marv_miibus_readreg(struct device *, int, int);
void msk_marv_miibus_writereg(struct device *, int, int, int);
void msk_marv_miibus_statchg(struct device *);

/* Multicast hash / receive-filter programming. */
u_int32_t msk_yukon_hash(caddr_t);
void msk_setfilt(struct sk_if_softc *, caddr_t, int);
void msk_setmulti(struct sk_if_softc *);
void msk_setpromisc(struct sk_if_softc *);
void msk_yukon_tick(void *);

/* Debug scaffolding: enable by defining MSK_DEBUG. */
/* #define MSK_DEBUG 1 */
#ifdef MSK_DEBUG
#define DPRINTF(x)	if (mskdebug) printf x
#define DPRINTFN(n,x)	if (mskdebug >= (n)) printf x
int	mskdebug = MSK_DEBUG;

void msk_dump_txdesc(struct msk_tx_desc *, int);
void msk_dump_mbuf(struct mbuf *);
void msk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* sysctl bookkeeping for the interrupt-moderation tunable. */
static int msk_sysctl_handler(SYSCTLFN_PROTO);
static int msk_root_num;
|
|
|
|
/*
 * PCI vendor/product IDs this driver attaches to.
 *
 * NOTE(review): the table carries no terminating {0,0} sentinel entry,
 * so any scan of it must be bounded by the array size, not by looking
 * for a zero vendor ID.
 */
static const struct msk_product {
	pci_vendor_id_t		msk_vendor;
	pci_product_id_t	msk_product;
} msk_products[] = {
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_3 },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9SXX },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9E21 }
};
|
|
|
|
/*
 * Register accessors.  These keep the sk(4)-style sk_win_* naming, but
 * on the Yukon-2 parts there is no window switching: each one reduces
 * to a plain CSR read or write of the given width.
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
|
|
|
|
/*
 * Read a PHY register through the GMAC SMI interface.
 *
 * Only PHY address 0 on a Marvell copper or fiber PHY is serviced;
 * any other request returns 0.  The read command is posted via
 * YUKON_SMICR, then polled (up to SK_TIMEOUT iterations of 1us each)
 * for the READ_VALID bit before the data is fetched from YUKON_SMIDR.
 * Returns 0 on timeout as well.
 */
int
msk_marv_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Refuse anything but the single integrated Marvell PHY. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("msk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
		    phy, reg));
		return (0);
	}

	/* Kick off the SMI read transaction. */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Busy-wait for the result to become valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}
|
|
|
|
void
|
|
msk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
|
|
{
|
|
struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
|
|
int i;
|
|
|
|
DPRINTFN(9, ("msk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
|
|
phy, reg, val));
|
|
|
|
SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
|
|
SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
|
|
YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
|
|
|
|
for (i = 0; i < SK_TIMEOUT; i++) {
|
|
DELAY(1);
|
|
if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
|
|
break;
|
|
}
|
|
|
|
if (i == SK_TIMEOUT)
|
|
aprint_error("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
|
|
}
|
|
|
|
/*
 * MII status-change callback.  No hardware reprogramming is done here;
 * under MSK_DEBUG it dumps the GMAC General Purpose Control Register
 * so media changes can be observed.
 */
void
msk_marv_miibus_statchg(struct device *dev)
{
	DPRINTFN(9, ("msk_marv_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
|
|
|
|
#define HASH_BITS 6
|
|
|
|
u_int32_t
|
|
msk_yukon_hash(caddr_t addr)
|
|
{
|
|
u_int32_t crc;
|
|
|
|
crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
|
|
return (crc & ((1 << HASH_BITS) - 1));
|
|
}
|
|
|
|
/*
 * Program a station address into XMAC receive-filter slot 'slot'.
 *
 * NOTE(review): this uses the XMAC (GEnesis-era) filter registers
 * inherited from sk(4); the Yukon-2 parts this driver targets use the
 * GMAC filter instead, and no caller is visible in this file --
 * confirm whether this is dead code.
 * The u_int16_t loads assume 'addr' is at least 2-byte aligned.
 */
void
msk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
}
|
|
|
|
/*
 * Program the GMAC 64-bit multicast hash filter from the interface's
 * multicast list.  Falls back to accepting all multicast traffic
 * (all-ones hash) when IFF_ALLMULTI/IFF_PROMISC is set or when an
 * address *range* is present, since a range cannot be represented in
 * the hash.
 */
void
msk_setmulti(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp= &sc_if->sk_ethercom.ec_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h;
	struct ethercom *ec = &sc_if->sk_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;

	/* First, zot all the existing filters. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);


	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/*
			 * lo != hi means a range of addresses; punt to
			 * all-multicast.  Note this permanently sets
			 * IFF_ALLMULTI on the interface.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/* Set the matching bit of the 64-bit vector. */
			h = msk_yukon_hash(enm->enm_addrlo);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* The 64-bit hash vector spans four 16-bit registers. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
}
|
|
|
|
void
|
|
msk_setpromisc(struct sk_if_softc *sc_if)
|
|
{
|
|
struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
|
|
|
|
if (ifp->if_flags & IFF_PROMISC)
|
|
SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
|
|
YU_RCR_UFLEN | YU_RCR_MUFLEN);
|
|
else
|
|
SK_YU_SETBIT_2(sc_if, YUKON_RCR,
|
|
YU_RCR_UFLEN | YU_RCR_MUFLEN);
|
|
}
|
|
|
|
/*
 * Initialize the receive descriptor ring: link the software chain
 * entries into a circular list over the DMA ring memory and attach a
 * fresh jumbo mbuf to every slot.
 *
 * Returns 0 on success, ENOBUFS if any mbuf/jumbo allocation fails
 * (slots already filled are left attached for msk_stop to reclaim).
 */
int
msk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	int i, nexti;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);

	/* Point each chain entry at its descriptor and its successor. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
		if (i == (MSK_RX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
	}

	/* Populate every slot with a receive buffer. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (msk_newbuf(sc_if, i, NULL,
		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
			aprint_error("%s: failed alloc of %dth mbuf\n",
			    sc_if->sk_dev.dv_xname, i);
			return (ENOBUFS);
		}
	}

	/* Producer starts at the last slot, consumer at the first. */
	sc_if->sk_cdata.sk_rx_prod = MSK_RX_RING_CNT - 1;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return (0);
}
|
|
|
|
/*
 * Initialize the transmit descriptor ring: link the software chain
 * into a circular list and pre-create one DMA map per descriptor,
 * kept on the sk_txmap_head free list for msk_encap to draw from.
 *
 * Returns 0 on success, ENOBUFS on allocation failure.
 * NOTE(review): on failure partway through the loop, DMA maps and
 * entries created on earlier iterations are not released here --
 * presumably the attach failure path covers this; confirm.
 */
int
msk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	bus_dmamap_t dmamap;
	struct sk_txmap_entry *entry;
	int i, nexti;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
		if (i == (MSK_TX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];

		/* One map per slot, sized for a full jumbo frame. */
		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	/* Hand the (empty) ring to the device. */
	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}
|
|
|
|
/*
 * Attach a receive buffer to RX ring slot 'i' and hand the descriptor
 * back to the hardware.
 *
 * If 'm' is NULL a fresh mbuf header is allocated and backed by a
 * buffer from the jumbo pool; otherwise the caller's mbuf is recycled
 * in place.  'dmamap' is the (already loaded) map covering the whole
 * jumbo region; the descriptor address is computed as an offset into
 * it.  Returns 0 on success, ENOBUFS if mbuf or jumbo-pool allocation
 * fails.
 */
int
msk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	  bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct msk_rx_desc *r;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = msk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->sk_ethercom.ec_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, msk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Align the payload so the IP header lands on a 4-byte boundary. */
	m_adj(m_new, ETHER_ALIGN);

	/* Fill in the descriptor and give it back to the chip (OWN). */
	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_le;
	c->sk_mbuf = m_new;
	r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
             - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_len = htole16(SK_JLEN);
	r->sk_ctl = 0;
	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;

	MSK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}
|
|
|
|
/*
|
|
* Memory management for jumbo frames.
|
|
*/
|
|
|
|
/*
 * Allocate and DMA-map one contiguous region for all jumbo receive
 * buffers, carve it into MSK_JSLOTS pieces of SK_JLEN bytes, and seed
 * the free/in-use bookkeeping lists.
 *
 * 'state' tracks how far setup progressed so the error path can
 * unwind exactly what was acquired (the switch cases deliberately
 * fall through from most- to least-recently acquired resource).
 *
 * Returns 0 on success or ENOBUFS on any allocation failure.
 * NOTE(review): slot 0 is placed on the *in-use* list, mirroring the
 * sk(4) code this was copied from -- presumably reserving it; confirm.
 * Entries malloc'd before a later failure are not freed on the error
 * path.
 */
int
msk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct sk_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, MSK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't alloc rx buffers");
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, MSK_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error(": can't map dma buffers (%d bytes)", MSK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, MSK_JMEM, 1, MSK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		aprint_error(": can't create dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, MSK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("msk_jumbo_buf = %p\n", (caddr_t)sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error(": no memory for jumbo buffer queue!");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
			    entry, jpool_entries);
		else
			LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead,
			    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of acquisition. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, MSK_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}
|
|
|
|
/*
|
|
* Allocate a jumbo buffer.
|
|
*/
|
|
void *
|
|
msk_jalloc(struct sk_if_softc *sc_if)
|
|
{
|
|
struct sk_jpool_entry *entry;
|
|
|
|
entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
|
|
|
|
if (entry == NULL)
|
|
return (NULL);
|
|
|
|
LIST_REMOVE(entry, jpool_entries);
|
|
LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
|
|
return (sc_if->sk_cdata.sk_jslots[entry->slot]);
|
|
}
|
|
|
|
/*
|
|
* Release a jumbo buffer.
|
|
*/
|
|
/*
 * Release a jumbo buffer (mbuf external-storage free callback, see
 * MEXTADD in msk_newbuf).
 *
 * 'buf' must lie inside the jumbo region; its slot index is recovered
 * by pointer arithmetic.  An arbitrary entry is taken off the in-use
 * list, relabelled with this slot and put back on the free list --
 * entries are interchangeable, only their 'slot' field matters.
 * Runs at splvm() since it can be called from interrupt context.
 */
void
msk_jfree(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("msk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((vaddr_t)buf
	     - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= MSK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("msk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);

	/* The mbuf header itself goes back to the mbuf pool. */
	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}
|
|
|
|
/*
|
|
* Set media options.
|
|
*/
|
|
int
|
|
msk_ifmedia_upd(struct ifnet *ifp)
|
|
{
|
|
struct sk_if_softc *sc_if = ifp->if_softc;
|
|
|
|
msk_init(ifp);
|
|
mii_mediachg(&sc_if->sk_mii);
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* Report current media status.
|
|
*/
|
|
void
|
|
msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
|
|
{
|
|
struct sk_if_softc *sc_if = ifp->if_softc;
|
|
|
|
mii_pollstat(&sc_if->sk_mii);
|
|
ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
|
|
ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
|
|
}
|
|
|
|
/*
 * Handle interface ioctls at splnet().
 *
 * Media ioctls go to the MII/ifmedia layer; everything else is passed
 * to ether_ioctl().  A return of ENETRESET from ether_ioctl() means
 * the multicast list changed, which only needs the hash filter
 * reprogrammed (no full chip reinit) when the interface is running.
 */
int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2,("msk_ioctl: SIOC[GS]IFMEDIA\n"));
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		DPRINTFN(2,("msk_ioctl: SIOC[GS]IFMEDIA done\n"));
		break;
	default:
		DPRINTFN(2, ("msk_ioctl ETHER\n"));
		error = ether_ioctl(ifp, command, data);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				msk_setmulti(sc_if);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}
|
|
|
|
/*
 * (Re)program the interrupt-moderation timer from sc->sk_int_mod
 * (microseconds, adjustable via sysctl) and clear the pending flag.
 */
void
msk_update_int_mod(struct sk_softc *sc)
{
	u_int32_t sk_imtimer_ticks;

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	/*
	 * NOTE(review): sk_imtimer_ticks looks unused, but the
	 * SK_IM_USECS() macro below presumably expands to reference it
	 * -- confirm against if_skreg.h before "cleaning it up".
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	aprint_verbose("%s: interrupt moderation is %d us\n",
	    sc->sk_dev.dv_xname, sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
	sc->sk_int_mod_pending = 0;
}
|
|
|
|
static int
|
|
msk_lookup(const struct pci_attach_args *pa)
|
|
{
|
|
const struct msk_product *pmsk;
|
|
|
|
for ( pmsk = &msk_products[0]; pmsk->msk_vendor != 0; pmsk++) {
|
|
if (PCI_VENDOR(pa->pa_id) == pmsk->msk_vendor &&
|
|
PCI_PRODUCT(pa->pa_id) == pmsk->msk_product)
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
|
|
* IDs against our list and return a device name if we find a match.
|
|
*/
|
|
/*
 * Autoconf match routine for the controller: accept any device whose
 * PCI vendor/product pair appears in msk_products[].
 */
int
mskc_probe(struct device *parent, struct cfdata *match, void *aux)
{
	return msk_lookup((struct pci_attach_args *)aux);
}
|
|
|
|
/*
|
|
* Force the GEnesis into reset, then bring it out of reset.
|
|
*/
|
|
/*
 * Force the controller into reset, bring it back out, and reinitialize
 * the shared (non-per-port) state: ASF off, timers off, RAM interface
 * on, status BMU ring reset and restarted, interrupt moderation
 * reprogrammed.
 */
void msk_reset(struct sk_softc *sc)
{
	u_int32_t sk_imtimer_ticks;
	int reg;

	DPRINTFN(2, ("msk_reset\n"));

	/* Assert software + master reset and reset the link logic. */
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in reverse order after letting the reset settle. */
	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("msk_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("msk_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	/* Program all twelve RAM timeout registers (magic value 36). */
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	/*
	 * NOTE(review): sk_imtimer_ticks is presumably referenced by
	 * the SK_IM_USECS() macro used below -- confirm in if_skreg.h.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_XL:
	case SK_YUKON_FE:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}

	/* Reset status ring. */
	bzero((char *)sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the (64-bit) ring DMA address. */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));

	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(20));
#else
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
#endif

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);

	msk_update_int_mod(sc);
}
|
|
|
|
int
|
|
msk_probe(struct device *parent, struct cfdata *match, void *aux)
|
|
{
|
|
struct skc_attach_args *sa = aux;
|
|
|
|
if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
|
|
return (0);
|
|
|
|
switch (sa->skc_type) {
|
|
case SK_YUKON_XL:
|
|
case SK_YUKON_EC_U:
|
|
case SK_YUKON_EC:
|
|
case SK_YUKON_FE:
|
|
return (1);
|
|
}
|
|
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* Each XMAC chip is attached as a separate logical IP interface.
|
|
* Single port cards will have only one logical interface of course.
|
|
*/
|
|
/*
 * Attach one MAC port: read the station address, partition the RAM
 * buffer, allocate and map the descriptor rings and jumbo pool, wire
 * up the ifnet entry points, attach the MII/PHY, and register with
 * the MI network stack.
 *
 * On failure the acquired DMA resources are unwound via the fail_*
 * labels and sc->sk_if[port] is cleared so the parent knows the port
 * is absent.
 * NOTE(review): the fail_3 path destroys sk_ring_map without a
 * bus_dmamap_unload() for the case where the load already succeeded
 * (jumbo allocation failure) -- confirm whether unload is required
 * there.
 */
void
msk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	bus_dma_segment_t seg;
	int i, rseg;
	u_int32_t chunk, val;

	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->sk_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);

	aprint_normal(": Ethernet address %s\n",
	    ether_sprintf(sc_if->sk_enaddr));

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	/* RX gets roughly two thirds, TX the remaining third. */
	chunk = sc->sk_ramsize - (sc->sk_ramsize + 2) / 3;
	val = sc->sk_rboff / sizeof(u_int64_t);
	sc_if->sk_rx_ramstart = val;
	val += (chunk / sizeof(u_int64_t));
	sc_if->sk_rx_ramend = val - 1;
	chunk = sc->sk_ramsize - chunk;
	sc_if->sk_tx_ramstart = val;
	val += (chunk / sizeof(u_int64_t));
	sc_if->sk_tx_ramend = val - 1;

	DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
	    " tx_ramstart=%#x tx_ramend=%#x\n",
	    sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
	    sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

	/* Read and save PHY type */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;

	/* Set PHY address */
	if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
	    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
		/* not initialized, punt */
		sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;

		sc->sk_coppertype = 1;
	}

	sc_if->sk_phyaddr = SK_PHYADDR_MARV;

	if (!(sc->sk_coppertype))
		sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error(": can't map dma buffers (%zu bytes)\n",
		    sizeof(struct msk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
	    sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc_if->sk_ring_map)) {
		aprint_error(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load dma map\n");
		goto fail_3;
	}
	sc_if->sk_rdata = (struct msk_ring_data *)kva;
	bzero(sc_if->sk_rdata, sizeof(struct msk_ring_data));

	ifp = &sc_if->sk_ethercom.ec_if;
	/* Try to allocate memory for jumbo buffers. */
	if (msk_alloc_jumbo_mem(sc_if)) {
		aprint_error(": jumbo buffer allocation failed\n");
		goto fail_3;
	}
	sc_if->sk_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU
		| ETHERCAP_JUMBO_MTU;

	/* Wire up the ifnet entry points. */
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_stop = msk_stop;
	ifp->if_init = msk_init;
	ifp->if_watchdog = msk_watchdog;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, sc_if->sk_dev.dv_xname);

	/*
	 * Do miibus setup.
	 */
	msk_init_yukon(sc_if);

	DPRINTFN(2, ("msk_attach: 1\n"));

	sc_if->sk_mii.mii_ifp = ifp;
	sc_if->sk_mii.mii_readreg = msk_marv_miibus_readreg;
	sc_if->sk_mii.mii_writereg = msk_marv_miibus_writereg;
	sc_if->sk_mii.mii_statchg = msk_marv_miibus_statchg;

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    msk_ifmedia_upd, msk_ifmedia_sts);
	mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* Fall back to a manual placeholder if no PHY probes. */
		aprint_error("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Once-a-second tick for link maintenance. */
	callout_init(&sc_if->sk_tick_ch);
	callout_setfunc(&sc_if->sk_tick_ch, msk_yukon_tick, sc_if);
	callout_schedule(&sc_if->sk_tick_ch, hz);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc_if->sk_enaddr);

	shutdownhook_establish(mskc_shutdown, sc);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sk_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	DPRINTFN(2, ("msk_attach: end\n"));
	return;

	/* Unwind in reverse order of acquisition. */
fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}
|
|
|
|
int
|
|
mskcprint(void *aux, const char *pnp)
|
|
{
|
|
struct skc_attach_args *sa = aux;
|
|
|
|
if (pnp)
|
|
aprint_normal("sk port %c at %s",
|
|
(sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
|
|
else
|
|
aprint_normal(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
|
|
return (UNCONF);
|
|
}
|
|
|
|
/*
 * Attach the controller: map registers, allocate the status ring,
 * identify the chip, and attach the per-port msk interfaces.
 */
|
|
void
mskc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t command, memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t size;
	int rc, sk_nodenum;
	u_int8_t hw, skrs;
	const char *revstr = NULL;
	const struct sysctlnode *node;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;

	DPRINTFN(2, ("begin mskc_attach\n"));

	/*
	 * Handle power management nonsense.
	 * If the first capability is power management (ID 0x01) and the
	 * chip is in a low-power D-state, kick it back to D0.  Writing
	 * the power state clears some config registers, so save and
	 * restore the BARs and interrupt line around it.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;

	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			aprint_normal("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		/* Note: falls through to the error path if the map fails. */
		if (pci_mapreg_map(pa, SK_PCI_LOMEM,
		    memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
		    NULL, &size) == 0)
			break;
	default:
		aprint_error(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	/* Chip type and revision identify the specific Yukon-2 variant. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

	/* bail out here if chip is not recognized */
	if (!(SK_IS_YUKON(sc))) {
		aprint_error(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		aprint_error(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc);
	if (sc->sk_intrhand == NULL) {
		aprint_error(": couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_1;
	}

	/*
	 * Allocate, map, and load DMA-safe memory for the status ring,
	 * which the chip writes completion events into (see msk_intr()).
	 */
	if (bus_dmamem_alloc(sc->sc_dmatag,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't alloc status buffers\n");
		goto fail_2;
	}

	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    &kva, BUS_DMA_NOWAIT)) {
		aprint_error(": can't map dma buffers (%zu bytes)\n",
		    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
		goto fail_3;
	}
	if (bus_dmamap_create(sc->sc_dmatag,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0,
	    BUS_DMA_NOWAIT, &sc->sk_status_map)) {
		aprint_error(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load dma map\n");
		goto fail_5;
	}
	sc->sk_status_ring = (struct msk_status_desc *)kva;
	bzero(sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));

	/* Reset the adapter. */
	msk_reset(sc);

	/*
	 * Determine on-chip RAM size: EPROM0 gives the size in 4KB
	 * units; 0 means 128KB.
	 */
	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (skrs == 0x00)
		sc->sk_ramsize = 0x20000;
	else
		sc->sk_ramsize = skrs * (1<<12);
	sc->sk_rboff = SK_RBOFF_0;

	DPRINTFN(2, ("mskc_attach: ramsize=%d (%dk), rboff=%d\n",
	    sc->sk_ramsize, sc->sk_ramsize / 1024,
	    sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	/*
	 * 'T'/'1' => copper; on Yukon-2, anything that is not 'L' or 'S'
	 * (fiber) is also treated as copper.
	 */
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1' ||
	    (SK_IS_YUKON2(sc) && !(sc->sk_pmd == 'L' ||
	    sc->sk_pmd == 'S')))
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	switch (sc->sk_type) {
	case SK_YUKON_XL:
		sc->sk_name = "Marvell Yukon-2 XL";
		break;
	case SK_YUKON_EC_U:
		sc->sk_name = "Marvell Yukon-2 EC Ultra";
		break;
	case SK_YUKON_EC:
		sc->sk_name = "Marvell Yukon-2 EC";
		break;
	case SK_YUKON_FE:
		sc->sk_name = "Marvell Yukon-2 FE";
		break;
	default:
		sc->sk_name = "Marvell Yukon (Unknown)";
	}

	/* Map the chip revision to a printable string, per chip type. */
	if (sc->sk_type == SK_YUKON_XL) {
		switch (sc->sk_rev) {
		case SK_YUKON_XL_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_XL_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_XL_REV_A2:
			revstr = "A2";
			break;
		case SK_YUKON_XL_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_EC) {
		switch (sc->sk_rev) {
		case SK_YUKON_EC_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_EC_REV_A2:
			revstr = "A2";
			break;
		case SK_YUKON_EC_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_EC_U) {
		switch (sc->sk_rev) {
		case SK_YUKON_EC_U_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_EC_U_REV_A1:
			revstr = "A1";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	aprint_normal(", %s", sc->sk_name);
	if (revstr != NULL)
		aprint_normal(" rev. %s", revstr);
	aprint_normal(" (0x%x): %s\n", sc->sk_rev, intrstr);


	/*
	 * One MAC by default; a dual-link chip with link 2 not clock-gated
	 * has a second one.
	 */
	sc->sk_macs = 1;

	hw = sk_win_read_1(sc, SK_Y2_HWRES);
	if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
		if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
		    SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
			sc->sk_macs++;
	}

	/* Attach one msk child per MAC. */
	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, mskcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, mskcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	/* skc sysctl setup */

	sc->sk_int_mod = SK_IM_DEFAULT;
	sc->sk_int_mod_pending = 0;

	/* Per-controller sysctl node under hw.mskc. */
	if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, sc->sk_dev.dv_xname,
	    SYSCTL_DESCR("mskc per-controller controls"),
	    NULL, 0, NULL, 0, CTL_HW, msk_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal("%s: couldn't create sysctl node\n",
		    sc->sk_dev.dv_xname);
		goto fail_6;
	}

	sk_nodenum = node->sysctl_num;

	/* interrupt moderation time in usecs */
	if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_mod",
	    SYSCTL_DESCR("msk interrupt moderation timer"),
	    msk_sysctl_handler, 0, sc,
	    0, CTL_HW, msk_root_num, sk_nodenum, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal("%s: couldn't create int_mod sysctl node\n",
		    sc->sk_dev.dv_xname);
		goto fail_6;
	}

	return;

	/* Unwind resource acquisition in reverse order. */
 fail_6:
	bus_dmamap_unload(sc->sc_dmatag, sc->sk_status_map);
fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map);
fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
}
|
|
|
|
int
|
|
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
|
|
{
|
|
struct sk_softc *sc = sc_if->sk_softc;
|
|
struct msk_tx_desc *f = NULL;
|
|
u_int32_t frag, cur, cnt = 0;
|
|
int i;
|
|
struct sk_txmap_entry *entry;
|
|
bus_dmamap_t txmap;
|
|
|
|
DPRINTFN(2, ("msk_encap\n"));
|
|
|
|
entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
|
|
if (entry == NULL) {
|
|
DPRINTFN(2, ("msk_encap: no txmap available\n"));
|
|
return (ENOBUFS);
|
|
}
|
|
txmap = entry->dmamap;
|
|
|
|
cur = frag = *txidx;
|
|
|
|
#ifdef MSK_DEBUG
|
|
if (mskdebug >= 2)
|
|
msk_dump_mbuf(m_head);
|
|
#endif
|
|
|
|
/*
|
|
* Start packing the mbufs in this chain into
|
|
* the fragment pointers. Stop when we run out
|
|
* of fragments or hit the end of the mbuf chain.
|
|
*/
|
|
if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
|
|
BUS_DMA_NOWAIT)) {
|
|
DPRINTFN(2, ("msk_encap: dmamap failed\n"));
|
|
return (ENOBUFS);
|
|
}
|
|
|
|
DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
|
|
|
|
/* Sync the DMA map. */
|
|
bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
|
|
BUS_DMASYNC_PREWRITE);
|
|
|
|
for (i = 0; i < txmap->dm_nsegs; i++) {
|
|
if ((MSK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) {
|
|
DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
|
|
return (ENOBUFS);
|
|
}
|
|
f = &sc_if->sk_rdata->sk_tx_ring[frag];
|
|
f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
|
|
f->sk_len = htole16(txmap->dm_segs[i].ds_len);
|
|
f->sk_ctl = 0;
|
|
if (cnt == 0)
|
|
f->sk_opcode = SK_Y2_TXOPC_PACKET;
|
|
else
|
|
f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
|
|
cur = frag;
|
|
SK_INC(frag, MSK_TX_RING_CNT);
|
|
cnt++;
|
|
}
|
|
|
|
sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
|
|
SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
|
|
|
|
sc_if->sk_cdata.sk_tx_map[cur] = entry;
|
|
sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;
|
|
|
|
/* Sync descriptors before handing to chip */
|
|
MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
|
|
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
|
|
|
|
sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;
|
|
|
|
/* Sync first descriptor to hand it off */
|
|
MSK_CDTXSYNC(sc_if, *txidx, 1,
|
|
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
|
|
|
|
sc_if->sk_cdata.sk_tx_cnt += cnt;
|
|
|
|
#ifdef MSK_DEBUG
|
|
if (mskdebug >= 2) {
|
|
struct msk_tx_desc *le;
|
|
u_int32_t idx;
|
|
for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
|
|
le = &sc_if->sk_rdata->sk_tx_ring[idx];
|
|
msk_dump_txdesc(le, idx);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
*txidx = frag;
|
|
|
|
DPRINTFN(2, ("msk_encap: completed successfully\n"));
|
|
|
|
return (0);
|
|
}
|
|
|
|
void
|
|
msk_start(struct ifnet *ifp)
|
|
{
|
|
struct sk_if_softc *sc_if = ifp->if_softc;
|
|
struct mbuf *m_head = NULL;
|
|
u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
|
|
int pkts = 0;
|
|
|
|
DPRINTFN(2, ("msk_start\n"));
|
|
|
|
while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
|
|
IFQ_POLL(&ifp->if_snd, m_head);
|
|
if (m_head == NULL)
|
|
break;
|
|
|
|
/*
|
|
* Pack the data into the transmit ring. If we
|
|
* don't have room, set the OACTIVE flag and wait
|
|
* for the NIC to drain the ring.
|
|
*/
|
|
if (msk_encap(sc_if, m_head, &idx)) {
|
|
ifp->if_flags |= IFF_OACTIVE;
|
|
break;
|
|
}
|
|
|
|
/* now we are committed to transmit the packet */
|
|
IFQ_DEQUEUE(&ifp->if_snd, m_head);
|
|
pkts++;
|
|
|
|
/*
|
|
* If there's a BPF listener, bounce a copy of this frame
|
|
* to him.
|
|
*/
|
|
#if NBPFILTER > 0
|
|
if (ifp->if_bpf)
|
|
bpf_mtap(ifp->if_bpf, m_head);
|
|
#endif
|
|
}
|
|
if (pkts == 0)
|
|
return;
|
|
|
|
/* Transmit */
|
|
if (idx != sc_if->sk_cdata.sk_tx_prod) {
|
|
sc_if->sk_cdata.sk_tx_prod = idx;
|
|
SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);
|
|
|
|
/* Set a timeout in case the chip goes out to lunch. */
|
|
ifp->if_timer = 5;
|
|
}
|
|
}
|
|
|
|
void
|
|
msk_watchdog(struct ifnet *ifp)
|
|
{
|
|
struct sk_if_softc *sc_if = ifp->if_softc;
|
|
|
|
/*
|
|
* Reclaim first as there is a possibility of losing Tx completion
|
|
* interrupts.
|
|
*/
|
|
msk_txeof(sc_if);
|
|
if (sc_if->sk_cdata.sk_tx_cnt != 0) {
|
|
aprint_error("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
|
|
|
|
ifp->if_oerrors++;
|
|
|
|
/* XXX Resets both ports; we shouldn't do that. */
|
|
msk_reset(sc_if->sk_softc);
|
|
msk_init(ifp);
|
|
}
|
|
}
|
|
|
|
/*
 * Shutdown hook (registered in msk_attach): turn off the status LED and
 * reset the controller so the chip is quiescent across reboot.
 */
void
mskc_shutdown(void *v)
{
	struct sk_softc *sc = v;

	DPRINTFN(2, ("msk_shutdown\n"));

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	msk_reset(sc);
}
|
|
|
|
__inline int
|
|
msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
|
|
{
|
|
if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
|
|
YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
|
|
YU_RXSTAT_JABBER)) != 0 ||
|
|
(stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
|
|
YU_RXSTAT_BYTES(stat) != len)
|
|
return (0);
|
|
|
|
return (1);
|
|
}
|
|
|
|
/*
 * Handle one received frame, indicated by an RXSTAT entry in the status
 * ring.  'len' is the frame length and 'rxstat' the GMAC receive status
 * word reported by the chip.  Consumes one RX ring slot and refills it.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	int cur, total_len = len;
	bus_dmamap_t dmamap;

	DPRINTFN(2, ("msk_rxeof\n"));

	/* Consume one slot; the producer index advances in lockstep. */
	cur = sc_if->sk_cdata.sk_rx_cons;
	SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);

	/* Sync the descriptor */
	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
	/* All jumbo RX buffers share a single DMA map. */
	dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	m = cur_rx->sk_mbuf;
	cur_rx->sk_mbuf = NULL;

	/* Drop bad frames, recycling the buffer back into the ring. */
	if (total_len < SK_MIN_FRAMELEN ||
	    total_len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, total_len) == 0) {
		ifp->if_ierrors++;
		msk_newbuf(sc_if, cur, m, dmamap);
		return;
	}

	/*
	 * Try to allocate a new jumbo buffer. If that fails, copy the
	 * packet to mbufs and put the jumbo buffer back in the ring
	 * so it can be re-used. If allocating mbufs fails, then we
	 * have to drop the packet.
	 */
	if (msk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
		struct mbuf *m0;
		/* Copy with ETHER_ALIGN slack so the IP header stays aligned. */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		msk_newbuf(sc_if, cur, m, dmamap);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			return;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;
	} else {
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* pass it on. */
	(*ifp->if_input)(ifp, m);
}
|
|
|
|
void
|
|
msk_txeof(struct sk_if_softc *sc_if)
|
|
{
|
|
struct sk_softc *sc = sc_if->sk_softc;
|
|
struct msk_tx_desc *cur_tx;
|
|
struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
|
|
u_int32_t idx, sk_ctl;
|
|
struct sk_txmap_entry *entry;
|
|
|
|
DPRINTFN(2, ("msk_txeof\n"));
|
|
|
|
/*
|
|
* Go through our tx ring and free mbufs for those
|
|
* frames that have been sent.
|
|
*/
|
|
idx = sc_if->sk_cdata.sk_tx_cons;
|
|
while (idx != sk_win_read_2(sc, SK_STAT_BMU_TXA1_RIDX)) {
|
|
MSK_CDTXSYNC(sc_if, idx, 1,
|
|
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
|
|
|
|
cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
|
|
sk_ctl = letoh32(cur_tx->sk_ctl);
|
|
#ifdef MSK_DEBUG
|
|
if (mskdebug >= 2)
|
|
msk_dump_txdesc(cur_tx, idx);
|
|
#endif
|
|
if (sk_ctl & SK_TXCTL_LASTFRAG)
|
|
ifp->if_opackets++;
|
|
if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
|
|
entry = sc_if->sk_cdata.sk_tx_map[idx];
|
|
|
|
m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
|
|
sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
|
|
|
|
bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
|
|
entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
|
|
|
|
bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
|
|
SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
|
|
link);
|
|
sc_if->sk_cdata.sk_tx_map[idx] = NULL;
|
|
}
|
|
sc_if->sk_cdata.sk_tx_cnt--;
|
|
SK_INC(idx, MSK_TX_RING_CNT);
|
|
}
|
|
ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
|
|
|
|
if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2)
|
|
ifp->if_flags &= ~IFF_OACTIVE;
|
|
|
|
sc_if->sk_cdata.sk_tx_cons = idx;
|
|
}
|
|
|
|
void
|
|
msk_yukon_tick(void *xsc_if)
|
|
{
|
|
struct sk_if_softc *sc_if = xsc_if;
|
|
struct mii_data *mii = &sc_if->sk_mii;
|
|
|
|
mii_tick(mii);
|
|
callout_schedule(&sc_if->sk_tick_ch, hz);
|
|
}
|
|
|
|
void
|
|
msk_intr_yukon(struct sk_if_softc *sc_if)
|
|
{
|
|
u_int8_t status;
|
|
|
|
status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
|
|
/* RX overrun */
|
|
if ((status & SK_GMAC_INT_RX_OVER) != 0) {
|
|
SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
|
|
SK_RFCTL_RX_FIFO_OVER);
|
|
}
|
|
/* TX underrun */
|
|
if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
|
|
SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
|
|
SK_TFCTL_TX_FIFO_UNDER);
|
|
}
|
|
|
|
DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
|
|
}
|
|
|
|
/*
 * Interrupt handler for the controller.  Dispatches per-port GMAC
 * interrupts, then drains the shared status ring, which carries both
 * RX and TX completion events for both ports.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0;
	u_int32_t status;
	u_int16_t idx;
	struct msk_status_desc *cur_st;

	/* Not our interrupt?  Re-enable and bail. */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->sk_ethercom.ec_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->sk_ethercom.ec_if;

	/* Per-port MAC interrupts (FIFO overrun/underrun). */
	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	/*
	 * Drain the status ring up to the chip's put index, re-reading
	 * the put index each pass to pick up events posted meanwhile.
	 * sk_link in each entry selects the originating port.
	 */
	idx = CSR_READ_2(sc, SK_STAT_BMU_PUTIDX);
	while (sc->sk_status_idx != idx) {
		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
		switch (cur_st->sk_opcode & ~SK_Y2_STOPC_OWN) {
		case SK_Y2_STOPC_RXSTAT:
			msk_rxeof(sc->sk_if[cur_st->sk_link],
			    letoh16(cur_st->sk_len),
			    letoh32(cur_st->sk_status));
			/* Tell the chip about the refilled RX slot. */
			SK_IF_WRITE_2(sc->sk_if[cur_st->sk_link], 0,
			    SK_RXQ1_Y2_PREF_PUTIDX,
			    sc->sk_if[cur_st->sk_link]->sk_cdata.sk_rx_prod);
			break;
		case SK_Y2_STOPC_TXSTAT:
			msk_txeof(sc->sk_if[cur_st->sk_link]);
			break;
		default:
			aprint_error("opcode=0x%x\n", cur_st->sk_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);
		idx = CSR_READ_2(sc, SK_STAT_BMU_PUTIDX);
	}

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Restart output if anything is queued. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		msk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		msk_start(ifp1);

#if NRND > 0
	if (RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, status);
#endif

	/* Apply a sysctl-requested interrupt moderation change. */
	if (sc->sk_int_mod_pending)
		msk_update_int_mod(sc);

	return claimed;
}
|
|
|
|
/*
 * Initialize the GMAC/GPHY for one port: reset both, program PHY
 * polarity/media bits, the MAC's receive/transmit parameters, station
 * address, filters and FIFOs, and finally enable RX/TX.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t phy, v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("msk_init_yukon: 2\n"));

#if 0
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
#else
	phy = SK_GPHY_ENA_PAUSE;
#endif

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("msk_init_yukon: phy=%#x\n", phy));

	/* Pulse the PHY reset with the media bits in place. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
	    YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->sk_enaddr[i * 2] |
		    sc_if->sk_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2 comes from the window register copy. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	msk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/*
	 * Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only.
	 * NOTE(review): this driver only attaches to Yukon-2 chips, so
	 * the SK_YUKON_LITE branch looks like dead code inherited from
	 * sk(4) -- confirm.
	 */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}
|
|
|
|
/*
 * Note that to properly initialize any part of the Yukon-2 chip,
 * you first have to take it out of reset mode.
 */
|
|
/*
 * if_init handler: stop the interface, reinitialize the MAC/PHY, RAM
 * buffers, BMUs, descriptor rings and prefetch units, set interrupt
 * moderation, and mark the interface running.  Returns 0 on success or
 * ENOBUFS if ring setup fails.
 */
int
msk_init(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;
	uint32_t imr, sk_imtimer_ticks;


	DPRINTFN(2, ("msk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	msk_stop(ifp,0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/* Configure RAMbuffers: carve the RX and TX windows in chip RAM. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs (magic values from the reference driver). */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		aprint_error("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		aprint_error("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	/* Set interrupt moderation if changed via sysctl. */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
		sk_win_write_4(sc, SK_IMTIMERINIT,
		    SK_IM_USECS(sc->sk_int_mod));
		aprint_verbose("%s: interrupt moderation is %d us\n",
		    sc->sk_dev.dv_xname, sc->sk_int_mod);
	}

	/*
	 * Initialize prefetch engine: reset, unreset, program the last
	 * index and ring base address, then enable.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	/* Hand the initial RX producer index to the chip. */
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/* Configure interrupt handling */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the once-a-second MII tick. */
	callout_schedule(&sc_if->sk_tick_ch, hz);

	splx(s);
	return 0;
}
|
|
|
|
/*
 * if_stop handler: quiesce the hardware for one port, disable its
 * interrupts, and release all RX/TX mbufs and TX DMA maps.  The
 * 'disable' argument is currently unused.
 */
void
msk_stop(struct ifnet *ifp, int disable)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_txmap_entry *dma;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	callout_stop(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the SK_XM_* accesses below target the XMAC of
	 * the GEnesis chips and look like sk(4) leftovers; Yukon-2 has
	 * no XMAC -- confirm they are harmless here.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
	SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Reset the RX and TX prefetch units. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_Y2_INTRS1;
	else
		sc->sk_intrmask &= ~SK_Y2_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
#if 1
			/* Return the slot's DMA map to the free list. */
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
#endif
		}
	}

#if 1
	/* Destroy all TX DMA maps; msk_init_tx_ring() recreates them. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
#endif
}
|
|
|
|
/*
 * Autoconfiguration glue for the mskc controller (parent) device;
 * no detach or activate hooks are provided.
 */
CFATTACH_DECL(mskc, sizeof(struct sk_softc), mskc_probe, mskc_attach,
    NULL, NULL);
|
|
|
|
/*
 * Autoconfiguration glue for the msk per-interface (port) child device;
 * no detach or activate hooks are provided.
 */
CFATTACH_DECL(msk, sizeof(struct sk_if_softc), msk_probe, msk_attach,
    NULL, NULL);
|
|
|
|
#ifdef MSK_DEBUG
|
|
void
|
|
msk_dump_txdesc(struct msk_tx_desc *le, int idx)
|
|
{
|
|
#define DESC_PRINT(X) \
|
|
if (X) \
|
|
printf("txdesc[%d]." #X "=%#x\n", \
|
|
idx, X);
|
|
|
|
DESC_PRINT(letoh32(le->sk_addr));
|
|
DESC_PRINT(letoh16(le->sk_len));
|
|
DESC_PRINT(le->sk_ctl);
|
|
DESC_PRINT(le->sk_opcode);
|
|
#undef DESC_PRINT
|
|
}
|
|
|
|
/*
 * Debug helper: hex+ASCII dump of a buffer, 16 bytes per row.
 * Each row shows the offset, the hex bytes, then the printable
 * ASCII representation (non-printable bytes shown as spaces).
 */
void
msk_dump_bytes(const char *data, int len)
{
	int ch, i, col, remain;

	for (i = 0; i < len; i += 16) {
		/* Bytes remaining in this row (at most 16). */
		remain = len - i;
		if (remain > 16)
			remain = 16;

		printf("%08x ", i);

		/* Hex column, with an extra gap after byte 7. */
		for (col = 0; col < remain; col++) {
			printf("%02x ", data[i + col] & 0xff);
			if ((col & 0xf) == 7 && col > 0)
				printf(" ");
		}

		/* Pad a short final row out to the ASCII column. */
		while (col < 16) {
			printf(" ");
			col++;
		}
		printf(" ");

		/* ASCII column: printable characters only. */
		for (col = 0; col < remain; col++) {
			ch = data[i + col] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (remain < 16)
			break;
	}
}
|
|
|
|
void
|
|
msk_dump_mbuf(struct mbuf *m)
|
|
{
|
|
int count = m->m_pkthdr.len;
|
|
|
|
printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
|
|
|
|
while (count > 0 && m) {
|
|
printf("m=%p, m->m_data=%p, m->m_len=%d\n",
|
|
m, m->m_data, m->m_len);
|
|
msk_dump_bytes(mtod(m, char *), m->m_len);
|
|
|
|
count -= m->m_len;
|
|
m = m->m_next;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
static int
|
|
msk_sysctl_handler(SYSCTLFN_ARGS)
|
|
{
|
|
int error, t;
|
|
struct sysctlnode node;
|
|
struct sk_softc *sc;
|
|
|
|
node = *rnode;
|
|
sc = node.sysctl_data;
|
|
t = sc->sk_int_mod;
|
|
node.sysctl_data = &t;
|
|
error = sysctl_lookup(SYSCTLFN_CALL(&node));
|
|
if (error || newp == NULL)
|
|
return error;
|
|
|
|
if (t < SK_IM_MIN || t > SK_IM_MAX)
|
|
return EINVAL;
|
|
|
|
/* update the softc with sysctl-changed value, and mark
|
|
for hardware update */
|
|
sc->sk_int_mod = t;
|
|
sc->sk_int_mod_pending = 1;
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Set up sysctl(3) MIB, hw.msk.* - Individual controllers will be
 * set up in mskc_attach()
 */
|
|
SYSCTL_SETUP(sysctl_msk, "sysctl msk subtree setup")
{
	int rc;
	const struct sysctlnode *node;

	/* Ensure the top-level "hw" node exists. */
	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    0, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	/* Create hw.msk, the root node for this driver's controls. */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "msk",
	    SYSCTL_DESCR("msk interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	/* Remember the dynamically assigned node number so each
	   controller can hang its own subtree off it at attach time. */
	msk_root_num = node->sysctl_num;
	return;

err:
	/* Fixed typo: error message previously said "syctl_createv". */
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
|