Reorganizing Chelsio 10-gig files into a separate directory.

jklos 2010-03-21 21:15:37 +00:00
parent 86bb752ce4
commit 054fd02ef5
32 changed files with 0 additions and 29877 deletions

View File: cxgb_adapter.h

@@ -1,679 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_adapter.h,v 1.20 2007/09/10 00:59:51 kmacy Exp $
***************************************************************************/
#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_
#include <sys/lock.h>
#include <sys/mutex.h>
#ifdef __FreeBSD__
#include <sys/sx.h>
#include <sys/rman.h>
#endif
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#ifdef __FreeBSD__
#include <net/ethernet.h>
#endif
#include <net/if.h>
#ifdef __NetBSD__
#include <net/if_ether.h>
#endif
#include <net/if_media.h>
#include <machine/bus.h>
#ifdef __FreeBSD__
#include <machine/resource.h>
#include <sys/bus_dma.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <ulp/toecore/toedev.h>
#include <sys/mbufq.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/toedev.h>
#endif
#ifdef __NetBSD__
#include "cxgb_osdep.h"
#include "cxgb_mbuf.h"
#include "cxgb_toedev.h"
#endif
#endif
#ifdef __FreeBSD__
#define USE_SX
#endif
struct adapter;
struct sge_qset;
extern int cxgb_debug;
#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
do { \
printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
mtx_init((lock), lockname, class, flags); \
} while (0)
#define MTX_DESTROY(lock) \
do { \
printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
mtx_destroy((lock)); \
} while (0)
#define SX_INIT(lock, lockname) \
do { \
printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
sx_init((lock), lockname); \
} while (0)
#define SX_DESTROY(lock) \
do { \
printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
sx_destroy((lock)); \
} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
#ifdef __NetBSD__
struct port_device {
struct device original;
device_t dev;
struct adapter *parent;
int port_number;
};
#endif
struct port_info {
struct adapter *adapter;
struct ifnet *ifp;
#ifdef __NetBSD__
struct port_device *pd;
int port;
#endif
int if_flags;
const struct port_type_info *port_type;
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct ifmedia media;
#ifdef USE_SX
struct sx lock;
#else
struct mtx lock;
#endif
uint8_t port_id;
uint8_t tx_chan;
uint8_t txpkt_intf;
uint8_t nqsets;
uint8_t first_qset;
uint8_t hw_addr[ETHER_ADDR_LEN];
#ifdef __FreeBSD__
struct taskqueue *tq;
struct task start_task;
struct task timer_reclaim_task;
#endif
#ifdef __NetBSD__
struct cxgb_task start_task;
struct cxgb_task timer_reclaim_task;
#endif
struct cdev *port_cdev;
#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
char lockbuf[PORT_NAME_LEN];
char taskqbuf[TASKQ_NAME_LEN];
};
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
QUEUES_BOUND = (1 << 3),
FW_UPTODATE = (1 << 4),
TPS_UPTODATE = (1 << 5),
};
#define FL_Q_SIZE 4096
#define JUMBO_Q_SIZE 512
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE 1024
/*
* Types of Tx queues in each queue set. Order here matters, do not change.
* XXX TOE is not implemented yet, so the extra queues are just placeholders.
*/
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
/* careful, the following are set on priv_flags and must not collide with
* IFF_ flags!
*/
enum {
LRO_ACTIVE = (1 << 8),
};
/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8
struct t3_lro_session {
struct mbuf *head;
struct mbuf *tail;
uint32_t seq;
uint16_t ip_len;
uint16_t mss;
uint16_t vtag;
uint8_t npkts;
};
struct lro_state {
unsigned short enabled;
unsigned short active_idx;
unsigned int nactive;
struct t3_lro_session sess[MAX_LRO_SES];
};
#define RX_BUNDLE_SIZE 8
struct rsp_desc;
struct sge_rspq {
uint32_t credits;
uint32_t size;
uint32_t cidx;
uint32_t gen;
uint32_t polling;
uint32_t holdoff_tmr;
uint32_t next_holdoff;
uint32_t imm_data;
struct rsp_desc *desc;
uint32_t cntxt_id;
struct mtx lock;
struct mbuf *rx_head; /* offload packet receive queue head */
struct mbuf *rx_tail; /* offload packet receive queue tail */
uint32_t offload_pkts;
uint32_t offload_bundles;
uint32_t pure_rsps;
uint32_t unhandled_irqs;
bus_addr_t phys_addr;
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN 32
char lockbuf[RSPQ_NAME_LEN];
};
#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif
struct rx_desc;
struct rx_sw_desc;
struct sge_fl {
uint32_t buf_size;
uint32_t credits;
uint32_t size;
uint32_t cidx;
uint32_t pidx;
uint32_t gen;
struct rx_desc *desc;
struct rx_sw_desc *sdesc;
bus_addr_t phys_addr;
uint32_t cntxt_id;
uint64_t empty;
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
bus_dma_tag_t entry_tag;
#ifdef __FreeBSD__
uma_zone_t zone;
#endif
int type;
};
struct tx_desc;
struct tx_sw_desc;
#define TXQ_TRANSMITTING 0x1
struct sge_txq {
uint64_t flags;
uint32_t in_use;
uint32_t size;
uint32_t processed;
uint32_t cleaned;
uint32_t stop_thres;
uint32_t cidx;
uint32_t pidx;
uint32_t gen;
uint32_t unacked;
struct tx_desc *desc;
struct tx_sw_desc *sdesc;
uint32_t token;
bus_addr_t phys_addr;
#ifdef __FreeBSD__
struct task qresume_task;
struct task qreclaim_task;
#endif
#ifdef __NetBSD__
struct cxgb_task qresume_task;
struct cxgb_task qreclaim_task;
#endif
struct port_info *port;
uint32_t cntxt_id;
uint64_t stops;
uint64_t restarts;
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
bus_dma_tag_t entry_tag;
struct mbuf_head sendq;
struct mtx lock;
#define TXQ_NAME_LEN 32
char lockbuf[TXQ_NAME_LEN];
};
enum {
SGE_PSTAT_TSO, /* # of TSO requests */
SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
SGE_PSTATS_LRO_QUEUED, /* # of LRO appended packets */
SGE_PSTATS_LRO_FLUSHED, /* # of LRO flushed packets */
SGE_PSTATS_LRO_X_STREAMS, /* # of exceeded LRO contexts */
};
#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)
struct sge_qset {
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct lro_state lro;
struct sge_txq txq[SGE_TXQ_PER_SET];
uint32_t txq_stopped; /* which Tx queues are stopped */
uint64_t port_stats[SGE_PSTAT_MAX];
struct port_info *port;
int idx; /* qset # */
};
struct sge {
struct sge_qset qs[SGE_QSETS];
struct mtx reg_lock;
};
struct filter_info;
struct adapter {
#ifdef __NetBSD__
struct device original;
#endif
device_t dev; // so we have a compatible pointer
int flags;
TAILQ_ENTRY(adapter) adapter_entry;
/* PCI register resources */
int regs_rid;
struct resource *regs_res;
bus_space_handle_t bh;
bus_space_tag_t bt;
bus_size_t mmio_len;
uint32_t link_width;
#ifdef __NetBSD__
struct pci_attach_args pa;
uint32_t bar0;
bus_space_handle_t bar0_handle;
pci_intr_handle_t intr_handle;
void *intr_cookie;
#endif
/* DMA resources */
bus_dma_tag_t parent_dmat;
bus_dma_tag_t rx_dmat;
bus_dma_tag_t rx_jumbo_dmat;
bus_dma_tag_t tx_dmat;
/* Interrupt resources */
#ifdef __FreeBSD__
struct resource *irq_res;
#endif
int irq_rid;
#ifdef __FreeBSD__
void *intr_tag;
#endif
uint32_t msix_regs_rid;
struct resource *msix_regs_res;
struct resource *msix_irq_res[SGE_QSETS];
int msix_irq_rid[SGE_QSETS];
void *msix_intr_tag[SGE_QSETS];
uint8_t rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
uint8_t rrss_map[SGE_QSETS]; /* reverse RSS map table */
struct filter_info *filters;
/* Tasks */
#ifdef __FreeBSD__
struct task ext_intr_task;
struct task slow_intr_task;
struct task tick_task;
struct task process_responses_task;
struct taskqueue *tq;
#endif
#ifdef __NetBSD__
struct cxgb_task ext_intr_task;
struct cxgb_task slow_intr_task;
struct cxgb_task tick_task;
#endif
struct callout cxgb_tick_ch;
struct callout sge_timer_ch;
/* Register lock for use by the hardware layer */
struct mtx mdio_lock;
struct mtx elmer_lock;
/* Bookkeeping for the hardware layer */
struct adapter_params params;
unsigned int slow_intr_mask;
unsigned long irq_stats[IRQ_NUM_STATS];
struct sge sge;
struct mc7 pmrx;
struct mc7 pmtx;
struct mc7 cm;
struct mc5 mc5;
struct port_info port[MAX_NPORTS];
device_t portdev[MAX_NPORTS];
struct toedev tdev;
char fw_version[64];
uint32_t open_device_map;
uint32_t registered_device_map;
#ifdef USE_SX
struct sx lock;
#else
struct mtx lock;
#endif
#ifdef __FreeBSD__
driver_intr_t *cxgb_intr;
#endif
#ifdef __NetBSD__
int (*cxgb_intr)(void *);
#endif
int msi_count;
#define ADAPTER_LOCK_NAME_LEN 32
char lockbuf[ADAPTER_LOCK_NAME_LEN];
char reglockbuf[ADAPTER_LOCK_NAME_LEN];
char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};
struct t3_rx_mode {
uint32_t idx;
struct port_info *port;
};
#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
#ifdef USE_SX
/* These macros expand to plain expressions; callers supply the semicolon. */
#define PORT_LOCK(port) sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port) sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port) SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)
#define ADAPTER_LOCK(adap) sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap) SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port) mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}
static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
#ifdef __FreeBSD__
*val = pci_read_config(adapter->dev, reg, 4);
#endif
#ifdef __NetBSD__
*val = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg);
#endif
}
static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
#ifdef __FreeBSD__
pci_write_config(adapter->dev, reg, val, 4);
#endif
#ifdef __NetBSD__
pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg, val);
#endif
}
static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
#ifdef __FreeBSD__
*val = pci_read_config(adapter->dev, reg, 2);
#endif
#ifdef __NetBSD__
uint32_t temp;
temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc);
if (reg&0x2)
*val = (temp>>16)&0xffff;
else
*val = temp&0xffff;
#endif
}
static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
#ifdef __FreeBSD__
pci_write_config(adapter->dev, reg, val, 2);
#endif
#ifdef __NetBSD__
uint32_t temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc);
if (reg&0x2)
temp = (temp&0xffff)|(val<<16);
else
temp = (temp&0xffff0000)|val;
pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc, temp);
#endif
}
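
The two NetBSD branches above emulate 16-bit PCI config cycles on top of the 32-bit pci_conf_read()/pci_conf_write() primitives: they access the aligned dword at (reg & 0xfc) and then extract or merge the high or low half according to bit 1 of the register offset. A minimal standalone sketch of that merge step, testable on any host (merge16 is an illustrative name, not a driver function):

#include <assert.h>
#include <stdint.h>

/* Merge a 16-bit value into the correct half of an aligned 32-bit
 * config dword, mirroring the logic of t3_os_pci_write_config_2(). */
static uint32_t merge16(uint32_t dword, int reg, uint16_t val)
{
    if (reg & 0x2)
        return (dword & 0x0000ffff) | ((uint32_t)val << 16);
    return (dword & 0xffff0000) | val;
}

int main(void)
{
    uint32_t d = 0x11223344;
    assert(merge16(d, 0x00, 0xaaaa) == 0x1122aaaa); /* low half  */
    assert(merge16(d, 0x02, 0xbbbb) == 0xbbbb3344); /* high half */
    return 0;
}
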
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
uint8_t *macaddr = NULL;
if (rm->idx == 0)
macaddr = rm->port->hw_addr;
rm->idx++;
return (macaddr);
}
static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
rm->idx = 0;
rm->port = port;
}
static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
return &adap->port[idx];
}
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct toedev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
#ifdef __FreeBSD__
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
#endif
#ifdef __NetBSD__
int t3b_intr(void *data);
int t3_intr_msi(void *data);
int t3_intr_msix(void *data);
#endif
int t3_encap(struct port_info *, struct mbuf **, int *free);
int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);
void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
* XXX figure out how we can return this to being private to sge
*/
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
return container_of(q, struct sge_qset, fl[qidx]);
}
static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
return container_of(q, struct sge_qset, rspq);
}
static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
return container_of(q, struct sge_qset, txq[qidx]);
}
static __inline struct adapter *
tdev2adap(struct toedev *d)
{
return container_of(d, struct adapter, tdev);
}
#undef container_of
#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}
#endif
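
The fl_to_qset(), rspq_to_qset(), txq_to_qset() and tdev2adap() helpers above all rely on the container_of idiom: given a pointer to a member embedded in a larger structure, subtract the member's offset to recover the enclosing object. A minimal standalone sketch of the same idiom, using toy structure names rather than the driver's:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Same idiom as the header above: recover the enclosing structure
 * from a pointer to one of its embedded members. */
#define container_of(p, stype, field) \
    ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

struct inner { int x; };   /* toy stand-in for struct sge_rspq */
struct outer {             /* toy stand-in for struct sge_qset */
    int id;
    struct inner rspq;
};

int main(void)
{
    struct outer qs = { .id = 7, .rspq = { .x = 1 } };
    struct inner *q = &qs.rspq;  /* callers often hold only this */
    struct outer *back = container_of(q, struct outer, rspq);
    assert(back == &qs && back->id == 7);  /* back-pointer recovered */
    return 0;
}
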

View File: cxgb_ael1002.c

@@ -1,343 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_ael1002.c,v 1.3 2008/01/17 06:03:22 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_ael1002.c,v 1.3 2007/06/13 05:36:00 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_include.h>
#endif
#endif
enum {
AEL100X_TX_DISABLE = 9,
AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
AEL1002_XFI_EQL = 0xc015,
AEL1002_LB_EN = 0xc017,
LASI_CTRL = 0x9002,
LASI_STAT = 0x9005
};
static void ael100x_txon(struct cphy *phy)
{
int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
t3_os_sleep(100);
t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
t3_os_sleep(30);
}
static int ael1002_power_down(struct cphy *phy, int enable)
{
int err;
err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
if (!err)
err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
return err;
}
static int ael1002_reset(struct cphy *phy, int wait)
{
int err;
if ((err = ael1002_power_down(phy, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
(err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
0, 1 << 5)))
return err;
return 0;
}
static int ael1002_intr_noop(struct cphy *phy)
{
return 0;
}
static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!err && !(status & BMSR_LSTATUS))
err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
&status);
if (err)
return err;
*link_ok = !!(status & BMSR_LSTATUS);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops ael1002_ops = {
NULL,
ael1002_reset,
ael1002_intr_noop,
ael1002_intr_noop,
ael1002_intr_noop,
ael1002_intr_noop,
NULL,
NULL,
NULL,
NULL,
NULL,
ael100x_get_link_status,
ael1002_power_down,
};
#else
static struct cphy_ops ael1002_ops = {
.reset = ael1002_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = ael100x_get_link_status,
.power_down = ael1002_power_down,
};
#endif
void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
ael100x_txon(phy);
}
static int ael1006_reset(struct cphy *phy, int wait)
{
return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
}
static int ael1006_intr_enable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
static int ael1006_intr_disable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
static int ael1006_intr_clear(struct cphy *phy)
{
u32 val;
return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
static int ael1006_intr_handler(struct cphy *phy)
{
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
if (err)
return err;
return (status & 1) ? cphy_cause_link_change : 0;
}
static int ael1006_power_down(struct cphy *phy, int enable)
{
return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops ael1006_ops = {
NULL,
ael1006_reset,
ael1006_intr_enable,
ael1006_intr_disable,
ael1006_intr_clear,
ael1006_intr_handler,
NULL,
NULL,
NULL,
NULL,
NULL,
ael100x_get_link_status,
ael1006_power_down,
};
#else
static struct cphy_ops ael1006_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
#endif
void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
ael100x_txon(phy);
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops qt2045_ops = {
NULL,
ael1006_reset,
ael1006_intr_enable,
ael1006_intr_disable,
ael1006_intr_clear,
ael1006_intr_handler,
NULL,
NULL,
NULL,
NULL,
NULL,
ael100x_get_link_status,
ael1006_power_down,
};
#else
static struct cphy_ops qt2045_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
#endif
void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
unsigned int stat;
cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
/*
* Some cards where the PHY is supposed to be at address 0 actually
* have it at 1.
*/
if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
stat == 0xffff)
phy->addr = 1;
}
static int xaui_direct_reset(struct cphy *phy, int wait)
{
return 0;
}
static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
status = t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) |
t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT3, phy->addr));
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static int xaui_direct_power_down(struct cphy *phy, int enable)
{
return 0;
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops xaui_direct_ops = {
NULL,
xaui_direct_reset,
ael1002_intr_noop,
ael1002_intr_noop,
ael1002_intr_noop,
ael1002_intr_noop,
NULL,
NULL,
NULL,
NULL,
NULL,
xaui_direct_get_link_status,
xaui_direct_power_down,
};
#else
static struct cphy_ops xaui_direct_ops = {
.reset = xaui_direct_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = xaui_direct_get_link_status,
.power_down = xaui_direct_power_down,
};
#endif
void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops);
}
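
Each PHY in this file exports its entry points through a struct cphy_ops table, filled either positionally (for pre-C99 compilers, under C99_NOT_SUPPORTED) or with designated initializers that leave unnamed members NULL. A minimal sketch of how such an ops table is built and dispatched through, with hypothetical names standing in for the driver's types and call sites:

#include <stdio.h>

struct phy;          /* toy stand-in for struct cphy */
struct phy_ops {     /* toy stand-in for struct cphy_ops */
    int (*reset)(struct phy *p, int wait);
    int (*get_link_status)(struct phy *p, int *link_ok);
};
struct phy { const struct phy_ops *ops; };

static int demo_reset(struct phy *p, int wait) { (void)p; (void)wait; return 0; }
static int demo_get_link(struct phy *p, int *ok) { (void)p; *ok = 1; return 0; }

/* Designated initializers: members not named here (e.g. an intr_enable
 * hook) are implicitly NULL, as in the ael1002/qt2045 tables above. */
static const struct phy_ops demo_ops = {
    .reset = demo_reset,
    .get_link_status = demo_get_link,
};

int main(void)
{
    struct phy p = { .ops = &demo_ops };
    int ok = 0;
    if (p.ops->reset(&p, 0) == 0 && p.ops->get_link_status(&p, &ok) == 0)
        printf("link_ok=%d\n", ok);  /* prints link_ok=1 */
    return 0;
}
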

View File: cxgb_common.h

@@ -1,797 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/common/cxgb_common.h,v 1.7 2007/09/09 01:28:03 kmacy Exp $
***************************************************************************/
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H
#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_osdep.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_osdep.h>
// ??? #include <dev/pci/cxgb_toedev.h>
#endif
#endif
enum {
MAX_FRAME_SIZE = 10240, /* max MAC frame size, includes header + FCS */
EEPROMSIZE = 8192, /* Serial EEPROM size */
SERNUM_LEN = 16, /* Serial # length */
RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PROTO_SRAM_LINES = 128, /* size of protocol sram */
MAX_NPORTS = 4,
TP_TMR_RES = 200,
TP_SRAM_OFFSET = 4096, /* TP SRAM content offset in eeprom */
TP_SRAM_LEN = 2112, /* TP SRAM content size in eeprom */
};
#define MAX_RX_COALESCING_LEN 12288U
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
enum {
SUPPORTED_IRQ = 1 << 24
};
enum { /* adapter interrupt-maintained statistics */
STAT_ULP_CH0_PBL_OOB,
STAT_ULP_CH1_PBL_OOB,
STAT_PCI_CORR_ECC,
IRQ_NUM_STATS /* keep last */
};
enum {
TP_VERSION_MAJOR = 1,
TP_VERSION_MINOR = 1,
TP_VERSION_MICRO = 0
};
#define S_TP_VERSION_MAJOR 16
#define M_TP_VERSION_MAJOR 0xFF
#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
#define G_TP_VERSION_MAJOR(x) \
(((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
#define S_TP_VERSION_MINOR 8
#define M_TP_VERSION_MINOR 0xFF
#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
#define G_TP_VERSION_MINOR(x) \
(((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
#define S_TP_VERSION_MICRO 0
#define M_TP_VERSION_MICRO 0xFF
#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
#define G_TP_VERSION_MICRO(x) \
(((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
enum {
FW_VERSION_MAJOR = 4,
FW_VERSION_MINOR = 7,
FW_VERSION_MICRO = 0
};
enum {
SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
};
enum sge_context_type { /* SGE egress context types */
SGE_CNTXT_RDMA = 0,
SGE_CNTXT_ETH = 2,
SGE_CNTXT_OFLD = 4,
SGE_CNTXT_CTRL = 5
};
enum {
AN_PKT_SIZE = 32, /* async notification packet size */
IMMED_PKT_SIZE = 48 /* packet size for immediate data */
};
struct sg_ent { /* SGE scatter/gather entry */
u32 len[2];
u64 addr[2];
};
#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif
#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
struct cphy;
struct mdio_ops {
int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
struct adapter_info {
unsigned char nports0; /* # of ports on channel 0 */
unsigned char nports1; /* # of ports on channel 1 */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned char mdien:1;
unsigned char mdiinv:1;
unsigned int gpio_out; /* GPIO output settings */
unsigned int gpio_intr; /* GPIO IRQ enable mask */
unsigned long caps; /* adapter capabilities */
const struct mdio_ops *mdio_ops; /* MDIO operations */
const char *desc; /* product description */
};
struct port_type_info {
void (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *ops);
unsigned int caps;
const char *desc;
};
struct mc5_stats {
unsigned long parity_err;
unsigned long active_rgn_full;
unsigned long nfa_srch_err;
unsigned long unknown_cmd;
unsigned long reqq_parity_err;
unsigned long dispq_parity_err;
unsigned long del_act_empty;
};
struct mc7_stats {
unsigned long corr_err;
unsigned long uncorr_err;
unsigned long parity_err;
unsigned long addr_err;
};
struct mac_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_octets_bad; /* total # of octets in error frames */
u64 tx_frames; /* all good frames */
u64 tx_mcast_frames; /* good multicast frames */
u64 tx_bcast_frames; /* good broadcast frames */
u64 tx_pause; /* # of transmitted pause frames */
u64 tx_deferred; /* frames with deferred transmissions */
u64 tx_late_collisions; /* # of late collisions */
u64 tx_total_collisions; /* # of total collisions */
u64 tx_excess_collisions; /* frame errors from excessive collisions */
u64 tx_underrun; /* # of Tx FIFO underruns */
u64 tx_len_errs; /* # of Tx length errors */
u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
u64 tx_excess_deferral; /* # of frames with excessive deferral */
u64 tx_fcs_errs; /* # of frames with bad FCS */
u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_512_1023;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
u64 rx_octets; /* total # of octets in good frames */
u64 rx_octets_bad; /* total # of octets in error frames */
u64 rx_frames; /* all good frames */
u64 rx_mcast_frames; /* good multicast frames */
u64 rx_bcast_frames; /* good broadcast frames */
u64 rx_pause; /* # of received pause frames */
u64 rx_fcs_errs; /* # of received frames with bad FCS */
u64 rx_align_errs; /* alignment errors */
u64 rx_symbol_errs; /* symbol errors */
u64 rx_data_errs; /* data errors */
u64 rx_sequence_errs; /* sequence errors */
u64 rx_runt; /* # of runt frames */
u64 rx_jabber; /* # of jabber frames */
u64 rx_short; /* # of short frames */
u64 rx_too_long; /* # of oversized frames */
u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_512_1023;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
unsigned long tx_fifo_parity_err;
unsigned long rx_fifo_parity_err;
unsigned long tx_fifo_urun;
unsigned long rx_fifo_ovfl;
unsigned long serdes_signal_loss;
unsigned long xaui_pcs_ctc_err;
unsigned long xaui_pcs_align_change;
unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
unsigned long num_resets; /* # times reset due to stuck TX */
};
struct tp_mib_stats {
u32 ipInReceive_hi;
u32 ipInReceive_lo;
u32 ipInHdrErrors_hi;
u32 ipInHdrErrors_lo;
u32 ipInAddrErrors_hi;
u32 ipInAddrErrors_lo;
u32 ipInUnknownProtos_hi;
u32 ipInUnknownProtos_lo;
u32 ipInDiscards_hi;
u32 ipInDiscards_lo;
u32 ipInDelivers_hi;
u32 ipInDelivers_lo;
u32 ipOutRequests_hi;
u32 ipOutRequests_lo;
u32 ipOutDiscards_hi;
u32 ipOutDiscards_lo;
u32 ipOutNoRoutes_hi;
u32 ipOutNoRoutes_lo;
u32 ipReasmTimeout;
u32 ipReasmReqds;
u32 ipReasmOKs;
u32 ipReasmFails;
u32 reserved[8];
u32 tcpActiveOpens;
u32 tcpPassiveOpens;
u32 tcpAttemptFails;
u32 tcpEstabResets;
u32 tcpOutRsts;
u32 tcpCurrEstab;
u32 tcpInSegs_hi;
u32 tcpInSegs_lo;
u32 tcpOutSegs_hi;
u32 tcpOutSegs_lo;
u32 tcpRetransSeg_hi;
u32 tcpRetransSeg_lo;
u32 tcpInErrs_hi;
u32 tcpInErrs_lo;
u32 tcpRtoMin;
u32 tcpRtoMax;
};
struct tp_params {
unsigned int nchan; /* # of channels */
unsigned int pmrx_size; /* total PMRX capacity */
unsigned int pmtx_size; /* total PMTX capacity */
unsigned int cm_size; /* total CM capacity */
unsigned int chan_rx_size; /* per channel Rx size */
unsigned int chan_tx_size; /* per channel Tx size */
unsigned int rx_pg_size; /* Rx page size */
unsigned int tx_pg_size; /* Tx page size */
unsigned int rx_num_pgs; /* # of Rx pages */
unsigned int tx_num_pgs; /* # of Tx pages */
unsigned int ntimer_qs; /* # of timer queues */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int dack_re; /* DACK timer resolution */
};
struct qset_params { /* SGE queue set parameters */
unsigned int polling; /* polling/interrupt service for rspq */
unsigned int lro; /* large receive offload */
unsigned int coalesce_nsecs; /* irq coalescing timer */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
unsigned int jumbo_size; /* # of entries in jumbo free list */
unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
unsigned int cong_thres; /* FL congestion threshold */
unsigned int vector; /* Interrupt (line or vector) number */
};
struct sge_params {
unsigned int max_pkt_size; /* max offload pkt size */
struct qset_params qset[SGE_QSETS];
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nfilters; /* size of filter region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
enum {
DEFAULT_NSERVERS = 512,
DEFAULT_NFILTERS = 128
};
/* MC5 modes, these must be non-0 */
enum {
MC5_MODE_144_BIT = 1,
MC5_MODE_72_BIT = 2
};
/* MC5 min active region size */
enum { MC5_MIN_TIDS = 16 };
struct vpd_params {
unsigned int cclk;
unsigned int mclk;
unsigned int uclk;
unsigned int mdc;
unsigned int mem_timing;
u8 sn[SERNUM_LEN + 1];
u8 eth_base[6];
u8 port_type[MAX_NPORTS];
unsigned short xauicfg[2];
};
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int pcie_cap_addr;
unsigned short speed;
unsigned char width;
unsigned char variant;
};
enum {
PCI_VARIANT_PCI,
PCI_VARIANT_PCIX_MODE1_PARITY,
PCI_VARIANT_PCIX_MODE1_ECC,
PCI_VARIANT_PCIX_266_MODE2,
PCI_VARIANT_PCIE
};
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
const struct adapter_info *info;
#ifdef CONFIG_CHELSIO_T3_CORE
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
#endif
unsigned int nports; /* # of ethernet ports */
unsigned int chan_map; /* bitmap of in-use Tx channels */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
unsigned int offload;
};
enum { /* chip revisions */
T3_REV_A = 0,
T3_REV_B = 2,
T3_REV_B2 = 3,
T3_REV_C = 4,
};
struct trace_params {
u32 sip;
u32 sip_mask;
u32 dip;
u32 dip_mask;
u16 sport;
u16 sport_mask;
u16 dport;
u16 dport_mask;
u32 vlan:12;
u32 vlan_mask:12;
u32 intf:4;
u32 intf_mask:4;
u8 proto;
u8 proto_mask;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned int link_ok; /* link up? */
};
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
struct mc5 {
adapter_t *adapter;
unsigned int tcam_size;
unsigned char part_type;
unsigned char parity_enabled;
unsigned char mode;
struct mc5_stats stats;
};
static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
return p->tcam_size;
}
struct mc7 {
adapter_t *adapter; /* backpointer to adapter */
unsigned int size; /* memory size in bytes */
unsigned int width; /* MC7 interface width */
unsigned int offset; /* register address offset for MC7 instance */
const char *name; /* name of MC7 instance */
struct mc7_stats stats; /* MC7 statistics */
};
static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
return p->size;
}
struct cmac {
adapter_t *adapter;
unsigned int offset;
unsigned char nucast; /* # of address filters for unicast MACs */
unsigned char multiport; /* multiple ports connected to this MAC */
unsigned char ext_port; /* external MAC port */
unsigned char promisc_map; /* which external ports are promiscuous */
unsigned int tx_tcnt;
unsigned int tx_xcnt;
u64 tx_mcnt;
unsigned int rx_xcnt;
unsigned int rx_ocnt;
u64 rx_mcnt;
unsigned int toggle_cnt;
unsigned int txen;
u64 rx_pause;
struct mac_stats stats;
};
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2,
MAC_RXFIFO_SIZE = 32768
};
/* IEEE 802.3ae specified MDIO devices */
enum {
MDIO_DEV_PMA_PMD = 1,
MDIO_DEV_WIS = 2,
MDIO_DEV_PCS = 3,
MDIO_DEV_XGXS = 4
};
/* PHY loopback direction */
enum {
PHY_LOOPBACK_TX = 1,
PHY_LOOPBACK_RX = 2
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 1,
cphy_cause_fifo_error = 2
};
/* PHY operations */
struct cphy_ops {
void (*destroy)(struct cphy *phy);
int (*reset)(struct cphy *phy, int wait);
int (*intr_enable)(struct cphy *phy);
int (*intr_disable)(struct cphy *phy);
int (*intr_clear)(struct cphy *phy);
int (*intr_handler)(struct cphy *phy);
int (*autoneg_enable)(struct cphy *phy);
int (*autoneg_restart)(struct cphy *phy);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
int (*power_down)(struct cphy *phy, int enable);
};
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
adapter_t *adapter; /* associated adapter */
unsigned long fifo_errors; /* FIFO over/under-flows */
const struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
unsigned int *valp)
{
return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}
static inline int mdio_write(struct cphy *phy, int mmd, int reg,
unsigned int val)
{
return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
phy->adapter = adapter;
phy->addr = phy_addr;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio_read = mdio_ops->read;
phy->mdio_write = mdio_ops->write;
}
}
/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180
/* The external MAC needs accumulation every 30 seconds */
#define VSC_STATS_ACCUM_SECS 30
#define XGM_REG(reg_addr, idx) \
((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
struct addr_val_pair {
unsigned int reg_addr;
unsigned int val;
};
#ifdef CONFIG_DEFINED
#include <cxgb_adapter.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_adapter.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_adapter.h>
#endif
#endif
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
#define adapter_info(adap) ((adap)->params.info)
static inline int uses_xaui(const adapter_t *adap)
{
return adapter_info(adap)->caps & SUPPORTED_AUI;
}
static inline int is_10G(const adapter_t *adap)
{
return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
static inline int is_offload(const adapter_t *adap)
{
#ifdef CONFIG_CHELSIO_T3_CORE
return adap->params.offload;
#else
return 0;
#endif
}
static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
{
return adap->params.vpd.cclk / 1000;
}
static inline unsigned int dack_ticks_to_usec(const adapter_t *adap,
unsigned int ticks)
{
return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
}
static inline unsigned int is_pcie(const adapter_t *adap)
{
return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
void t3_set_reg_field(adapter_t *adap, unsigned int addr, u32 mask, u32 val);
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
unsigned int offset);
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(adapter_t *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
void t3_intr_enable(adapter_t *adapter);
void t3_intr_disable(adapter_t *adapter);
void t3_intr_clear(adapter_t *adapter);
void t3_port_intr_enable(adapter_t *adapter, int idx);
void t3_port_intr_disable(adapter_t *adapter, int idx);
void t3_port_intr_clear(adapter_t *adapter, int idx);
int t3_slow_intr_handler(adapter_t *adapter);
int t3_phy_intr_handler(adapter_t *adapter);
void t3_link_changed(adapter_t *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data);
int t3_seeprom_wp(adapter_t *adapter, int enable);
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
u32 *data, int byte_oriented);
int t3_get_tp_version(adapter_t *adapter, u32 *vers);
int t3_check_tpsram_version(adapter_t *adapter);
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_ram, unsigned int size);
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size);
int t3_get_fw_version(adapter_t *adapter, u32 *vers);
int t3_check_fw_version(adapter_t *adapter);
int t3_init_hw(adapter_t *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, adapter_t *adapter, int index);
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai);
int t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset);
void t3_led_ready(adapter_t *adapter);
void t3_fatal_err(adapter_t *adapter);
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on);
void t3_enable_filters(adapter_t *adap);
void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
const u16 *rspq);
int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map);
int t3_set_proto_sram(adapter_t *adap, const u8 *data);
int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask);
void t3_port_failover(adapter_t *adapter, int port);
void t3_failover_done(adapter_t *adapter, int port);
void t3_failover_clear(adapter_t *adapter);
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
u64 *buf);
int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
int fc);
int t3b2_mac_watchdog_task(struct cmac *mac);
void t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
u32 *buf);
#ifdef CONFIG_CHELSIO_T3_CORE
int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size);
void t3_tp_set_offload_mode(adapter_t *adap, int enable);
void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps);
void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]);
void t3_get_cong_cntl_tab(adapter_t *adap,
unsigned short incr[NMTUS][NCCTRL_WIN]);
void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
int filter_index, int invert, int enable);
int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched);
int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg);
void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
unsigned int *ipg);
void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED]);
void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
unsigned int start, unsigned int n);
#endif
void t3_sge_prep(adapter_t *adap, struct sge_params *p);
void t3_sge_init(adapter_t *adap, struct sge_params *p);
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx);
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
u64 base_addr, unsigned int size, unsigned int esize,
unsigned int cong_thres, int gen, unsigned int cidx);
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id);
int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]);
int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
unsigned int credits);
int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n);
int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n);
int t3_vsc7323_init(adapter_t *adap, int nports);
int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port);
int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port);
int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port);
int t3_vsc7323_enable(adapter_t *adap, int port, int which);
int t3_vsc7323_disable(adapter_t *adap, int port, int which);
const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac);
void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */
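
Throughout this header, bit fields packed into 32-bit registers are described by an S_/M_/V_/G_ macro quartet: S_ gives the field's shift, M_ its width mask, V_ builds a register value from a field, and G_ extracts the field again. A standalone illustration using the TP version layout defined above (major in bits 23:16, minor in bits 15:8):

#include <assert.h>
#include <stdint.h>

/* Restated from the header: major in bits 23:16, minor in bits 15:8. */
#define S_TP_VERSION_MAJOR 16
#define M_TP_VERSION_MAJOR 0xFF
#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
#define G_TP_VERSION_MAJOR(x) (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)

#define S_TP_VERSION_MINOR 8
#define M_TP_VERSION_MINOR 0xFF
#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
#define G_TP_VERSION_MINOR(x) (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)

int main(void)
{
    /* Pack TP version 1.1 into one word, then unpack both fields. */
    uint32_t vers = V_TP_VERSION_MAJOR(1) | V_TP_VERSION_MINOR(1);
    assert(vers == 0x00010100);
    assert(G_TP_VERSION_MAJOR(vers) == 1);
    assert(G_TP_VERSION_MINOR(vers) == 1);
    return 0;
}
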

View File: cxgb_config.h

@@ -1,39 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_config.h,v 1.4 2007/06/13 05:35:59 kmacy Exp $
***************************************************************************/
#ifndef _CXGB_CONFIG_H_
#define _CXGB_CONFIG_H_
#ifndef CONFIG_DEFINED
#define CONFIG_CHELSIO_T3_CORE
#endif
#endif

View File: cxgb_ctl_defs.h

@@ -1,160 +0,0 @@
/*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*
* $FreeBSD: src/sys/dev/cxgb/common/cxgb_ctl_defs.h,v 1.3 2007/09/09 01:28:03 kmacy Exp $
*/
#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
#define _CXGB3_OFFLOAD_CTL_DEFS_H
enum {
GET_MAX_OUTSTANDING_WR,
GET_TX_MAX_CHUNK,
GET_TID_RANGE,
GET_STID_RANGE,
GET_RTBL_RANGE,
GET_L2T_CAPACITY,
GET_MTUS,
GET_WR_LEN,
GET_IFF_FROM_MAC,
GET_DDP_PARAMS,
GET_PORTS,
ULP_ISCSI_GET_PARAMS,
ULP_ISCSI_SET_PARAMS,
RDMA_GET_PARAMS,
RDMA_CQ_OP,
RDMA_CQ_SETUP,
RDMA_CQ_DISABLE,
RDMA_CTRL_QP_SETUP,
RDMA_GET_MEM,
FAILOVER = 30,
FAILOVER_DONE = 31,
FAILOVER_CLEAR = 32,
GET_CPUIDX_OF_QSET = 40,
GET_RX_PAGE_INFO = 50,
};
/*
* Structure used to describe a TID range. Valid TIDs are [base, base+num).
*/
struct tid_range {
unsigned int base; /* first TID */
unsigned int num; /* number of TIDs in range */
};
/*
* Structure used to request the size and contents of the MTU table.
*/
struct mtutab {
unsigned int size; /* # of entries in the MTU table */
const unsigned short *mtus; /* the MTU table values */
};
struct net_device;
/*
* Structure used to request the adapter net_device owning a given MAC address.
*/
struct iff_mac {
struct net_device *dev; /* the net_device */
const unsigned char *mac_addr; /* MAC address to lookup */
u16 vlan_tag;
};
struct pci_dev;
/*
* Structure used to request the TCP DDP parameters.
*/
struct ddp_params {
unsigned int llimit; /* TDDP region start address */
unsigned int ulimit; /* TDDP region end address */
unsigned int tag_mask; /* TDDP tag mask */
struct pci_dev *pdev;
};
struct adap_ports {
unsigned int nports; /* number of ports on this adapter */
struct net_device *lldevs[2];
};
/*
* Structure used to return information to the iscsi layer.
*/
struct ulp_iscsi_info {
unsigned int offset;
unsigned int llimit;
unsigned int ulimit;
unsigned int tagmask;
unsigned int pgsz3;
unsigned int pgsz2;
unsigned int pgsz1;
unsigned int pgsz0;
unsigned int max_rxsz;
unsigned int max_txsz;
struct pci_dev *pdev;
};
/*
* Offload TX/RX page information.
*/
struct ofld_page_info {
unsigned int page_size; /* Page size, should be a power of 2 */
unsigned int num; /* Number of pages */
};
/*
* Structure used to return information to the RDMA layer.
*/
struct rdma_info {
unsigned int tpt_base; /* TPT base address */
unsigned int tpt_top; /* TPT last entry address */
unsigned int pbl_base; /* PBL base address */
unsigned int pbl_top; /* PBL last entry address */
unsigned int rqt_base; /* RQT base address */
unsigned int rqt_top; /* RQT last entry address */
unsigned int udbell_len; /* user doorbell region length */
unsigned long udbell_physbase; /* user doorbell physical start addr */
void volatile *kdb_addr; /* kernel doorbell register address */
struct pci_dev *pdev; /* associated PCI device */
};
/*
* Structure used to request an operation on an RDMA completion queue.
*/
struct rdma_cq_op {
unsigned int id;
unsigned int op;
unsigned int credits;
};
/*
* Structure used to setup RDMA completion queues.
*/
struct rdma_cq_setup {
unsigned int id;
unsigned long long base_addr;
unsigned int size;
unsigned int credits;
unsigned int credit_thres;
unsigned int ovfl_mode;
};
/*
* Structure used to setup the RDMA control egress context.
*/
struct rdma_ctrlqp_setup {
unsigned long long base_addr;
unsigned int size;
};
#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
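
Callers of this control interface fill one of the structures above and pass it together with the matching opcode to the driver's offload control entry point (which is not part of this header). As a small illustration of the struct tid_range convention that valid TIDs are [base, base+num), here is a standalone range check; tid_in_range is an illustrative helper, not a driver function:

#include <stdbool.h>
#include <stdio.h>

struct tid_range {       /* as above: valid TIDs are [base, base+num) */
    unsigned int base;
    unsigned int num;
};

/* Half-open interval test matching the [base, base+num) convention. */
static bool tid_in_range(const struct tid_range *r, unsigned int tid)
{
    return tid >= r->base && tid - r->base < r->num;
}

int main(void)
{
    struct tid_range tr = { .base = 65536, .num = 8 };  /* example values */
    printf("%d %d\n", tid_in_range(&tr, 65536), tid_in_range(&tr, 65544));
    /* prints "1 0": base is valid, base+num is one past the end */
    return 0;
}
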

View File: cxgb_firmware_exports.h

@@ -1,177 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/common/cxgb_firmware_exports.h,v 1.2 2007/05/28 22:57:26 kmacy Exp $
***************************************************************************/
#ifndef _FIRMWARE_EXPORTS_H_
#define _FIRMWARE_EXPORTS_H_
/* WR OPCODES supported by the firmware.
*/
#define FW_WROPCODE_FORWARD 0x01
#define FW_WROPCODE_BYPASS 0x05
#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
#define FW_WROPCODE_ULPTX_MEM_READ 0x02
#define FW_WROPCODE_ULPTX_PKT 0x04
#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
#define FW_WROPCODE_OFLD_TX_DATA 0x0D
#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
#define FW_WROPCODE_RI_RDMA_INIT 0x10
#define FW_WROPCODE_RI_RDMA_WRITE 0x11
#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
#define FW_WROPCODE_RI_SEND 0x14
#define FW_WROPCODE_RI_TERMINATE 0x15
#define FW_WROPCODE_RI_RDMA_READ 0x16
#define FW_WROPCODE_RI_RECEIVE 0x17
#define FW_WROPCODE_RI_BIND_MW 0x18
#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
#define FW_WROPCODE_RI_LOCAL_INV 0x1A
#define FW_WROPCODE_RI_MODIFY_QP 0x1B
#define FW_WROPCODE_RI_BYPASS 0x1C
#define FW_WROPOCDE_RSVD 0x1E
#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
/* Maximum size of a WR sent from the host, limited by the SGE.
*
* Note: WRs coming from the ULP or TP are limited only by the CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
* programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
#define FW_N3_WR_NUM 7
#ifndef N3
# define FW_WR_NUM FW_T3_WR_NUM
#else
# define FW_WR_NUM FW_N3_WR_NUM
#endif
/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
* Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
#define FW_TUNNEL_SGEEC_START 8
#define FW_TUNNEL_TID_START 65544
/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
* must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
* (or 'uP Token') FW_CTRL_TID_START.
*
* Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
*/
#define FW_CTRL_NUM 8
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
* queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
*
* Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
* Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
*/
#define FW_OFLD_NUM 8
#define FW_OFLD_SGEEC_START 0
/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
* Queues. These queues must start at SGE Egress Context FW_RI_SGEEC_START
* and at 'TID' FW_RI_TID_START.
*/
#define FW_RI_NUM 1
#define FW_RI_SGEEC_START 65527
#define FW_RI_TID_START 65552
/*
* The TID reserved for RX_PKT traffic.
*/
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
* by the firmware.
*/
#define FW_WRC_NUM \
(65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
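/* With the counts defined above this works out to 65536 + 8 + 8 + 1 + 1 = 65554 contexts. */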
/*
* FW type and version.
*/
#define S_FW_VERSION_TYPE 28
#define M_FW_VERSION_TYPE 0xF
#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
#define G_FW_VERSION_TYPE(x) \
(((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
#define S_FW_VERSION_MAJOR 16
#define M_FW_VERSION_MAJOR 0xFFF
#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
#define G_FW_VERSION_MAJOR(x) \
(((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
#define S_FW_VERSION_MINOR 8
#define M_FW_VERSION_MINOR 0xFF
#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
#define G_FW_VERSION_MINOR(x) \
(((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
#define S_FW_VERSION_MICRO 0
#define M_FW_VERSION_MICRO 0xFF
#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
#define G_FW_VERSION_MICRO(x) \
(((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
#endif /* _FIRMWARE_EXPORTS_H_ */
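The S_/M_/V_/G_ quadruples above are this driver's standard bit-field idiom: S_ names the shift, M_ the mask of the field once shifted down, V_ packs a value into place, and G_ extracts it from a packed word. A minimal stand-alone sketch of the round trip, assuming the definitions above are in scope (the version numbers are invented for illustration):

#include <stdio.h>

int
main(void)
{
	/* pack type 0, version 4.7.0 into one 32-bit word */
	unsigned int vers = V_FW_VERSION_TYPE(0) | V_FW_VERSION_MAJOR(4) |
	    V_FW_VERSION_MINOR(7) | V_FW_VERSION_MICRO(0);

	/* decode the same word with the G_ accessors */
	printf("type %u, firmware %u.%u.%u\n", G_FW_VERSION_TYPE(vers),
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));
	return 0;
}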


@ -1,22 +0,0 @@
/*
* $NetBSD
*/
#ifndef __CHELSIO_INCLUDE_H
#define __CHELSIO_INCLUDE_H
#include "cxgb_osdep.h"
#include "cxgb_mbuf.h"
#include "cxgb_common.h"
#include "cxgb_ioctl.h"
#include "cxgb_offload.h"
#include "cxgb_regs.h"
#include "cxgb_t3_cpl.h"
#include "cxgb_ctl_defs.h"
#include "cxgb_sge_defs.h"
#include "cxgb_firmware_exports.h"
#include "cxgb_mvec.h"
#include "cxgb_toedev.h"
#include "cxgb_jhash.h"
#endif /* __CHELSIO_INCLUDE_H */


@ -1,263 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_ioctl.h,v 1.5 2007/08/17 05:57:03 kmacy Exp $
***************************************************************************/
#ifndef __CHIOCTL_H__
#define __CHIOCTL_H__
/*
* Ioctl commands specific to this driver.
*/
enum {
CH_SETREG = 0x40,
CH_GETREG,
CH_SETTPI,
CH_GETTPI,
CH_DEVUP,
CH_GETMTUTAB,
CH_SETMTUTAB,
CH_GETMTU,
CH_SET_PM,
CH_GET_PM,
CH_GET_TCAM,
CH_SET_TCAM,
CH_GET_TCB,
CH_READ_TCAM_WORD,
CH_GET_MEM,
CH_GET_SGE_CONTEXT,
CH_GET_SGE_DESC,
CH_LOAD_FW,
CH_GET_PROTO,
CH_SET_PROTO,
CH_SET_TRACE_FILTER,
CH_SET_QSET_PARAMS,
CH_GET_QSET_PARAMS,
CH_SET_QSET_NUM,
CH_GET_QSET_NUM,
CH_SET_PKTSCHED,
CH_IFCONF_GETREGS,
CH_GETMIIREGS,
CH_SETMIIREGS,
CH_SET_FILTER,
CH_SET_HW_SCHED,
CH_DEL_FILTER,
};
struct ch_reg {
uint32_t addr;
uint32_t val;
};
struct ch_cntxt {
uint32_t cntxt_type;
uint32_t cntxt_id;
uint32_t data[4];
};
/* context types */
enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
struct ch_desc {
uint32_t cmd;
uint32_t queue_num;
uint32_t idx;
uint32_t size;
uint8_t data[128];
};
struct ch_mem_range {
uint32_t cmd;
uint32_t mem_id;
uint32_t addr;
uint32_t len;
uint32_t version;
uint8_t *buf;
};
struct ch_qset_params {
uint32_t qset_idx;
int32_t txq_size[3];
int32_t rspq_size;
int32_t fl_size[2];
int32_t intr_lat;
int32_t polling;
int32_t cong_thres;
int32_t vector;
int32_t qnum;
};
struct ch_pktsched_params {
uint32_t cmd;
uint8_t sched;
uint8_t idx;
uint8_t min;
uint8_t max;
uint8_t binding;
};
struct ch_hw_sched {
uint32_t cmd;
uint8_t sched;
int8_t mode;
int8_t channel;
int32_t kbps; /* rate in Kbps */
int32_t class_ipg; /* tenths of nanoseconds */
uint32_t flow_ipg; /* usec */
};
struct ch_filter_tuple {
uint32_t sip;
uint32_t dip;
uint16_t sport;
uint16_t dport;
uint16_t vlan:12;
uint16_t vlan_prio:3;
};
struct ch_filter {
uint32_t cmd;
uint32_t filter_id;
struct ch_filter_tuple val;
struct ch_filter_tuple mask;
uint16_t mac_addr_idx;
uint8_t mac_hit:1;
uint8_t proto:2;
uint8_t want_filter_id:1; /* report filter TID instead of RSS hash */
uint8_t pass:1; /* whether to pass or drop packets */
uint8_t rss:1; /* use RSS or specified qset */
uint8_t qset;
};
#ifndef TCB_SIZE
# define TCB_SIZE 128
#endif
/* TCB size in 32-bit words */
#define TCB_WORDS (TCB_SIZE / 4)
enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
struct ch_mtus {
uint32_t cmd;
uint32_t nmtus;
uint16_t mtus[NMTUS];
};
struct ch_pm {
uint32_t cmd;
uint32_t tx_pg_sz;
uint32_t tx_num_pg;
uint32_t rx_pg_sz;
uint32_t rx_num_pg;
uint32_t pm_total;
};
struct ch_tcam {
uint32_t cmd;
uint32_t tcam_size;
uint32_t nservers;
uint32_t nroutes;
uint32_t nfilters;
};
struct ch_tcb {
uint32_t cmd;
uint32_t tcb_index;
uint32_t tcb_data[TCB_WORDS];
};
struct ch_tcam_word {
uint32_t cmd;
uint32_t addr;
uint32_t buf[3];
};
struct ch_trace {
uint32_t cmd;
uint32_t sip;
uint32_t sip_mask;
uint32_t dip;
uint32_t dip_mask;
uint16_t sport;
uint16_t sport_mask;
uint16_t dport;
uint16_t dport_mask;
uint32_t vlan:12,
vlan_mask:12,
intf:4,
intf_mask:4;
uint8_t proto;
uint8_t proto_mask;
uint8_t invert_match:1,
config_tx:1,
config_rx:1,
trace_tx:1,
trace_rx:1;
};
#define REGDUMP_SIZE (4 * 1024)
struct ifconf_regs {
uint32_t version;
uint32_t len; /* bytes */
uint8_t *data;
};
struct mii_data {
uint32_t phy_id;
uint32_t reg_num;
uint32_t val_in;
uint32_t val_out;
};
#define CHELSIO_SETREG _IOW('f', CH_SETREG, struct ch_reg)
#define CHELSIO_GETREG _IOWR('f', CH_GETREG, struct ch_reg)
#define CHELSIO_READ_TCAM_WORD _IOR('f', CH_READ_TCAM_WORD, struct ch_tcam_word)
#define CHELSIO_GET_MEM _IOWR('f', CH_GET_MEM, struct ch_mem_range)
#define CHELSIO_GET_SGE_CONTEXT _IOWR('f', CH_GET_SGE_CONTEXT, struct ch_cntxt)
#define CHELSIO_GET_SGE_DESC _IOWR('f', CH_GET_SGE_DESC, struct ch_desc)
#define CHELSIO_GET_QSET_PARAMS _IOWR('f', CH_GET_QSET_PARAMS, struct ch_qset_params)
#define CHELSIO_SET_QSET_PARAMS _IOW('f', CH_SET_QSET_PARAMS, struct ch_qset_params)
#define CHELSIO_GET_QSET_NUM _IOWR('f', CH_GET_QSET_NUM, struct ch_reg)
#define CHELSIO_SET_QSET_NUM _IOW('f', CH_SET_QSET_NUM, struct ch_reg)
#define CHELSIO_GETMTUTAB _IOR('f', CH_GETMTUTAB, struct ch_mtus)
#define CHELSIO_SETMTUTAB _IOW('f', CH_SETMTUTAB, struct ch_mtus)
#define CHELSIO_SET_TRACE_FILTER _IOW('f', CH_SET_TRACE_FILTER, struct ch_trace)
#define CHELSIO_SET_PKTSCHED _IOW('f', CH_SET_PKTSCHED, struct ch_pktsched_params)
#define CHELSIO_IFCONF_GETREGS _IOWR('f', CH_IFCONF_GETREGS, struct ifconf_regs)
#define SIOCGMIIREG _IOWR('f', CH_GETMIIREGS, struct mii_data)
#define SIOCSMIIREG _IOWR('f', CH_SETMIIREGS, struct mii_data)
#define CHELSIO_SET_HW_SCHED _IOWR('f', CH_SET_HW_SCHED, struct ch_hw_sched)
#define CHELSIO_SET_FILTER _IOW('f', CH_SET_FILTER, struct ch_filter)
#define CHELSIO_DEL_FILTER _IOW('f', CH_DEL_FILTER, struct ch_filter)
#define CHELSIO_DEVUP _IO('f', CH_DEVUP)
#endif
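These commands are issued against the driver's control device from userland with ioctl(2). A minimal sketch of reading one register via CHELSIO_GETREG, assuming this header is on the include path; the device node name and register offset are placeholders, not taken from this commit:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct ch_reg r = { 0x6c, 0 };		/* placeholder register offset */
	int fd = open("/dev/cxgb0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, CHELSIO_GETREG, &r) == 0)
		printf("reg 0x%x = 0x%x\n", r.addr, r.val);
	close(fd);
	return 0;
}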


@ -1,140 +0,0 @@
#ifndef _JHASH_H
#define _JHASH_H
/* jhash.h: Jenkins hash support.
*
* Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
*
* http://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
* lookup2.c, by Bob Jenkins, December 1996, Public Domain.
* hash(), hash2(), hash3, and mix() are externally useful functions.
* Routines to test the hash are included if SELF_TEST is defined.
* You can use this free for any purpose. It has no warranty.
*
* $FreeBSD: src/sys/dev/cxgb/common/jhash.h,v 1.1 2007/05/25 09:48:19 kmacy Exp $
*/
/* NOTE: Arguments are modified. */
#define __jhash_mix(a, b, c) \
{ \
a -= b; a -= c; a ^= (c>>13); \
b -= c; b -= a; b ^= (a<<8); \
c -= a; c -= b; c ^= (b>>13); \
a -= b; a -= c; a ^= (c>>12); \
b -= c; b -= a; b ^= (a<<16); \
c -= a; c -= b; c ^= (b>>5); \
a -= b; a -= c; a ^= (c>>3); \
b -= c; b -= a; b ^= (a<<10); \
c -= a; c -= b; c ^= (b>>15); \
}
/* The golden ratio: an arbitrary value */
#define JHASH_GOLDEN_RATIO 0x9e3779b9
/* The most generic version, hashes an arbitrary sequence
* of bytes. No alignment or length assumptions are made about
* the input key.
*/
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c, len;
const u8 *k = key;
len = length;
a = b = JHASH_GOLDEN_RATIO;
c = initval;
while (len >= 12) {
a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
__jhash_mix(a,b,c);
k += 12;
len -= 12;
}
c += length;
switch (len) {
case 11: c += ((u32)k[10]<<24);
case 10: c += ((u32)k[9]<<16);
case 9 : c += ((u32)k[8]<<8);
case 8 : b += ((u32)k[7]<<24);
case 7 : b += ((u32)k[6]<<16);
case 6 : b += ((u32)k[5]<<8);
case 5 : b += k[4];
case 4 : a += ((u32)k[3]<<24);
case 3 : a += ((u32)k[2]<<16);
case 2 : a += ((u32)k[1]<<8);
case 1 : a += k[0];
};
__jhash_mix(a,b,c);
return c;
}
/* A special optimized version that handles 1 or more u32s.
* The length parameter here is the number of u32s in the key.
*/
static inline u32 jhash2(u32 *k, u32 length, u32 initval)
{
u32 a, b, c, len;
a = b = JHASH_GOLDEN_RATIO;
c = initval;
len = length;
while (len >= 3) {
a += k[0];
b += k[1];
c += k[2];
__jhash_mix(a, b, c);
k += 3; len -= 3;
}
c += length * 4;
switch (len) {
case 2 : b += k[1];
case 1 : a += k[0];
};
__jhash_mix(a,b,c);
return c;
}
/* Special ultra-optimized versions that know they are hashing exactly
* 3, 2 or 1 word(s).
*
* NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
* done at the end is not done here.
*/
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
a += JHASH_GOLDEN_RATIO;
b += JHASH_GOLDEN_RATIO;
c += initval;
__jhash_mix(a, b, c);
return c;
}
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return jhash_3words(a, b, 0, initval);
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
return jhash_3words(a, 0, 0, initval);
}
#endif /* _JHASH_H */
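The two-word variant is the form this driver leans on in practice: hash a key pair, then mask down to a power-of-two table size. A small sketch, assuming this header and the driver's u32 typedef are in scope; the L2 table code later in this commit does exactly this in its arp_hash():

/* Pick one of nbuckets chains for an (IPv4 address, ifindex) pair.
 * nbuckets must be a power of two for the mask to act as a modulo. */
static inline u32
example_bucket(u32 ipaddr, u32 ifindex, u32 nbuckets)
{
	return jhash_2words(ipaddr, ifindex, 0) & (nbuckets - 1);
}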


@ -1,699 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_l2t.c,v 1.7 2008/01/17 06:03:22 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_l2t.c,v 1.3 2007/08/17 05:57:03 kmacy Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#include <sys/bus.h>
#endif
#include <sys/lock.h>
#include <sys/mutex.h>
#if __FreeBSD_version > 700000
#include <sys/rwlock.h>
#endif
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <net/if.h>
#ifdef __FreeBSD__
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#endif
#ifdef __NetBSD__
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_inarp.h>
#endif
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __FreeBSD__
#include <netinet/if_ether.h>
#endif
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#endif
#define VLAN_NONE 0xfff
#define SDL(s) ((struct sockaddr_dl *)s)
#define RT_ENADDR(rt) ((u_char *)LLADDR(SDL((rt))))
#define rt_expire rt_rmx.rmx_expire
#ifdef __FreeBSD__
struct llinfo_arp {
struct callout la_timer;
struct rtentry *la_rt;
struct mbuf *la_hold; /* last packet until resolved/timeout */
u_short la_preempt; /* countdown for pre-expiry arps */
u_short la_asked; /* # requests sent */
};
#endif
/*
* Module locking notes: There is a RW lock protecting the L2 table as a
* whole plus a spinlock per L2T entry. Entry lookups and allocations happen
* under the protection of the table lock, individual entry changes happen
* while holding that entry's spinlock. The table lock nests outside the
* entry locks. Allocations of new entries take the table lock as writers so
* no other lookups can happen while allocating new entries. Entry updates
* take the table lock as readers so multiple entries can be updated in
* parallel. An L2T entry can be dropped by decrementing its reference count
* and therefore can happen in parallel with entry allocation but no entry
* can change state or increment its ref count during allocation as both of
* these perform lookups.
*/
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
return e->vlan >> 13;
}
static inline unsigned int
arp_hash(u32 key, int ifindex, const struct l2t_data *d)
{
return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}
static inline void
neigh_replace(struct l2t_entry *e, struct rtentry *rt)
{
RT_LOCK(rt);
RT_ADDREF(rt);
RT_UNLOCK(rt);
if (e->neigh) {
RT_LOCK(e->neigh);
RT_REMREF(e->neigh);
RT_UNLOCK(e->neigh);
}
e->neigh = rt;
}
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied mbuf is used for the CPL_L2T_WRITE_REQ. Must be called with the
* entry locked.
*/
static int
setup_l2e_send_pending(struct toedev *dev, struct mbuf *m,
struct l2t_entry *e)
{
struct cpl_l2t_write_req *req;
if (!m) {
if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
return (ENOMEM);
}
/*
* XXX MH_ALIGN
*/
req = mtod(m, struct cpl_l2t_write_req *);
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
V_L2T_W_PRIO(vlan_prio(e)));
memcpy(e->dmac, RT_ENADDR(e->neigh), sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
m_set_priority(m, CPL_PRIORITY_CONTROL);
#ifdef __FreeBSD__
cxgb_ofld_send(dev, m);
#endif
while (e->arpq_head) {
m = e->arpq_head;
e->arpq_head = m->m_next;
m->m_next = NULL;
#ifdef __FreeBSD__
cxgb_ofld_send(dev, m);
#endif
}
e->arpq_tail = NULL;
e->state = L2T_STATE_VALID;
return 0;
}
/*
* Add a packet to an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
*/
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
m->m_next = NULL;
if (e->arpq_head)
e->arpq_tail->m_next = m;
else
e->arpq_head = m;
e->arpq_tail = m;
}
int
t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
struct l2t_entry *e)
{
struct rtentry *rt;
struct mbuf *m0;
if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
return (ENOMEM);
rt = e->neigh;
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
mtx_lock(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
mtx_unlock(&e->lock);
case L2T_STATE_VALID: /* fast-path, send the packet on */
#ifdef __FreeBSD__
return cxgb_ofld_send(dev, m);
#endif
case L2T_STATE_RESOLVING:
mtx_lock(&e->lock);
if (e->state != L2T_STATE_RESOLVING) { // ARP already completed
mtx_unlock(&e->lock);
goto again;
}
arpq_enqueue(e, m);
mtx_unlock(&e->lock);
if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
return (ENOMEM);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the m_gethdr above can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
if (arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt)) == 0) {
mtx_lock(&e->lock);
if (e->arpq_head)
setup_l2e_send_pending(dev, m, e);
else
m_freem(m);
mtx_unlock(&e->lock);
}
}
return 0;
}
void
t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e)
{
struct rtentry *rt;
struct mbuf *m0;
if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
return;
rt = e->neigh;
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
mtx_lock(&e->lock);
if (e->state == L2T_STATE_STALE) {
e->state = L2T_STATE_VALID;
}
mtx_unlock(&e->lock);
return;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return;
case L2T_STATE_RESOLVING:
mtx_lock(&e->lock);
if (e->state != L2T_STATE_RESOLVING) { // ARP already completed
mtx_unlock(&e->lock);
goto again;
}
mtx_unlock(&e->lock);
if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
return;
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the m_gethdr above can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
}
return;
}
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
if (!atomic_load_acq_int(&d->nfree))
return NULL;
/* there's definitely a free entry */
for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
if (atomic_load_acq_int(&e->refcnt) == 0)
goto found;
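/* wrapped past the end: rescan from the start (entry 0 is never used) */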
for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e) ;
found:
d->rover = e + 1;
atomic_add_int(&d->nfree, -1);
/*
* The entry we found may be an inactive entry that is
* presently in the hash table. We need to remove it.
*/
if (e->state != L2T_STATE_UNUSED) {
int hash = arp_hash(e->addr, e->ifindex, d);
for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
if (*p == e) {
*p = e->next;
break;
}
e->state = L2T_STATE_UNUSED;
}
return e;
}
/*
* Called when an L2T entry has no more users. The entry is left in the hash
* table since it is likely to be reused but we also bump nfree to indicate
* that the entry can be reallocated for a different neighbor. We also drop
* the existing neighbor reference in case the neighbor is going away and is
* waiting on our reference.
*
* Because entries can be reallocated to other neighbors once their ref count
* drops to 0 we need to take the entry's lock to avoid races with a new
* incarnation.
*/
void
t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
mtx_lock(&e->lock);
if (atomic_load_acq_int(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
RT_LOCK(e->neigh);
RT_REMREF(e->neigh);
RT_UNLOCK(e->neigh);
e->neigh = NULL;
}
}
mtx_unlock(&e->lock);
atomic_add_int(&d->nfree, 1);
}
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
static inline void
reuse_entry(struct l2t_entry *e, struct rtentry *neigh)
{
struct llinfo_arp *la;
la = (struct llinfo_arp *)neigh->rt_llinfo;
mtx_lock(&e->lock); /* avoid race with t3_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)) ||
(neigh->rt_expire > time_uptime))
e->state = L2T_STATE_RESOLVING;
else if (la->la_hold == NULL)
e->state = L2T_STATE_VALID;
else
e->state = L2T_STATE_STALE;
mtx_unlock(&e->lock);
}
struct l2t_entry *
t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
unsigned int smt_idx)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *)neigh->_rt_key;
int ifidx = neigh->rt_ifp->if_index;
int hash = arp_hash(addr, ifidx, d);
rw_wlock(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
e->smt_idx == smt_idx) {
l2t_hold(d, e);
if (atomic_load_acq_int(&e->refcnt) == 1)
reuse_entry(e, neigh);
goto done;
}
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
mtx_lock(&e->lock); /* avoid race with t3_l2t_free */
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
e->state = L2T_STATE_RESOLVING;
e->addr = addr;
e->ifindex = ifidx;
e->smt_idx = smt_idx;
atomic_store_rel_int(&e->refcnt, 1);
neigh_replace(e, neigh);
#ifdef notyet
/*
* XXX need to add accessor function for vlan tag
*/
if (neigh->rt_ifp->if_vlantrunk)
e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
else
#endif
e->vlan = VLAN_NONE;
mtx_unlock(&e->lock);
}
done:
rw_wunlock(&d->lock);
return e;
}
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the TOE.
*
* XXX: maybe we should abandon the latter behavior and just require a failure
* handler.
*/
static void
handle_failed_resolution(struct toedev *dev, struct mbuf *arpq)
{
while (arpq) {
struct mbuf *m = arpq;
#ifdef notyet
struct l2t_mbuf_cb *cb = L2T_MBUF_CB(m);
#endif
arpq = m->m_next;
m->m_next = NULL;
#ifdef notyet
if (cb->arp_failure_handler)
cb->arp_failure_handler(dev, m);
else
#endif
#ifdef __FreeBSD__
cxgb_ofld_send(dev, m);
#endif
}
}
#if defined(NETEVENT) || !defined(CONFIG_CHELSIO_T3_MODULE)
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
struct l2t_entry *e;
struct mbuf *arpq = NULL;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *)neigh->_rt_key;
int ifidx = neigh->rt_ifp->if_index;
int hash = arp_hash(addr, ifidx, d);
struct llinfo_arp *la;
rw_rlock(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx) {
mtx_lock(&e->lock);
goto found;
}
rw_runlock(&d->lock);
return;
found:
rw_runlock(&d->lock);
if (atomic_load_acq_int(&e->refcnt)) {
if (neigh != e->neigh)
neigh_replace(e, neigh);
la = (struct llinfo_arp *)neigh->rt_llinfo;
if (e->state == L2T_STATE_RESOLVING) {
if (la->la_asked >= 5 /* arp_maxtries */) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if (la->la_hold == NULL)
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = (la->la_hold == NULL) ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, RT_ENADDR(neigh), 6))
setup_l2e_send_pending(dev, NULL, e);
}
}
mtx_unlock(&e->lock);
if (arpq)
handle_failed_resolution(dev, arpq);
}
#else
/*
* Called from a kprobe, interrupts are off.
*/
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *) rt_key(neigh);
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
rw_rlock(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx) {
mtx_lock(&e->lock);
if (atomic_load_acq_int(&e->refcnt)) {
if (neigh != e->neigh)
neigh_replace(e, neigh);
e->tdev = dev;
mod_timer(&e->update_timer, jiffies + 1);
}
mtx_unlock(&e->lock);
break;
}
rw_runlock(&d->lock);
}
static void
update_timer_cb(unsigned long data)
{
struct mbuf *arpq = NULL;
struct l2t_entry *e = (struct l2t_entry *)data;
struct rtentry *neigh = e->neigh;
struct toedev *dev = e->tdev;
barrier();
if (!atomic_load_acq_int(&e->refcnt))
return;
rw_rlock(&neigh->lock);
mtx_lock(&e->lock);
if (atomic_load_acq_int(&e->refcnt)) {
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if (neigh_is_connected(neigh) && e->arpq_head)
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)))
setup_l2e_send_pending(dev, NULL, e);
}
}
mtx_unlock(&e->lock);
rw_runlock(&neigh->lock);
if (arpq)
handle_failed_resolution(dev, arpq);
}
#endif
struct l2t_data *
t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
d = cxgb_alloc_mem(size);
if (!d)
return NULL;
d->nentries = l2t_capacity;
d->rover = &d->l2tab[1]; /* entry 0 is not used */
atomic_store_rel_int(&d->nfree, l2t_capacity - 1);
rw_init(&d->lock, "L2T");
for (i = 0; i < l2t_capacity; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
mtx_init(&d->l2tab[i].lock, "L2TAB", NULL, MTX_DEF);
atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
setup_timer(&d->l2tab[i].update_timer, update_timer_cb,
(unsigned long)&d->l2tab[i]);
#endif
#endif
}
return d;
}
void
t3_free_l2t(struct l2t_data *d)
{
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
int i;
/* Stop all L2T timers */
for (i = 0; i < d->nentries; ++i)
del_timer_sync(&d->l2tab[i].update_timer);
#endif
#endif
cxgb_free_mem(d);
}
#ifdef CONFIG_PROC_FS
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static inline void *
l2t_get_idx(struct seq_file *seq, loff_t pos)
{
struct l2t_data *d = seq->private;
return pos >= d->nentries ? NULL : &d->l2tab[pos];
}
static void *
l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN;
}
static void *
l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos + 1);
if (v)
++*pos;
return v;
}
static void
l2t_seq_stop(struct seq_file *seq, void *v)
{
}
static char
l2e_state(const struct l2t_entry *e)
{
switch (e->state) {
case L2T_STATE_VALID: return 'V'; /* valid, fast-path entry */
case L2T_STATE_STALE: return 'S'; /* needs revalidation, but usable */
case L2T_STATE_RESOLVING:
return e->arpq_head ? 'A' : 'R';
default:
return 'U';
}
}
static int
l2t_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Index IP address Ethernet address VLAN "
"Prio State Users SMTIDX Port\n");
else {
char ip[20];
struct l2t_entry *e = v;
mtx_lock(&e->lock);
sprintf(ip, "%u.%u.%u.%u", NIPQUAD(e->addr));
seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
" %3u %c %7u %4u %s\n",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
e->vlan & EVL_VLID_MASK, vlan_prio(e),
l2e_state(e), atomic_load_acq_int(&e->refcnt), e->smt_idx,
e->neigh ? e->neigh->dev->name : "");
mtx_unlock(&e->lock);
}
return 0;
}
#endif


@ -1,180 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_l2t.h,v 1.2 2007/08/17 05:57:03 kmacy Exp $
***************************************************************************/
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H
#ifdef __FreeBSD__
#include <dev/cxgb/ulp/toecore/toedev.h>
#endif
#include <sys/lock.h>
#ifdef __FreeBSD__
#if __FreeBSD_version > 700000
#include <sys/rwlock.h>
#else
#define rwlock mtx
#define rw_wlock(x) mtx_lock((x))
#define rw_wunlock(x) mtx_unlock((x))
#define rw_rlock(x) mtx_lock((x))
#define rw_runlock(x) mtx_unlock((x))
#define rw_init(x, str) mtx_init((x), (str), NULL, MTX_DEF)
#define rw_destroy(x) mtx_destroy((x))
#endif
#endif
#ifdef __NetBSD__
#define rwlock mtx
#define rw_wlock(x) mtx_lock((x))
#define rw_wunlock(x) mtx_unlock((x))
#define rw_rlock(x) mtx_lock((x))
#define rw_runlock(x) mtx_unlock((x))
#define rw_init(x, str) mtx_init((x), (str), NULL, MTX_DEF)
#define rw_destroy(x) mtx_destroy((x))
#endif
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
L2T_STATE_RESOLVING, /* entry needs address resolution */
L2T_STATE_UNUSED /* entry not in use */
};
/*
* Each L2T entry plays multiple roles. First of all, it keeps state for the
* corresponding entry of the HW L2 table and maintains a queue of offload
* packets awaiting address resolution. Second, it is a node of a hash table
* chain, where the nodes of the chain are linked together through their next
* pointer. Finally, each node is a bucket of a hash table, pointing to the
* first element in its chain through its first pointer.
*/
struct l2t_entry {
uint16_t state; /* entry state */
uint16_t idx; /* entry index */
uint32_t addr; /* dest IP address */
int ifindex; /* neighbor's net_device's ifindex */
uint16_t smt_idx; /* SMT index */
uint16_t vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
struct rtentry *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
struct mbuf *arpq_head; /* queue of packets awaiting resolution */
struct mbuf *arpq_tail;
struct mtx lock;
volatile uint32_t refcnt; /* entry reference count */
uint8_t dmac[6]; /* neighbour's MAC address */
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
struct timer_list update_timer;
#ifdef __FreeBSD__
struct toedev *tdev;
#endif
#endif
#endif
};
struct l2t_data {
unsigned int nentries; /* number of entries */
struct l2t_entry *rover; /* starting point for next allocation */
volatile uint32_t nfree; /* number of free entries */
struct rwlock lock;
struct l2t_entry l2tab[0];
};
typedef void (*arp_failure_handler_func)(struct toedev *dev,
struct mbuf *m);
/*
* Callback stored in an mbuf to handle address resolution failure.
*/
struct l2t_mbuf_cb {
arp_failure_handler_func arp_failure_handler;
};
/*
* XXX
*/
#define L2T_MBUF_CB(skb) ((struct l2t_mbuf_cb *)(skb)->cb)
static __inline void set_arp_failure_handler(struct mbuf *m,
arp_failure_handler_func hnd)
{
#if 0
L2T_SKB_CB(skb)->arp_failure_handler = hnd;
#endif
panic("implement me");
}
/*
* Getting to the L2 data from an offload device.
*/
#define L2DATA(dev) ((dev)->l2opt)
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct toedev *dev, struct rtentry *ifp);
struct l2t_entry *t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
unsigned int smt_idx);
int t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
struct l2t_entry *e);
void t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
#ifdef CONFIG_PROC_FS
int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d);
void t3_l2t_proc_free(struct proc_dir_entry *dir);
#else
#define l2t_proc_setup(dir, d) 0
#define l2t_proc_free(dir)
#endif
int cxgb_ofld_send(struct toedev *dev, struct mbuf *m);
static inline int l2t_send(struct toedev *dev, struct mbuf *m,
struct l2t_entry *e)
{
if (__predict_true(e->state == L2T_STATE_VALID))
return cxgb_ofld_send(dev, m);
return t3_l2t_send_slow(dev, m, e);
}
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
t3_l2e_free(d, e);
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_fetchadd_int(&e->refcnt, 1) == 1) /* 0 -> 1 transition */
atomic_add_int(&d->nfree, 1);
}
#endif
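Taken together, the inlines above define the reference discipline a consumer is expected to follow: t3_l2t_get() hands back a held entry, l2t_send() chooses the fast or slow path by entry state, and l2t_release() drops the hold. A sketch of that calling pattern on the FreeBSD side, illustrative only; a real connection keeps its hold for its lifetime rather than releasing per packet:

static int
example_xmit(struct toedev *dev, struct rtentry *rt, struct mbuf *m)
{
	struct l2t_entry *e;
	int err;

	/* look up or allocate the L2T entry; takes a reference */
	e = t3_l2t_get(dev, rt, 0);
	if (e == NULL)
		return (ENOMEM);
	/* cxgb_ofld_send() directly if L2T_STATE_VALID, slow path otherwise */
	err = l2t_send(dev, m, e);
	/* drop the reference; the entry is freed on the last release */
	l2t_release(L2DATA(dev), e);
	return (err);
}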


@ -1,413 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_lro.c,v 1.6 2008/01/17 06:03:21 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_lro.c,v 1.8 2007/08/25 21:07:36 kmacy Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#include <sys/bus.h>
#endif
#include <sys/conf.h>
#include <machine/bus.h>
#ifdef __FreeBSD__
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#endif
#include <sys/queue.h>
#ifdef __FreeBSD__
#include <sys/taskqueue.h>
#endif
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#ifdef CONFIG_DEFINED
#ifdef __FreeBSD__
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif
#include <machine/in_cksum.h>
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#ifndef M_LRO
#define M_LRO 0x0200
#endif
#ifdef DEBUG
#define MBUF_HEADER_CHECK(m) do { \
if ((m->m_len == 0) || (m->m_pkthdr.len == 0) \
|| ((m->m_flags & M_PKTHDR) == 0)) \
panic("lro_flush_session - mbuf len=%d pktlen=%d flags=0x%x\n", \
m->m_len, m->m_pkthdr.len, m->m_flags); \
if ((m->m_flags & M_PKTHDR) == 0) \
panic("first mbuf is not packet header - flags=0x%x\n", \
m->m_flags); \
if ((m->m_len < ETHER_HDR_LEN) || (m->m_pkthdr.len < ETHER_HDR_LEN)) \
panic("packet too small len=%d pktlen=%d\n", \
m->m_len, m->m_pkthdr.len);\
} while (0)
#else
#define MBUF_HEADER_CHECK(m)
#endif
#define IPH_OFFSET (2 + sizeof (struct cpl_rx_pkt) + ETHER_HDR_LEN)
#define LRO_SESSION_IDX_HINT_HASH(hash) (hash & (MAX_LRO_SES - 1))
#define LRO_IDX_INC(idx) idx = (idx + 1) & (MAX_LRO_SES - 1)
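/* both macros above assume MAX_LRO_SES is a power of two */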
static __inline int
lro_match(struct mbuf *m, struct ip *ih, struct tcphdr *th)
{
struct ip *sih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
struct tcphdr *sth = (struct tcphdr *) (sih + 1);
return (th->th_sport == sth->th_sport &&
th->th_dport == sth->th_dport &&
ih->ip_src.s_addr == sih->ip_src.s_addr &&
ih->ip_dst.s_addr == sih->ip_dst.s_addr);
}
static __inline struct t3_lro_session *
lro_lookup(struct lro_state *l, int idx, struct ip *ih, struct tcphdr *th)
{
struct t3_lro_session *s = NULL;
int active = l->nactive;
while (active) {
s = &l->sess[idx];
if (s->head) {
if (lro_match(s->head, ih, th))
return (s);
active--;
}
LRO_IDX_INC(idx);
}
/* no active session matched this flow */
return (NULL);
}
static __inline int
can_lro_packet(struct cpl_rx_pkt *cpl, unsigned int rss_hi)
{
struct ether_header *eh = (struct ether_header *)(cpl + 1);
struct ip *ih = (struct ip *)(eh + 1);
/*
* XXX VLAN support?
*/
if (__predict_false(G_HASHTYPE(ntohl(rss_hi)) != RSS_HASH_4_TUPLE ||
(*((uint8_t *)cpl + 1) & 0x90) != 0x10 ||
cpl->csum != 0xffff || eh->ether_type != ntohs(ETHERTYPE_IP) ||
ih->ip_hl != (sizeof (*ih) >> 2))) {
return 0;
}
return 1;
}
static int
can_lro_tcpsegment(struct tcphdr *th)
{
int olen = (th->th_off << 2) - sizeof (*th);
u8 control_bits = *((u8 *)th + 13);
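/* 0xB7 = FIN|SYN|RST|ACK|URG|CWR: only a pure ACK (0x10) may be merged; PSH and ECE are ignored */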
if (__predict_false((control_bits & 0xB7) != 0x10))
goto no_lro;
if (olen) {
uint32_t *ptr = (u32 *)(th + 1);
if (__predict_false(olen != TCPOLEN_TSTAMP_APPA ||
*ptr != ntohl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP)))
goto no_lro;
}
return 1;
no_lro:
return 0;
}
static __inline void
lro_new_session_init(struct t3_lro_session *s, struct mbuf *m)
{
struct ip *ih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
struct tcphdr *th = (struct tcphdr *) (ih + 1);
int ip_len = ntohs(ih->ip_len);
DPRINTF("%s(s=%p, m=%p)\n", __func__, s, m);
s->head = m;
MBUF_HEADER_CHECK(m);
s->ip_len = ip_len;
s->seq = ntohl(th->th_seq) + ip_len - sizeof(*ih) - (th->th_off << 2);
}
static void
lro_flush_session(struct sge_qset *qs, struct t3_lro_session *s, struct mbuf *m)
{
struct lro_state *l = &qs->lro;
struct mbuf *sm = s->head;
struct ip *ih = (struct ip *)(mtod(sm, uint8_t *) + IPH_OFFSET);
DPRINTF("%s(qs=%p, s=%p, ", __func__,
qs, s);
if (m)
DPRINTF("m=%p)\n", m);
else
DPRINTF("m=NULL)\n");
ih->ip_len = htons(s->ip_len);
ih->ip_sum = 0;
ih->ip_sum = in_cksum_hdr(ih);
MBUF_HEADER_CHECK(sm);
sm->m_flags |= M_LRO;
t3_rx_eth(qs->port->adapter, &qs->rspq, sm, 2);
if (m) {
s->head = m;
lro_new_session_init(s, m);
} else {
s->head = NULL;
l->nactive--;
}
qs->port_stats[SGE_PSTATS_LRO_FLUSHED]++;
}
static __inline struct t3_lro_session *
lro_new_session(struct sge_qset *qs, struct mbuf *m, uint32_t rss_hash)
{
struct lro_state *l = &qs->lro;
int idx = LRO_SESSION_IDX_HINT_HASH(rss_hash);
struct t3_lro_session *s = &l->sess[idx];
DPRINTF("%s(qs=%p, m=%p, rss_hash=0x%x)\n", __func__,
qs, m, rss_hash);
if (__predict_true(!s->head))
goto done;
if (l->nactive > MAX_LRO_SES)
panic("MAX_LRO_PER_QSET exceeded");
if (l->nactive == MAX_LRO_SES) {
lro_flush_session(qs, s, m);
qs->port_stats[SGE_PSTATS_LRO_X_STREAMS]++;
return s;
}
while (1) {
LRO_IDX_INC(idx);
s = &l->sess[idx];
if (!s->head)
break;
}
done:
lro_new_session_init(s, m);
l->nactive++;
return s;
}
static __inline int
lro_update_session(struct t3_lro_session *s, struct mbuf *m)
{
struct mbuf *sm = s->head;
struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(sm, uint8_t *) + 2);
struct cpl_rx_pkt *ncpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + 2);
struct ip *nih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
struct tcphdr *th, *nth = (struct tcphdr *)(nih + 1);
uint32_t seq = ntohl(nth->th_seq);
int plen, tcpiphlen, olen = (nth->th_off << 2) - sizeof (*nth);
DPRINTF("%s(s=%p, m=%p)\n", __func__, s, m);
if (cpl->vlan_valid && cpl->vlan != ncpl->vlan) {
return -1;
}
if (__predict_false(seq != s->seq)) {
DPRINTF("sequence mismatch\n");
return -1;
}
MBUF_HEADER_CHECK(sm);
th = (struct tcphdr *)(mtod(sm, uint8_t *) + IPH_OFFSET + sizeof (struct ip));
if (olen) {
uint32_t *ptr = (uint32_t *)(th + 1);
uint32_t *nptr = (uint32_t *)(nth + 1);
if (__predict_false(ntohl(*(ptr + 1)) > ntohl(*(nptr + 1)) ||
!*(nptr + 2))) {
return -1;
}
*(ptr + 1) = *(nptr + 1);
*(ptr + 2) = *(nptr + 2);
}
th->th_ack = nth->th_ack;
th->th_win = nth->th_win;
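/*
* The merged super-packet keeps the first segment's headers; only the
* new segment's payload (plen bytes) is appended, so the expected
* sequence number and the stored IP length both advance by plen.
*/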
tcpiphlen = (nth->th_off << 2) + sizeof (*nih);
plen = ntohs(nih->ip_len) - tcpiphlen;
s->seq += plen;
s->ip_len += plen;
sm->m_pkthdr.len += plen;
/*
* XXX FIX ME
*
*
*/
#if 0
/* XXX this I *do not* understand */
if (plen > skb_shinfo(s->skb)->gso_size)
skb_shinfo(s->skb)->gso_size = plen;
#endif
#if __FreeBSD_version > 700000
if (plen > sm->m_pkthdr.tso_segsz)
sm->m_pkthdr.tso_segsz = plen;
#endif
DPRINTF("m_adj(%d)\n", (int)(IPH_OFFSET + tcpiphlen));
m_adj(m, IPH_OFFSET + tcpiphlen);
#if 0
if (__predict_false(!skb_shinfo(s->skb)->frag_list))
skb_shinfo(s->skb)->frag_list = skb;
#endif
#if 0
/*
* XXX we really need to be able to
* support vectors of buffers in FreeBSD
*/
int nr = skb_shinfo(s->skb)->nr_frags;
skb_shinfo(s->skb)->frags[nr].page = frag->page;
skb_shinfo(s->skb)->frags[nr].page_offset =
frag->page_offset + IPH_OFFSET + tcpiphlen;
skb_shinfo(s->skb)->frags[nr].size = plen;
skb_shinfo(s->skb)->nr_frags = ++nr;
#endif
return (0);
}
void
t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro)
{
struct sge_qset *qs = rspq_to_qset(rq);
struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
struct ether_header *eh = (struct ether_header *)(cpl + 1);
struct ip *ih;
struct tcphdr *th;
struct t3_lro_session *s = NULL;
if (lro == 0)
goto no_lro;
if (!can_lro_packet(cpl, rss_csum))
goto no_lro;
ih = (struct ip *)(eh + 1);
th = (struct tcphdr *)(ih + 1);
s = lro_lookup(&qs->lro,
LRO_SESSION_IDX_HINT_HASH(rss_hash), ih, th);
if (__predict_false(!can_lro_tcpsegment(th))) {
goto no_lro;
} else if (__predict_false(!s)) {
s = lro_new_session(qs, m, rss_hash);
} else {
if (lro_update_session(s, m)) {
lro_flush_session(qs, s, m);
}
#ifdef notyet
if (__predict_false(s->head->m_pkthdr.len + pi->ifp->if_mtu > 65535)) {
lro_flush_session(qs, s, NULL);
}
#endif
}
qs->port_stats[SGE_PSTATS_LRO_QUEUED]++;
return;
no_lro:
if (s)
lro_flush_session(qs, s, NULL);
if (m->m_len == 0 || m->m_pkthdr.len == 0 || (m->m_flags & M_PKTHDR) == 0)
DPRINTF("rx_eth_lro mbuf len=%d pktlen=%d flags=0x%x\n",
m->m_len, m->m_pkthdr.len, m->m_flags);
t3_rx_eth(adap, rq, m, ethpad);
}
void
t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state)
{
unsigned int idx = state->active_idx;
while (state->nactive) {
struct t3_lro_session *s = &state->sess[idx];
if (s->head)
lro_flush_session(qs, s, NULL);
LRO_IDX_INC(idx);
}
}

File diff suppressed because it is too large


@ -1,93 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/sys/mbufq.h,v 1.2 2007/05/28 22:57:27 kmacy Exp $
***************************************************************************/
#ifndef CXGB_MBUFQ_H_
#define CXGB_MBUFQ_H_
#include <sys/mbuf.h>
struct mbuf_head {
struct mbuf *head;
struct mbuf *tail;
uint32_t qlen;
struct mtx lock;
};
static __inline void
mbufq_init(struct mbuf_head *l)
{
l->head = l->tail = NULL;
l->qlen = 0;
}
static __inline int
mbufq_empty(struct mbuf_head *l)
{
return (l->head == NULL);
}
static __inline int
mbufq_len(struct mbuf_head *l)
{
return (l->qlen);
}
static __inline void
mbufq_tail(struct mbuf_head *l, struct mbuf *m)
{
l->qlen++;
if (l->head == NULL)
l->head = m;
else
l->tail->m_nextpkt = m;
l->tail = m;
}
static __inline struct mbuf *
mbufq_dequeue(struct mbuf_head *l)
{
struct mbuf *m;
m = l->head;
if (m) {
if (m == l->tail)
l->tail = NULL;
l->head = m->m_nextpkt;
l->qlen--;
}
return (m);
}
static __inline struct mbuf *
mbufq_peek(struct mbuf_head *l)
{
return (l->head);
}
#endif /* CXGB_MBUFQ_H_ */
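A short usage sketch: initialize the head once with mbufq_init(), let the producer append with mbufq_tail(), and drain with mbufq_dequeue() until it returns NULL (serialization via the lock member is left to the caller):

static void
example_drain(struct mbuf_head *q)
{
	struct mbuf *m;

	while ((m = mbufq_dequeue(q)) != NULL)
		m_freem(m);	/* or hand the packet on instead */
}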


@ -1,498 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_mc5.c,v 1.3 2008/01/17 06:03:22 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_mc5.c,v 1.5 2007/07/17 06:50:34 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <common/cxgb_common.h>
#include <common/cxgb_regs.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/common/cxgb_common.h>
#include <dev/cxgb/common/cxgb_regs.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_common.h>
#include <dev/pci/cxgb_regs.h>
#endif
#endif
enum {
IDT75P52100 = 4,
IDT75N43102 = 5
};
/* DBGI command mode */
enum {
DBGI_MODE_MBUS = 0,
DBGI_MODE_IDT52100 = 5
};
/* IDT 75P52100 commands */
#define IDT_CMD_READ 0
#define IDT_CMD_WRITE 1
#define IDT_CMD_SEARCH 2
#define IDT_CMD_LEARN 3
/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define IDT_LAR_ADR0 0x180006
#define IDT_LAR_MODE144 0xffff0000
/* IDT SCR and SSR addresses (low 32 bits) */
#define IDT_SCR_ADR0 0x180000
#define IDT_SSR0_ADR0 0x180002
#define IDT_SSR1_ADR0 0x180004
/* IDT GMR base address (low 32 bits) */
#define IDT_GMR_BASE_ADR0 0x180020
/* IDT data and mask array base addresses (low 32 bits) */
#define IDT_DATARY_BASE_ADR0 0
#define IDT_MSKARY_BASE_ADR0 0x80000
/* IDT 75N43102 commands */
#define IDT4_CMD_SEARCH144 3
#define IDT4_CMD_WRITE 4
#define IDT4_CMD_READ 5
/* IDT 75N43102 SCR address (low 32 bits) */
#define IDT4_SCR_ADR0 0x3
/* IDT 75N43102 GMR base addresses (low 32 bits) */
#define IDT4_GMR_BASE0 0x10
#define IDT4_GMR_BASE1 0x20
#define IDT4_GMR_BASE2 0x30
/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
#define IDT4_DATARY_BASE_ADR0 0x1000000
#define IDT4_MSKARY_BASE_ADR0 0x2000000
#define MAX_WRITE_ATTEMPTS 5
#define MAX_ROUTES 2048
/*
* Issue a command to the TCAM and wait for its completion. The address and
* any data required by the command must have been set up by the caller.
*/
static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
static inline void dbgi_wr_addr3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
static inline void dbgi_wr_data3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
static inline void dbgi_rd_rsp3(adapter_t *adapter, u32 *v1, u32 *v2, u32 *v3)
{
*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
}
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
* Returns -1 on failure, 0 on success.
*/
static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
if (mc5_cmd_write(adapter, cmd) == 0)
return 0;
CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", addr_lo);
return -1;
}
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
u32 data_array_base, u32 write_cmd,
int addr_shift)
{
unsigned int i;
adapter_t *adap = mc5->adapter;
/*
* We need the size of the TCAM data and mask arrays in terms of
* 72-bit entries.
*/
unsigned int size72 = mc5->tcam_size;
unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
if (mc5->mode == MC5_MODE_144_BIT) {
size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
server_base *= 2;
}
/* Clear the data array */
dbgi_wr_data3(adap, 0, 0, 0);
for (i = 0; i < size72; i++)
if (mc5_write(adap, data_array_base + (i << addr_shift),
write_cmd))
return -1;
/* Initialize the mask array. */
for (i = 0; i < server_base; i++) {
dbgi_wr_data3(adap, 0x3fffffff, 0xfff80000, 0xff);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
i++;
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
}
dbgi_wr_data3(adap,
mc5->mode == MC5_MODE_144_BIT ? 0xfffffff9 : 0xfffffffd,
0xffffffff, 0xff);
for (; i < size72; i++)
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
return 0;
}
static int init_idt52100(struct mc5 *mc5)
{
int i;
adapter_t *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
/*
* Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
* GMRs 8-9 for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up LAR */
dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up SSRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up GMRs */
for (i = 0; i < 32; ++i) {
if (i >= 12 && i < 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
else if (i == 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
else
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
goto err;
}
/* Set up SCR */
dbgi_wr_data3(adap, 1, 0, 0);
if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
err:
return -EIO;
}
static int init_idt43102(struct mc5 *mc5)
{
int i;
adapter_t *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
V_RDLAT(0xd) | V_SRCHLAT(0x12));
/*
* Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
* for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up GMRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < 7; ++i)
if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
goto err;
for (i = 0; i < 4; ++i)
if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
goto err;
/* Set up SCR */
dbgi_wr_data3(adap, 0xf0000000, 0, 0);
if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
err:
return -EIO;
}
/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
t3_set_reg_field(mc5->adapter, A_MC5_DB_CONFIG, F_PRTYEN | F_MBUSEN,
F_DBGIEN);
}
/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
t3_set_reg_field(mc5->adapter, A_MC5_DB_CONFIG, F_DBGIEN,
V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
/*
* Initialization that requires the OS and protocol layers to already
* be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
{
int err;
unsigned int tcam_size = mc5->tcam_size;
unsigned int mode72 = mc5->mode == MC5_MODE_72_BIT;
adapter_t *adap = mc5->adapter;
if (!tcam_size)
return 0;
if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
return -EINVAL;
if (nfilters && adap->params.rev < T3_REV_C)
mc5->parity_enabled = 0;
/* Reset the TCAM */
t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_TMMODE | F_COMPEN,
V_COMPEN(mode72) | V_TMMODE(mode72) | F_TMRST);
if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
CH_ERR(adap, "TCAM reset timed out\n");
return -1;
}
t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
tcam_size - nroutes - nfilters);
t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
tcam_size - nroutes - nfilters - nservers);
/* All the TCAM addresses we access have only the low 32 bits non-zero */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
mc5_dbgi_mode_enable(mc5);
switch (mc5->part_type) {
case IDT75P52100:
err = init_idt52100(mc5);
break;
case IDT75N43102:
err = init_idt43102(mc5);
break;
default:
CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
err = -EINVAL;
break;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
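/*
 * Worked example of the partition arithmetic above (sizes assumed for
 * illustration): with tcam_size = 65536, nroutes = 1024, nfilters = 4096
 * and nservers = 512, the routing table begins at index 64512, the filter
 * table at 60416 and the server region at 59904; everything below that is
 * ordinary connection TIDs.
 */
static inline void
mc5_partition_sketch(unsigned int tcam_size, unsigned int nroutes,
unsigned int nfilters, unsigned int nservers,
unsigned int *routes_base, unsigned int *filters_base,
unsigned int *servers_base)
{
*routes_base = tcam_size - nroutes;
*filters_base = tcam_size - nroutes - nfilters;
*servers_base = tcam_size - nroutes - nfilters - nservers;
}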
/*
* t3_read_mc5_range - dump a part of the memory managed by MC5
* @mc5: the MC5 handle
* @start: the start address for the dump
* @n: number of 72-bit words to read
* @buf: result buffer
*
* Read n 72-bit words from MC5 memory from the given start location.
*/
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
unsigned int n, u32 *buf)
{
u32 read_cmd;
int err = 0;
adapter_t *adap = mc5->adapter;
if (mc5->part_type == IDT75P52100)
read_cmd = IDT_CMD_READ;
else if (mc5->part_type == IDT75N43102)
read_cmd = IDT4_CMD_READ;
else
return -EINVAL;
mc5_dbgi_mode_enable(mc5);
while (n--) {
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
if (mc5_cmd_write(adap, read_cmd)) {
err = -EIO;
break;
}
dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
buf += 3;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
/*
* MC5 interrupt handler
*/
void t3_mc5_intr_handler(struct mc5 *mc5)
{
adapter_t *adap = mc5->adapter;
u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
if ((cause & F_PARITYERR) && mc5->parity_enabled) {
CH_ALERT(adap, "MC5 parity error\n");
mc5->stats.parity_err++;
}
if (cause & F_REQQPARERR) {
CH_ALERT(adap, "MC5 request queue parity error\n");
mc5->stats.reqq_parity_err++;
}
if (cause & F_DISPQPARERR) {
CH_ALERT(adap, "MC5 dispatch queue parity error\n");
mc5->stats.dispq_parity_err++;
}
if (cause & F_ACTRGNFULL)
mc5->stats.active_rgn_full++;
if (cause & F_NFASRCHFAIL)
mc5->stats.nfa_srch_err++;
if (cause & F_UNKNOWNCMD)
mc5->stats.unknown_cmd++;
if (cause & F_DELACTEMPTY)
mc5->stats.del_act_empty++;
if (cause & MC5_INT_FATAL)
t3_fatal_err(adap);
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
void __devinit t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
64 K, 128 K, 256 K, 32 K
};
#undef K
u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
mc5->adapter = adapter;
mc5->parity_enabled = 1;
mc5->mode = (unsigned char) mode;
mc5->part_type = (unsigned char) G_TMTYPE(cfg);
if (cfg & F_TMTYPEHI)
mc5->part_type |= 4;
mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
if (mode == MC5_MODE_144_BIT)
mc5->tcam_size /= 2;
}
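/*
 * Illustrative decode of the sizing logic above, assuming a cfg word with
 * G_TMPARTSIZE(cfg) == 1: the part holds 128K 72-bit entries, halved to
 * 64K logical entries in MC5_MODE_144_BIT. F_TMTYPEHI supplies bit 2 of
 * the part type on top of G_TMTYPE.
 */
static inline unsigned int
mc5_logical_entries_sketch(u32 cfg, int mode)
{
static const unsigned int part_size_k[] = { 64, 128, 256, 32 };
unsigned int n = part_size_k[G_TMPARTSIZE(cfg)] * 1024;
return (mode == MC5_MODE_144_BIT) ? n / 2 : n;
}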

@@ -1,312 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_mv88e1xxx.c,v 1.3 2008/01/17 06:03:22 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_mv88e1xxx.c,v 1.2 2007/05/28 22:57:26 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_include.h>
#endif
#endif
/* Marvell PHY interrupt status bits. */
#define MV_INTR_JABBER 0x0001
#define MV_INTR_POLARITY_CHNG 0x0002
#define MV_INTR_ENG_DETECT_CHNG 0x0010
#define MV_INTR_DOWNSHIFT 0x0020
#define MV_INTR_MDI_XOVER_CHNG 0x0040
#define MV_INTR_FIFO_OVER_UNDER 0x0080
#define MV_INTR_FALSE_CARRIER 0x0100
#define MV_INTR_SYMBOL_ERROR 0x0200
#define MV_INTR_LINK_CHNG 0x0400
#define MV_INTR_AUTONEG_DONE 0x0800
#define MV_INTR_PAGE_RECV 0x1000
#define MV_INTR_DUPLEX_CHNG 0x2000
#define MV_INTR_SPEED_CHNG 0x4000
#define MV_INTR_AUTONEG_ERR 0x8000
/* Marvell PHY specific registers. */
#define MV88E1XXX_SPECIFIC_CNTRL 16
#define MV88E1XXX_SPECIFIC_STATUS 17
#define MV88E1XXX_INTR_ENABLE 18
#define MV88E1XXX_INTR_STATUS 19
#define MV88E1XXX_EXT_SPECIFIC_CNTRL 20
#define MV88E1XXX_RECV_ERR 21
#define MV88E1XXX_EXT_ADDR 22
#define MV88E1XXX_GLOBAL_STATUS 23
#define MV88E1XXX_LED_CNTRL 24
#define MV88E1XXX_LED_OVERRIDE 25
#define MV88E1XXX_EXT_SPECIFIC_CNTRL2 26
#define MV88E1XXX_EXT_SPECIFIC_STATUS 27
#define MV88E1XXX_VIRTUAL_CABLE_TESTER 28
#define MV88E1XXX_EXTENDED_ADDR 29
#define MV88E1XXX_EXTENDED_DATA 30
/* PHY specific control register fields */
#define S_PSCR_MDI_XOVER_MODE 5
#define M_PSCR_MDI_XOVER_MODE 0x3
#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
/* Extended PHY specific control register fields */
#define S_DOWNSHIFT_ENABLE 8
#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
#define S_DOWNSHIFT_CNT 9
#define M_DOWNSHIFT_CNT 0x7
#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
/* PHY specific status register fields */
#define S_PSSR_JABBER 0
#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
#define S_PSSR_POLARITY 1
#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
#define S_PSSR_RX_PAUSE 2
#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
#define S_PSSR_TX_PAUSE 3
#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
#define S_PSSR_ENERGY_DETECT 4
#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
#define S_PSSR_DOWNSHIFT_STATUS 5
#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
#define S_PSSR_MDI 6
#define V_PSSR_MDI (1 << S_PSSR_MDI)
#define S_PSSR_CABLE_LEN 7
#define M_PSSR_CABLE_LEN 0x7
#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
#define S_PSSR_LINK 10
#define V_PSSR_LINK (1 << S_PSSR_LINK)
#define S_PSSR_STATUS_RESOLVED 11
#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
#define S_PSSR_PAGE_RECEIVED 12
#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
#define S_PSSR_DUPLEX 13
#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
#define S_PSSR_SPEED 14
#define M_PSSR_SPEED 0x3
#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
/* MV88E1XXX MDI crossover register values */
#define CROSSOVER_MDI 0
#define CROSSOVER_MDIX 1
#define CROSSOVER_AUTO 3
#define INTR_ENABLE_MASK (MV_INTR_SPEED_CHNG | MV_INTR_DUPLEX_CHNG | \
MV_INTR_AUTONEG_DONE | MV_INTR_LINK_CHNG | MV_INTR_FIFO_OVER_UNDER | \
MV_INTR_ENG_DETECT_CHNG)
/*
* Reset the PHY. If 'wait' is set, wait until the reset completes.
*/
static int mv88e1xxx_reset(struct cphy *cphy, int wait)
{
return t3_phy_reset(cphy, 0, wait);
}
static int mv88e1xxx_intr_enable(struct cphy *cphy)
{
return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, INTR_ENABLE_MASK);
}
static int mv88e1xxx_intr_disable(struct cphy *cphy)
{
return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, 0);
}
static int mv88e1xxx_intr_clear(struct cphy *cphy)
{
u32 val;
/* Clear PHY interrupts by reading the register. */
return mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &val);
}
static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
{
return t3_mdio_change_bits(cphy, 0, MV88E1XXX_SPECIFIC_CNTRL,
V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE),
V_PSCR_MDI_XOVER_MODE(crossover));
}
static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
{
mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
/* restart autoneg for change to take effect */
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
static int mv88e1xxx_set_loopback(struct cphy *cphy, int mmd, int dir, int on)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_LOOPBACK,
on ? BMCR_LOOPBACK : 0);
}
static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
u32 status;
int sp = -1, dplx = -1, pause = 0;
mdio_read(cphy, 0, MV88E1XXX_SPECIFIC_STATUS, &status);
if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
if (status & V_PSSR_RX_PAUSE)
pause |= PAUSE_RX;
if (status & V_PSSR_TX_PAUSE)
pause |= PAUSE_TX;
dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_PSSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
}
if (link_ok)
*link_ok = (status & V_PSSR_LINK) != 0;
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
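/*
 * Worked example (register value assumed): a specific-status readback of
 * 0xac00 has V_PSSR_STATUS_RESOLVED and V_PSSR_LINK set, V_PSSR_DUPLEX
 * set and G_PSSR_SPEED() == 2, i.e. a resolved 1000 Mb/s full-duplex
 * link with no pause negotiated in either direction.
 */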
static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
{
/*
* Set the downshift counter to 2 so we try to establish Gb link
* twice before downshifting.
*/
return t3_mdio_change_bits(cphy, 0, MV88E1XXX_EXT_SPECIFIC_CNTRL,
V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT),
downshift_enable ? V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2) : 0);
}
static int mv88e1xxx_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
enable ? BMCR_PDOWN : 0);
}
static int mv88e1xxx_intr_handler(struct cphy *cphy)
{
const u32 link_change_intrs = MV_INTR_LINK_CHNG |
MV_INTR_AUTONEG_DONE | MV_INTR_DUPLEX_CHNG |
MV_INTR_SPEED_CHNG | MV_INTR_DOWNSHIFT;
u32 cause;
int cphy_cause = 0;
mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &cause);
cause &= INTR_ENABLE_MASK;
if (cause & link_change_intrs)
cphy_cause |= cphy_cause_link_change;
if (cause & MV_INTR_FIFO_OVER_UNDER)
cphy_cause |= cphy_cause_fifo_error;
return cphy_cause;
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops mv88e1xxx_ops = {
NULL,
mv88e1xxx_reset,
mv88e1xxx_intr_enable,
mv88e1xxx_intr_disable,
mv88e1xxx_intr_clear,
mv88e1xxx_intr_handler,
mv88e1xxx_autoneg_enable,
mv88e1xxx_autoneg_restart,
t3_phy_advertise,
mv88e1xxx_set_loopback,
t3_set_phy_speed_duplex,
mv88e1xxx_get_link_status,
mv88e1xxx_power_down,
};
#else
static struct cphy_ops mv88e1xxx_ops = {
.reset = mv88e1xxx_reset,
.intr_enable = mv88e1xxx_intr_enable,
.intr_disable = mv88e1xxx_intr_disable,
.intr_clear = mv88e1xxx_intr_clear,
.intr_handler = mv88e1xxx_intr_handler,
.autoneg_enable = mv88e1xxx_autoneg_enable,
.autoneg_restart = mv88e1xxx_autoneg_restart,
.advertise = t3_phy_advertise,
.set_loopback = mv88e1xxx_set_loopback,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = mv88e1xxx_get_link_status,
.power_down = mv88e1xxx_power_down,
};
#endif
void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
/* Configure copper PHY transmitter as class A to reduce EMI. */
mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb);
mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004);
mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */
}

@@ -1,205 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007, Kip Macy kmacy@freebsd.org
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Neither the name of Kip Macy nor the names of other
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD: src/sys/dev/cxgb/sys/mvec.h,v 1.5 2007/05/25 16:42:25 kmacy Exp $
*
***************************************************************************/
#ifndef _MVEC_H_
#define _MVEC_H_
#include <sys/mbuf.h>
#define mtomv(m) ((struct mbuf_vec *)((m)->m_pktdat))
#define M_IOVEC 0x100000 /* mbuf immediate data area is used for cluster ptrs */
#define MBUF_IOV_TYPE_MASK ((1<<3)-1)
#define mbuf_vec_set_type(mv, i, type) \
(mv)->mv_vec[(i)].mi_flags = (((mv)->mv_vec[(i)].mi_flags \
& ~MBUF_IOV_TYPE_MASK) | type)
#define mbuf_vec_get_type(mv, i) \
((mv)->mv_vec[(i)].mi_flags & MBUF_IOV_TYPE_MASK)
struct mbuf_iovec {
uint16_t mi_flags; /* per-cluster flags */
uint16_t mi_len; /* length of cluster */
uint32_t mi_offset; /* data offset into cluster */
uint8_t *mi_base; /* pointer to cluster */
volatile uint32_t *mi_refcnt; /* refcnt for cluster*/
#ifdef __i386__
void *mi_args; /* for sf_buf */
#endif
};
#define MAX_MBUF_IOV ((MHLEN-8)/sizeof(struct mbuf_iovec))
struct mbuf_vec {
uint16_t mv_first; /* first valid cluster */
uint16_t mv_count; /* # of clusters */
uint32_t mv_flags; /* flags for iovec */
struct mbuf_iovec mv_vec[MAX_MBUF_IOV];
};
int _m_explode(struct mbuf *);
int _m_collapse(struct mbuf *, int maxbufs, struct mbuf **);
void mb_free_vec(struct mbuf *m);
static inline void
m_iovinit(struct mbuf *m)
{
struct mbuf_vec *mv = mtomv(m);
mv->mv_first = mv->mv_count = 0;
m->m_pkthdr.len = m->m_len = 0;
m->m_flags |= M_IOVEC;
}
static inline void
m_iovappend(struct mbuf *m, uint8_t *cl, int size, int len, int offset)
{
struct mbuf_vec *mv = mtomv(m);
struct mbuf_iovec *iov;
int idx = mv->mv_first + mv->mv_count;
#ifdef __FreeBSD__
KASSERT(idx <= MAX_MBUF_IOV, ("tried to append too many clusters to mbuf iovec"));
#endif
if ((m->m_flags & M_EXT) != 0)
panic("invalid flags in %s", __func__);
if (mv->mv_count == 0)
m->m_data = cl + offset;
iov = &mv->mv_vec[idx];
#ifdef __FreeBSD__
iov->mi_flags = m_gettype(size);
#endif
iov->mi_base = cl;
iov->mi_len = len;
iov->mi_offset = offset;
m->m_pkthdr.len += len;
m->m_len += len;
mv->mv_count++;
}
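/*
 * Usage sketch (hypothetical caller, not part of the original file) for
 * the two helpers above: build an iovec mbuf from two clusters the caller
 * already owns, then later hand it to m_explode() to turn it back into an
 * ordinary chain.
 */
static inline struct mbuf *
m_iov_build_sketch(uint8_t *cl0, int len0, uint8_t *cl1, int len1)
{
struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (NULL);
m_iovinit(m);
m_iovappend(m, cl0, MCLBYTES, len0, 0);
m_iovappend(m, cl1, MCLBYTES, len1, 0);
return (m);
}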
static inline int
m_explode(struct mbuf *m)
{
if ((m->m_flags & M_IOVEC) == 0)
return (0);
return _m_explode(m);
}
static inline int
m_collapse(struct mbuf *m, int maxbufs, struct mbuf **mnew)
{
#if (!defined(__sparc64__) && !defined(__sun4v__))
if (m->m_next == NULL)
#endif
{
*mnew = m;
return (0);
}
return _m_collapse(m, maxbufs, mnew);
}
static inline struct mbuf *
m_free_vec(struct mbuf *m)
{
#ifdef __FreeBSD__
struct mbuf *n = m->m_next;
if (m->m_flags & M_IOVEC)
mb_free_vec(m);
else if (m->m_flags & M_EXT)
mb_free_ext(m);
else
uma_zfree(zone_mbuf, m);
#endif
#ifdef __NetBSD__
struct mbuf *n = NULL;
MFREE(m, n);
#endif
return (n);
}
static inline void
m_freem_vec(struct mbuf *m)
{
while (m != NULL)
m = m_free_vec(m);
}
#ifdef __FreeBSD__
static inline uma_zone_t
m_getzonefromtype(int type)
{
uma_zone_t zone;
switch (type) {
case EXT_MBUF:
zone = zone_mbuf;
break;
case EXT_CLUSTER:
zone = zone_clust;
break;
#if MJUMPAGESIZE != MCLBYTES
case EXT_JUMBOP:
zone = zone_jumbop;
break;
#endif
case EXT_JUMBO9:
zone = zone_jumbo9;
break;
case EXT_JUMBO16:
zone = zone_jumbo16;
break;
#ifndef PACKET_ZONE_DISABLED
case EXT_PACKET:
zone = zone_pack;
break;
#endif
default:
panic("%s: invalid cluster type %d", __func__, type);
}
return (zone);
}
#endif
#if (!defined(__sparc64__) && !defined(__sun4v__))
int
bus_dmamap_load_mvec_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags);
#else
#define bus_dmamap_load_mvec_sg bus_dmamap_load_mbuf_sg
#endif
#endif

File diff suppressed because it is too large

@@ -1,274 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_offload.h,v 1.5 2007/09/10 00:59:51 kmacy Exp $
***************************************************************************/
#ifndef _CXGB_OFFLOAD_H
#define _CXGB_OFFLOAD_H
#ifdef CONFIG_DEFINED
#include <common/cxgb_version.h>
#include <cxgb_config.h>
#include <cxgb_l2t.h>
#include <common/cxgb_tcb.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/common/cxgb_version.h>
#include <dev/cxgb/cxgb_config.h>
#include <dev/cxgb/cxgb_l2t.h>
#include <dev/cxgb/common/cxgb_tcb.h>
#endif
#ifdef __NetBSD__
#include "cxgb_version.h"
#include "cxgb_config.h"
#include "cxgb_l2t.h"
#include "cxgb_tcb.h"
#endif
#endif
struct adapter;
struct cxgb_client;
void cxgb_offload_init(void);
void cxgb_offload_exit(void);
void cxgb_adapter_ofld(struct adapter *adapter);
void cxgb_adapter_unofld(struct adapter *adapter);
int cxgb_offload_activate(struct adapter *adapter);
void cxgb_offload_deactivate(struct adapter *adapter);
int cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n);
void cxgb_set_dummy_ops(struct toedev *dev);
/*
* Client registration. Users of the T3 driver must register themselves.
* The T3 driver will call the add function of every client for each T3
* adapter activated, passing up the toedev ptr. Each client fills out an
* array of callback functions to process CPL messages.
*/
void cxgb_register_client(struct cxgb_client *client);
void cxgb_unregister_client(struct cxgb_client *client);
void cxgb_add_clients(struct toedev *tdev);
void cxgb_remove_clients(struct toedev *tdev);
typedef int (*cxgb_cpl_handler_func)(struct toedev *dev,
struct mbuf *m, void *ctx);
struct cxgb_client {
char *name;
void (*add) (struct toedev *);
void (*remove) (struct toedev *);
cxgb_cpl_handler_func *handlers;
int (*redirect)(void *ctx, struct rtentry *old,
struct rtentry *new,
struct l2t_entry *l2t);
TAILQ_ENTRY(cxgb_client) client_entry;
};
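/*
 * A minimal client sketch with hypothetical handlers (illustrative only):
 * 'add' runs once per activated adapter, and CPL messages are dispatched
 * through the 'handlers' callback array.
 */
static void example_client_add(struct toedev *tdev) { /* per-adapter setup */ }
static void example_client_remove(struct toedev *tdev) { /* per-adapter teardown */ }
static struct cxgb_client example_client = {
.name = "example",
.add = example_client_add,
.remove = example_client_remove,
};
/* At load time: cxgb_register_client(&example_client); */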
/*
* TID allocation services.
*/
int cxgb_alloc_atid(struct toedev *dev, struct cxgb_client *client,
void *ctx);
int cxgb_alloc_stid(struct toedev *dev, struct cxgb_client *client,
void *ctx);
void *cxgb_free_atid(struct toedev *dev, int atid);
void cxgb_free_stid(struct toedev *dev, int stid);
void cxgb_insert_tid(struct toedev *dev, struct cxgb_client *client,
void *ctx,
unsigned int tid);
void cxgb_queue_tid_release(struct toedev *dev, unsigned int tid);
void cxgb_remove_tid(struct toedev *dev, void *ctx, unsigned int tid);
struct toe_tid_entry {
struct cxgb_client *client;
void *ctx;
};
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
CPL_PRIORITY_SETUP = 1, /* connection setup messages */
CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
CPL_PRIORITY_ACK = 1, /* RX ACK messages */
CPL_PRIORITY_CONTROL = 1 /* offload control messages */
};
/* Flags for return value of CPL message handlers */
enum {
CPL_RET_BUF_DONE = 1, // buffer processing done, buffer may be freed
CPL_RET_BAD_MSG = 2, // bad CPL message (e.g., unknown opcode)
CPL_RET_UNKNOWN_TID = 4 // unexpected unknown TID
};
typedef int (*cpl_handler_func)(struct toedev *dev, struct mbuf *m);
/*
* Returns a pointer to the first byte of the CPL header in an mbuf that
* contains a CPL message.
*/
static inline void *cplhdr(struct mbuf *m)
{
return mtod(m, uint8_t *);
}
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
union listen_entry {
struct toe_tid_entry toe_tid;
union listen_entry *next;
};
union active_open_entry {
struct toe_tid_entry toe_tid;
union active_open_entry *next;
};
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables for an offload device.
* The tables themselves are allocated dynamically.
*/
struct tid_info {
struct toe_tid_entry *tid_tab;
unsigned int ntids;
volatile unsigned int tids_in_use;
union listen_entry *stid_tab;
unsigned int nstids;
unsigned int stid_base;
union active_open_entry *atid_tab;
unsigned int natids;
unsigned int atid_base;
/*
* The following members are accessed R/W so we put them in their own
* cache lines.
*
* XXX We could combine the atid fields above with the lock here since
* atids are used once (unlike other tids). OTOH the above fields are
* usually in cache due to tid_tab.
*/
struct mtx atid_lock /* ____cacheline_aligned_in_smp */;
union active_open_entry *afree;
unsigned int atids_in_use;
struct mtx stid_lock /*____cacheline_aligned */;
union listen_entry *sfree;
unsigned int stids_in_use;
};
struct toe_data {
#ifdef notyet
struct list_head list_node;
#endif
struct toedev *dev;
unsigned int tx_max_chunk; /* max payload for TX_DATA */
unsigned int max_wrs; /* max in-flight WRs per connection */
unsigned int nmtus;
const unsigned short *mtus;
struct tid_info tid_maps;
struct toe_tid_entry *tid_release_list;
struct mtx tid_release_lock;
#ifdef __FreeBSD__
struct task tid_release_task;
#endif
#ifdef __NetBSD__
struct cxgb_task tid_release_task;
#endif
};
/*
* toedev -> toe_data accessor
*/
#define TOE_DATA(dev) (*(struct toe_data **)&(dev)->l4opt)
/*
* Map an ATID or STID to their entries in the corresponding TID tables.
*/
static inline union active_open_entry *atid2entry(const struct tid_info *t,
unsigned int atid)
{
return &t->atid_tab[atid - t->atid_base];
}
static inline union listen_entry *stid2entry(const struct tid_info *t,
unsigned int stid)
{
return &t->stid_tab[stid - t->stid_base];
}
/*
* Find the connection corresponding to a TID.
*/
static inline struct toe_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
}
/*
* Find the connection corresponding to a server TID.
*/
static inline struct toe_tid_entry *lookup_stid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
return NULL;
return &(stid2entry(t, tid)->toe_tid);
}
/*
* Find the connection corresponding to an active-open TID.
*/
static inline struct toe_tid_entry *lookup_atid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->atid_base || tid >= t->atid_base + t->natids)
return NULL;
return &(atid2entry(t, tid)->toe_tid);
}
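/*
 * Lookup usage sketch (hypothetical TID value): resolve an incoming CPL's
 * TID to the client context registered for it, or NULL for an unknown TID.
 */
static inline void *
lookup_tid_ctx_sketch(const struct tid_info *t, unsigned int tid)
{
struct toe_tid_entry *e = lookup_tid(t, tid);
return (e != NULL) ? e->ctx : NULL;
}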
void *cxgb_alloc_mem(unsigned long size);
void cxgb_free_mem(void *addr);
void cxgb_neigh_update(struct rtentry *rt);
void cxgb_redirect(struct rtentry *old, struct rtentry *new);
int process_rx(struct toedev *dev, struct mbuf **m, int n);
int attach_toedev(struct toedev *dev);
void detach_toedev(struct toedev *dev);
#endif

@@ -1,207 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/cxgb_include.h>
#include <altq/altq_conf.h>
int cxgb_initialized = FALSE;
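/*
 * Note: the atomic_* shims below are not actually atomic; callers are
 * presumed to provide their own serialization.
 */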
int atomic_fetchadd_int(volatile int *p, int v)
{
int tmp = *p;
*p += v;
return (tmp);
}
#if 0
int atomic_add_int(volatile int *p, int v)
{
return (*p += v);
}
#endif
int atomic_load_acq_int(volatile int *p)
{
return (*p);
}
void atomic_store_rel_int(volatile int *p, int v)
{
*p = v;
}
u_short in_cksum_hdr(struct ip *ih)
{
u_long sum = 0;
u_short *p = (u_short *)ih;
int i;
i = ih->ip_hl*2;
while (i--)
sum += *p++;
/* fold the 32-bit sum into 16 bits */
sum = (sum >> 16) + (sum & 0xffff);
sum += (sum >> 16);
return (~sum);
}
void m_cljset(struct mbuf *m, void *cl, int type)
{
MEXTADD(m, cl, m->m_len, M_DEVBUF, NULL, NULL);
}
int
_m_explode(struct mbuf *m)
{
int i, offset, type, first, len;
uint8_t *cl;
struct mbuf *m0, *head = NULL;
struct mbuf_vec *mv;
#ifdef INVARIANTS
len = m->m_len;
m0 = m->m_next;
while (m0) {
KASSERT((m0->m_flags & M_PKTHDR) == 0,
("pkthdr set on intermediate mbuf - pre"));
len += m0->m_len;
m0 = m0->m_next;
}
if (len != m->m_pkthdr.len)
panic("at start len=%d pktlen=%d", len, m->m_pkthdr.len);
#endif
mv = (struct mbuf_vec *)((m)->m_pktdat);
first = mv->mv_first;
for (i = mv->mv_count + first - 1; i > first; i--) {
type = mbuf_vec_get_type(mv, i);
cl = mv->mv_vec[i].mi_base;
offset = mv->mv_vec[i].mi_offset;
len = mv->mv_vec[i].mi_len;
#if 0
if (__predict_false(type == EXT_MBUF)) {
m0 = (struct mbuf *)cl;
KASSERT((m0->m_flags & M_EXT) == 0);
m0->m_len = len;
m0->m_data = cl + offset;
goto skip_cluster;
} else
#endif
if ((m0 = m_get(M_NOWAIT, MT_DATA)) == NULL) {
/*
* Allocation failed; free what has been built so far
* so we do not leak the clusters already chained.
*/
m_freem(head);
return (ENOMEM);
}
m0->m_flags = 0;
m0->m_len = mv->mv_vec[i].mi_len;
m_cljset(m0, (uint8_t *)cl, type);
if (offset)
m_adj(m0, offset);
// skip_cluster:
m0->m_next = head;
m->m_len -= m0->m_len;
head = m0;
}
offset = mv->mv_vec[first].mi_offset;
cl = mv->mv_vec[first].mi_base;
type = mbuf_vec_get_type(mv, first);
m->m_flags &= ~(M_IOVEC);
m_cljset(m, cl, type);
if (offset)
m_adj(m, offset);
m->m_next = head;
head = m;
M_SANITY(m, 0);
return (0);
}
/*
* Allocate a chunk of memory using malloc(9).
* The allocated memory is cleared.
*/
void *
cxgb_alloc_mem(unsigned long size)
{
return malloc(size, M_DEVBUF, M_ZERO);
}
/*
* Free memory allocated through cxgb_alloc_mem().
*/
void
cxgb_free_mem(void *addr)
{
free(addr, M_DEVBUF);
}
void pci_enable_busmaster(device_t dev)
{
adapter_t *sc = (adapter_t *)dev;
uint32_t reg;
t3_os_pci_read_config_4(sc, PCI_COMMAND_STATUS_REG, &reg);
reg |= PCI_COMMAND_MASTER_ENABLE;
t3_os_pci_write_config_4(sc, PCI_COMMAND_STATUS_REG, reg);
}

@@ -1,480 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/cxgb_osdep.h,v 1.14 2007/09/10 00:59:51 kmacy Exp $
***************************************************************************/
#include <sys/param.h>
#include <sys/systm.h>
#ifdef __FreeBSD__
#include <sys/ctype.h>
#endif
#include <sys/endian.h>
#ifdef __FreeBSD__
#include <sys/bus.h>
#endif
#include <dev/mii/mii.h>
#ifdef __FreeBSD__
#ifdef CONFIG_DEFINED
#include <common/cxgb_version.h>
#include <cxgb_config.h>
#else
#include <dev/cxgb/common/cxgb_version.h>
#include <dev/cxgb/cxgb_config.h>
#endif
#endif
#ifndef _CXGB_OSDEP_H_
#define _CXGB_OSDEP_H_
#ifdef __NetBSD__
typedef char *caddr_t;
#include <dev/pci/cxgb_version.h>
#include <dev/pci/cxgb_config.h>
#include <sys/mbuf.h>
#include <machine/bus.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/simplelock.h>
#include <sys/kthread.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
void pci_enable_busmaster(device_t dev);
struct cxgb_task
{
const char *name;
void (*func)(struct work *, void *);
struct workqueue *wq;
struct work w;
void *context;
};
void cxgb_make_task(void *);
void m_cljset(struct mbuf *m, void *cl, int type);
#define mtx simplelock
#define mtx_init(a, b, c, d) { (a)->lock_data = __SIMPLELOCK_UNLOCKED; }
#define mtx_destroy(a)
#define mtx_lock(a) simple_lock(a)
#define mtx_unlock(a) simple_unlock(a)
#define mtx_trylock(a) simple_lock_try(a)
#define MA_OWNED 1
#define MA_NOTOWNED 0
#define mtx_assert(a, w)
#if 0
#define RT_LOCK_INIT(_rt) \
mtx_init(&(_rt)->rt_mtx, "rtentry", NULL, MTX_DEF | MTX_DUPOK)
#define RT_LOCK(_rt) mtx_lock(&(_rt)->rt_mtx)
#define RT_UNLOCK(_rt) mtx_unlock(&(_rt)->rt_mtx)
#define RT_LOCK_DESTROY(_rt) mtx_destroy(&(_rt)->rt_mtx)
#define RT_LOCK_ASSERT(_rt) mtx_assert(&(_rt)->rt_mtx, MA_OWNED)
#else
#define RT_LOCK_INIT(_rt)
#define RT_LOCK(_rt)
#define RT_UNLOCK(_rt)
#define RT_LOCK_DESTROY(_rt)
#define RT_LOCK_ASSERT(_rt)
#endif
#define RT_ADDREF(_rt) do { \
RT_LOCK_ASSERT(_rt); \
KASSERT((_rt)->rt_refcnt >= 0); \
(_rt)->rt_refcnt++; \
} while (0)
#define RT_REMREF(_rt) do { \
RT_LOCK_ASSERT(_rt); \
KASSERT((_rt)->rt_refcnt > 0); \
(_rt)->rt_refcnt--; \
} while (0)
#define EVL_VLID_MASK 0x0FFF
static inline void critical_enter(void)
{
}
static inline void critical_exit(void)
{
}
static inline void device_printf(device_t d, ...)
{
}
int atomic_fetchadd_int(volatile int *p, int v);
#if 0
int atomic_add_int(volatile int *p, int v);
#endif
int atomic_load_acq_int(volatile int *p);
void atomic_store_rel_int(volatile int *p, int v);
u_short in_cksum_hdr(struct ip *ih);
#define if_drv_flags if_flags
#define IFF_DRV_RUNNING IFF_RUNNING
#define IFF_DRV_OACTIVE IFF_OACTIVE
#define MJUM16BYTES (16*1024)
#define MJUMPAGESIZE PAGE_SIZE
#if 0
#define rw_rlock(x) rw_enter(x, RW_READER)
#define rw_runlock(x) rw_exit(x)
#define rw_wlock(x) rw_enter(x, RW_WRITER)
#define rw_wunlock(x) rw_exit(x)
#endif
#define callout_drain(x) callout_stop(x)
static inline int atomic_cmpset_ptr(volatile long *dst, long exp, long src)
{
if (*dst == exp)
{
*dst = src;
return (1);
}
return (0);
}
#define atomic_cmpset_int(a, b, c) atomic_cmpset_ptr((volatile long *)a, (long)b, (long)c)
static inline int atomic_set_int(volatile int *dst, int val)
{
*dst = val;
return (val);
}
static inline void log(int x, ...)
{
}
struct cxgb_attach_args
{
int port;
};
#define INT3 __asm("int $3")
static inline struct mbuf *
m_defrag(struct mbuf *m0, int flags)
{
struct mbuf *m;
MGETHDR(m, flags, MT_DATA);
if (m == NULL)
return NULL;
M_COPY_PKTHDR(m, m0);
MCLGET(m, flags);
if ((m->m_flags & M_EXT) == 0) {
m_free(m);
return NULL;
}
m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
m->m_len = m->m_pkthdr.len;
return m;
}
#endif
typedef struct adapter adapter_t;
struct sge_rspq;
struct t3_mbuf_hdr {
struct mbuf *mh_head;
struct mbuf *mh_tail;
};
#define PANIC_IF(exp) do { \
if (exp) \
panic("BUG: %s", exp); \
} while (0)
#define m_get_priority(m) ((uintptr_t)(m)->m_pkthdr.rcvif)
#define m_set_priority(m, pri) ((m)->m_pkthdr.rcvif = (struct ifnet *)((uintptr_t)pri))
#if __FreeBSD_version > 700030
#define INTR_FILTERS
#define FIRMWARE_LATEST
#endif
#if ((__FreeBSD_version > 602103) && (__FreeBSD_version < 700000))
#define FIRMWARE_LATEST
#endif
#if __FreeBSD_version > 700000
#define MSI_SUPPORTED
#define TSO_SUPPORTED
#define VLAN_SUPPORTED
#define TASKQUEUE_CURRENT
#else
#define if_name(ifp) (ifp)->if_xname
#define M_SANITY(m, n)
#endif
#define __read_mostly __section(".data.read_mostly")
/*
* Workaround for weird Chelsio issue
*/
#if __FreeBSD_version > 700029
#define PRIV_SUPPORTED
#endif
#define CXGB_TX_CLEANUP_THRESHOLD 32
#define LOG_WARNING 1
#define LOG_ERR 2
#ifdef DEBUG_PRINT
#define DPRINTF printf
#else
#define DPRINTF(...)
#endif
#define TX_MAX_SIZE (1 << 16) /* 64KB */
#define TX_MAX_SEGS 36 /* maximum supported by card */
#define TX_MAX_DESC 4 /* max descriptors per packet */
#define TX_START_MIN_DESC (TX_MAX_DESC << 2)
#if 0
#define TX_START_MAX_DESC (TX_ETH_Q_SIZE >> 2) /* maximum number of descriptors */
#endif
#define TX_START_MAX_DESC (TX_MAX_DESC << 3) /* maximum number of descriptors
* used per call to start */
#define TX_CLEAN_MAX_DESC (TX_MAX_DESC << 4) /* maximum tx descriptors
* to clean per iteration */
#if defined(__i386__) || defined(__amd64__)
#define mb() __asm volatile("mfence":::"memory")
#define rmb() __asm volatile("lfence":::"memory")
#define wmb() __asm volatile("sfence" ::: "memory")
#define smp_mb() mb()
#define L1_CACHE_BYTES 64
static __inline
void prefetch(void *x)
{
__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
extern void kdb_backtrace(void);
#define WARN_ON(condition) do { \
if (unlikely((condition)!=0)) { \
log(LOG_WARNING, "BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __func__); \
kdb_backtrace(); \
} \
} while (0)
#else /* !i386 && !amd64 */
#define mb()
#define rmb()
#define wmb()
#define smp_mb()
#define prefetch(x)
#define L1_CACHE_BYTES 32
#endif
#define DBG_RX (1 << 0)
static const int debug_flags = DBG_RX;
#ifdef DEBUG_PRINT
#define DBG(flag, msg) do { \
if ((flag & debug_flags)) \
printf msg; \
} while (0)
#else
#define DBG(...)
#endif
#define promisc_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_PROMISC)
#define allmulti_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_ALLMULTI)
#ifdef __FreeBSD__
#define CH_ERR(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
#define CH_WARN(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
#endif
#ifdef __NetBSD__
#define CH_ERR(adap, fmt, ...) { }
#define CH_WARN(adap, fmt, ...) { }
#define CH_ALERT(adap, fmt, ...) { }
#endif
#define t3_os_sleep(x) DELAY((x) * 1000)
#define test_and_clear_bit(bit, p) atomic_cmpset_int((p), ((*(p)) | bit), ((*(p)) & ~bit))
#define max_t(type, a, b) (type)max((a), (b))
#define net_device ifnet
#define cpu_to_be32 htobe32
/* Standard PHY definitions */
#define BMCR_LOOPBACK BMCR_LOOP
#define BMCR_ISOLATE BMCR_ISO
#define BMCR_ANENABLE BMCR_AUTOEN
#define BMCR_SPEED1000 BMCR_SPEED1
#define BMCR_SPEED100 BMCR_SPEED0
#define BMCR_ANRESTART BMCR_STARTNEG
#define BMCR_FULLDPLX BMCR_FDX
#define BMSR_LSTATUS BMSR_LINK
#define BMSR_ANEGCOMPLETE BMSR_ACOMP
#define MII_LPA MII_ANLPAR
#define MII_ADVERTISE MII_ANAR
#define MII_CTRL1000 MII_100T2CR
#define ADVERTISE_PAUSE_CAP ANAR_FC
#define ADVERTISE_PAUSE_ASYM 0x0800
#define ADVERTISE_1000HALF ANAR_X_HD
#define ADVERTISE_1000FULL ANAR_X_FD
#define ADVERTISE_10FULL ANAR_10_FD
#define ADVERTISE_10HALF ANAR_10
#define ADVERTISE_100FULL ANAR_TX_FD
#define ADVERTISE_100HALF ANAR_TX
/* Standard PCI Extended Capabilities definitions */
#define PCI_CAP_ID_VPD 0x03
#define PCI_VPD_ADDR 2
#define PCI_VPD_ADDR_F 0x8000
#define PCI_VPD_DATA 4
#define PCI_CAP_ID_EXP 0x10
#define PCI_EXP_DEVCTL 8
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0
#define PCI_EXP_LNKCTL 16
#define PCI_EXP_LNKSTA 18
/*
* Linux compatibility macros
*/
/* Some simple translations */
#define __devinit
#define udelay(x) DELAY(x)
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define le32_to_cpu(x) le32toh(x)
#define cpu_to_le32(x) htole32(x)
#define swab32(x) bswap32(x)
#define simple_strtoul strtoul
/* More types and endian definitions */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint8_t __u8;
typedef uint16_t __u16;
typedef uint32_t __u32;
typedef uint8_t __be8;
typedef uint16_t __be16;
typedef uint32_t __be32;
typedef uint64_t __be64;
#if BYTE_ORDER == BIG_ENDIAN
#define __BIG_ENDIAN_BITFIELD
#elif BYTE_ORDER == LITTLE_ENDIAN
#define __LITTLE_ENDIAN_BITFIELD
#else
#error "Must set BYTE_ORDER"
#endif
/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
#define SUPPORTED_100baseT_Half (1 << 2)
#define SUPPORTED_100baseT_Full (1 << 3)
#define SUPPORTED_1000baseT_Half (1 << 4)
#define SUPPORTED_1000baseT_Full (1 << 5)
#define SUPPORTED_Autoneg (1 << 6)
#define SUPPORTED_TP (1 << 7)
#define SUPPORTED_AUI (1 << 8)
#define SUPPORTED_MII (1 << 9)
#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_BNC (1 << 11)
#define SUPPORTED_10000baseT_Full (1 << 12)
#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
#define ADVERTISED_100baseT_Full (1 << 3)
#define ADVERTISED_1000baseT_Half (1 << 4)
#define ADVERTISED_1000baseT_Full (1 << 5)
#define ADVERTISED_Autoneg (1 << 6)
#define ADVERTISED_TP (1 << 7)
#define ADVERTISED_AUI (1 << 8)
#define ADVERTISED_MII (1 << 9)
#define ADVERTISED_FIBRE (1 << 10)
#define ADVERTISED_BNC (1 << 11)
#define ADVERTISED_10000baseT_Full (1 << 12)
#define ADVERTISED_Pause (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)
/* Enable or disable autonegotiation. If this is set to enable,
* the forced link modes above are completely ignored.
*/
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_10000 10000
#define DUPLEX_HALF 0
#define DUPLEX_FULL 1
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,289 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/common/cxgb_sge_defs.h,v 1.3 2007/09/09 01:28:03 kmacy Exp $
***************************************************************************/
/*
* This file is automatically generated --- any changes will be lost.
*/
#ifndef _SGE_DEFS_H
#define _SGE_DEFS_H
#define S_EC_CREDITS 0
#define M_EC_CREDITS 0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
#define S_EC_GTS 15
#define V_EC_GTS(x) ((x) << S_EC_GTS)
#define F_EC_GTS V_EC_GTS(1U)
#define S_EC_INDEX 16
#define M_EC_INDEX 0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
#define S_EC_SIZE 0
#define M_EC_SIZE 0xFFFF
#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
#define S_EC_BASE_LO 16
#define M_EC_BASE_LO 0xFFFF
#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
#define S_EC_BASE_HI 0
#define M_EC_BASE_HI 0xF
#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
#define S_EC_RESPQ 4
#define M_EC_RESPQ 0x7
#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
#define S_EC_TYPE 7
#define M_EC_TYPE 0x7
#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
#define S_EC_GEN 10
#define V_EC_GEN(x) ((x) << S_EC_GEN)
#define F_EC_GEN V_EC_GEN(1U)
#define S_EC_UP_TOKEN 11
#define M_EC_UP_TOKEN 0xFFFFF
#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
#define S_EC_VALID 31
#define V_EC_VALID(x) ((x) << S_EC_VALID)
#define F_EC_VALID V_EC_VALID(1U)
#define S_RQ_MSI_VEC 20
#define M_RQ_MSI_VEC 0x3F
#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
#define S_RQ_INTR_EN 26
#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
#define S_RQ_GEN 28
#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
#define F_RQ_GEN V_RQ_GEN(1U)
#define S_CQ_INDEX 0
#define M_CQ_INDEX 0xFFFF
#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
#define S_CQ_SIZE 16
#define M_CQ_SIZE 0xFFFF
#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
#define S_CQ_BASE_HI 0
#define M_CQ_BASE_HI 0xFFFFF
#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
#define S_CQ_RSPQ 20
#define M_CQ_RSPQ 0x3F
#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
#define S_CQ_ASYNC_NOTIF 26
#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
#define S_CQ_ARMED 27
#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
#define F_CQ_ARMED V_CQ_ARMED(1U)
#define S_CQ_ASYNC_NOTIF_SOL 28
#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
#define S_CQ_GEN 29
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)
#define S_CQ_ERR 30
#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
#define F_CQ_ERR V_CQ_ERR(1U)
#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
#define S_CQ_CREDITS 0
#define M_CQ_CREDITS 0xFFFF
#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
#define S_CQ_CREDIT_THRES 16
#define M_CQ_CREDIT_THRES 0x1FFF
#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
#define S_FL_BASE_HI 0
#define M_FL_BASE_HI 0xFFFFF
#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
#define S_FL_INDEX_LO 20
#define M_FL_INDEX_LO 0xFFF
#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
#define S_FL_INDEX_HI 0
#define M_FL_INDEX_HI 0xF
#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
#define S_FL_SIZE 4
#define M_FL_SIZE 0xFFFF
#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
#define S_FL_GEN 20
#define V_FL_GEN(x) ((x) << S_FL_GEN)
#define F_FL_GEN V_FL_GEN(1U)
#define S_FL_ENTRY_SIZE_LO 21
#define M_FL_ENTRY_SIZE_LO 0x7FF
#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
#define S_FL_ENTRY_SIZE_HI 0
#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
#define S_FL_CONG_THRES 21
#define M_FL_CONG_THRES 0x3FF
#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
#define S_FL_GTS 31
#define V_FL_GTS(x) ((x) << S_FL_GTS)
#define F_FL_GTS V_FL_GTS(1U)
#define S_FLD_GEN1 31
#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
#define F_FLD_GEN1 V_FLD_GEN1(1U)
#define S_FLD_GEN2 0
#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
#define F_FLD_GEN2 V_FLD_GEN2(1U)
#define S_RSPD_TXQ1_CR 0
#define M_RSPD_TXQ1_CR 0x7F
#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
#define S_RSPD_TXQ1_GTS 7
#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
#define S_RSPD_TXQ2_CR 8
#define M_RSPD_TXQ2_CR 0x7F
#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
#define S_RSPD_TXQ2_GTS 15
#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
#define S_RSPD_TXQ0_CR 16
#define M_RSPD_TXQ0_CR 0x7F
#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
#define S_RSPD_TXQ0_GTS 23
#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
#define S_RSPD_EOP 24
#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
#define F_RSPD_EOP V_RSPD_EOP(1U)
#define G_RSPD_EOP(x) ((x) & F_RSPD_EOP)
#define S_RSPD_SOP 25
#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
#define F_RSPD_SOP V_RSPD_SOP(1U)
#define G_RSPD_SOP(x) ((x) & F_RSPD_SOP)
#define G_RSPD_SOP_EOP(x) ((G_RSPD_SOP(x) | G_RSPD_EOP(x)) >> S_RSPD_EOP)
#define S_RSPD_ASYNC_NOTIF 26
#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
#define S_RSPD_FL0_GTS 27
#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
#define S_RSPD_FL1_GTS 28
#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
#define S_RSPD_IMM_DATA_VALID 29
#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
#define S_RSPD_OFFLOAD 30
#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
#define S_RSPD_GEN1 31
#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
#define S_RSPD_LEN 0
#define M_RSPD_LEN 0x7FFFFFFF
#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
#define S_RSPD_FLQ 31
#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
#define F_RSPD_FLQ V_RSPD_FLQ(1U)
#define S_RSPD_GEN2 0
#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
#define S_RSPD_INR_VEC 1
#define M_RSPD_INR_VEC 0x7F
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
#endif /* _SGE_DEFS_H */
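/*
 * Usage sketch for the accessor-macro pattern above (S_* = shift,
 * M_* = mask, V_* = insert, F_* = one-bit flag, G_* = extract); values
 * assumed for illustration:
 *
 * u32 w = V_EC_CREDITS(100) | F_EC_GTS | V_EC_INDEX(7);
 * G_EC_CREDITS(w) == 100 and G_EC_INDEX(w) == 7 round-trip the fields.
 */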

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,674 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/common/cxgb_tcb.h,v 1.2 2007/05/28 22:57:26 kmacy Exp $
***************************************************************************/
/* This file is automatically generated --- do not edit */
#ifndef _TCB_DEFS_H
#define _TCB_DEFS_H
#define W_TCB_T_STATE 0
#define S_TCB_T_STATE 0
#define M_TCB_T_STATE 0xfULL
#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
#define W_TCB_TIMER 0
#define S_TCB_TIMER 4
#define M_TCB_TIMER 0x1ULL
#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
#define W_TCB_DACK_TIMER 0
#define S_TCB_DACK_TIMER 5
#define M_TCB_DACK_TIMER 0x1ULL
#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
#define W_TCB_DEL_FLAG 0
#define S_TCB_DEL_FLAG 6
#define M_TCB_DEL_FLAG 0x1ULL
#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
#define M_TCB_L2T_IX 0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
#define W_TCB_SMAC_SEL 0
#define S_TCB_SMAC_SEL 18
#define M_TCB_SMAC_SEL 0x3ULL
#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
#define W_TCB_TOS 0
#define S_TCB_TOS 20
#define M_TCB_TOS 0x3fULL
#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
#define W_TCB_MAX_RT 0
#define S_TCB_MAX_RT 26
#define M_TCB_MAX_RT 0xfULL
#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
#define W_TCB_T_RXTSHIFT 0
#define S_TCB_T_RXTSHIFT 30
#define M_TCB_T_RXTSHIFT 0xfULL
#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
#define W_TCB_T_DUPACKS 1
#define S_TCB_T_DUPACKS 2
#define M_TCB_T_DUPACKS 0xfULL
#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
#define W_TCB_T_MAXSEG 1
#define S_TCB_T_MAXSEG 6
#define M_TCB_T_MAXSEG 0xfULL
#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
#define W_TCB_T_FLAGS1 1
#define S_TCB_T_FLAGS1 10
#define M_TCB_T_FLAGS1 0xffffffffULL
#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
#define W_TCB_T_FLAGS2 2
#define S_TCB_T_FLAGS2 10
#define M_TCB_T_FLAGS2 0x7fULL
#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
#define W_TCB_SND_SCALE 2
#define S_TCB_SND_SCALE 17
#define M_TCB_SND_SCALE 0xfULL
#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
#define W_TCB_RCV_SCALE 2
#define S_TCB_RCV_SCALE 21
#define M_TCB_RCV_SCALE 0xfULL
#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
#define W_TCB_SND_UNA_RAW 2
#define S_TCB_SND_UNA_RAW 25
#define M_TCB_SND_UNA_RAW 0x7ffffffULL
#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
#define W_TCB_SND_NXT_RAW 3
#define S_TCB_SND_NXT_RAW 20
#define M_TCB_SND_NXT_RAW 0x7ffffffULL
#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
#define W_TCB_RCV_NXT 4
#define S_TCB_RCV_NXT 15
#define M_TCB_RCV_NXT 0xffffffffULL
#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
#define W_TCB_RCV_ADV 5
#define S_TCB_RCV_ADV 15
#define M_TCB_RCV_ADV 0xffffULL
#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
#define W_TCB_SND_MAX_RAW 5
#define S_TCB_SND_MAX_RAW 31
#define M_TCB_SND_MAX_RAW 0x7ffffffULL
#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
#define W_TCB_SND_CWND 6
#define S_TCB_SND_CWND 26
#define M_TCB_SND_CWND 0x7ffffffULL
#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
#define W_TCB_SND_SSTHRESH 7
#define S_TCB_SND_SSTHRESH 21
#define M_TCB_SND_SSTHRESH 0x7ffffffULL
#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
#define W_TCB_T_RTT_TS_RECENT_AGE 8
#define S_TCB_T_RTT_TS_RECENT_AGE 16
#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
#define W_TCB_T_RTSEQ_RECENT 9
#define S_TCB_T_RTSEQ_RECENT 16
#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
#define W_TCB_T_SRTT 10
#define S_TCB_T_SRTT 16
#define M_TCB_T_SRTT 0xffffULL
#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
#define W_TCB_T_RTTVAR 11
#define S_TCB_T_RTTVAR 0
#define M_TCB_T_RTTVAR 0xffffULL
#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
#define W_TCB_TS_LAST_ACK_SENT_RAW 11
#define S_TCB_TS_LAST_ACK_SENT_RAW 16
#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
#define W_TCB_DIP 12
#define S_TCB_DIP 11
#define M_TCB_DIP 0xffffffffULL
#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
#define W_TCB_SIP 13
#define S_TCB_SIP 11
#define M_TCB_SIP 0xffffffffULL
#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
#define W_TCB_DP 14
#define S_TCB_DP 11
#define M_TCB_DP 0xffffULL
#define V_TCB_DP(x) ((x) << S_TCB_DP)
#define W_TCB_SP 14
#define S_TCB_SP 27
#define M_TCB_SP 0xffffULL
#define V_TCB_SP(x) ((x) << S_TCB_SP)
#define W_TCB_TIMESTAMP 15
#define S_TCB_TIMESTAMP 11
#define M_TCB_TIMESTAMP 0xffffffffULL
#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
#define W_TCB_TIMESTAMP_OFFSET 16
#define S_TCB_TIMESTAMP_OFFSET 11
#define M_TCB_TIMESTAMP_OFFSET 0xfULL
#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
#define W_TCB_TX_MAX 16
#define S_TCB_TX_MAX 15
#define M_TCB_TX_MAX 0xffffffffULL
#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
#define W_TCB_TX_HDR_PTR_RAW 17
#define S_TCB_TX_HDR_PTR_RAW 15
#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
#define W_TCB_TX_LAST_PTR_RAW 18
#define S_TCB_TX_LAST_PTR_RAW 0
#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
#define W_TCB_TX_COMPACT 18
#define S_TCB_TX_COMPACT 17
#define M_TCB_TX_COMPACT 0x1ULL
#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
#define W_TCB_RX_COMPACT 18
#define S_TCB_RX_COMPACT 18
#define M_TCB_RX_COMPACT 0x1ULL
#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
#define W_TCB_RCV_WND 18
#define S_TCB_RCV_WND 19
#define M_TCB_RCV_WND 0x7ffffffULL
#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
#define W_TCB_RX_HDR_OFFSET 19
#define S_TCB_RX_HDR_OFFSET 14
#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
#define W_TCB_RX_FRAG0_START_IDX_RAW 20
#define S_TCB_RX_FRAG0_START_IDX_RAW 9
#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
#define W_TCB_RX_FRAG0_LEN 21
#define S_TCB_RX_FRAG0_LEN 31
#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
#define W_TCB_RX_FRAG1_LEN 22
#define S_TCB_RX_FRAG1_LEN 26
#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
#define W_TCB_NEWRENO_RECOVER 23
#define S_TCB_NEWRENO_RECOVER 21
#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
#define W_TCB_PDU_HAVE_LEN 24
#define S_TCB_PDU_HAVE_LEN 16
#define M_TCB_PDU_HAVE_LEN 0x1ULL
#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
#define W_TCB_PDU_LEN 24
#define S_TCB_PDU_LEN 17
#define M_TCB_PDU_LEN 0xffffULL
#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
#define W_TCB_RX_QUIESCE 25
#define S_TCB_RX_QUIESCE 1
#define M_TCB_RX_QUIESCE 0x1ULL
#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
#define W_TCB_RX_PTR_RAW 25
#define S_TCB_RX_PTR_RAW 2
#define M_TCB_RX_PTR_RAW 0x1ffffULL
#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
#define W_TCB_CPU_NO 25
#define S_TCB_CPU_NO 19
#define M_TCB_CPU_NO 0x7fULL
#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
#define W_TCB_ULP_TYPE 25
#define S_TCB_ULP_TYPE 26
#define M_TCB_ULP_TYPE 0xfULL
#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
#define W_TCB_RX_FRAG1_PTR_RAW 25
#define S_TCB_RX_FRAG1_PTR_RAW 30
#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
#define W_TCB_RX_FRAG2_PTR_RAW 27
#define S_TCB_RX_FRAG2_PTR_RAW 10
#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
#define W_TCB_RX_FRAG2_LEN_RAW 27
#define S_TCB_RX_FRAG2_LEN_RAW 27
#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
#define W_TCB_RX_FRAG3_PTR_RAW 28
#define S_TCB_RX_FRAG3_PTR_RAW 22
#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
#define W_TCB_RX_FRAG3_LEN_RAW 29
#define S_TCB_RX_FRAG3_LEN_RAW 7
#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
#define W_TCB_PDU_HDR_LEN 30
#define S_TCB_PDU_HDR_LEN 29
#define M_TCB_PDU_HDR_LEN 0xffULL
#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
#define W_TCB_SLUSH1 31
#define S_TCB_SLUSH1 5
#define M_TCB_SLUSH1 0x7ffffULL
#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
#define W_TCB_ULP_RAW 31
#define S_TCB_ULP_RAW 24
#define M_TCB_ULP_RAW 0xffULL
#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
#define W_TCB_DDP_RDMAP_VERSION 25
#define S_TCB_DDP_RDMAP_VERSION 30
#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
#define W_TCB_MARKER_ENABLE_RX 25
#define S_TCB_MARKER_ENABLE_RX 31
#define M_TCB_MARKER_ENABLE_RX 0x1ULL
#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
#define W_TCB_MARKER_ENABLE_TX 26
#define S_TCB_MARKER_ENABLE_TX 0
#define M_TCB_MARKER_ENABLE_TX 0x1ULL
#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
#define W_TCB_CRC_ENABLE 26
#define S_TCB_CRC_ENABLE 1
#define M_TCB_CRC_ENABLE 0x1ULL
#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
#define W_TCB_IRS_ULP 26
#define S_TCB_IRS_ULP 2
#define M_TCB_IRS_ULP 0x1ffULL
#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
#define W_TCB_ISS_ULP 26
#define S_TCB_ISS_ULP 11
#define M_TCB_ISS_ULP 0x1ffULL
#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
#define W_TCB_TX_PDU_LEN 26
#define S_TCB_TX_PDU_LEN 20
#define M_TCB_TX_PDU_LEN 0x3fffULL
#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
#define W_TCB_TX_PDU_OUT 27
#define S_TCB_TX_PDU_OUT 2
#define M_TCB_TX_PDU_OUT 0x1ULL
#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
#define W_TCB_CQ_IDX_SQ 27
#define S_TCB_CQ_IDX_SQ 3
#define M_TCB_CQ_IDX_SQ 0xffffULL
#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
#define W_TCB_CQ_IDX_RQ 27
#define S_TCB_CQ_IDX_RQ 19
#define M_TCB_CQ_IDX_RQ 0xffffULL
#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
#define W_TCB_QP_ID 28
#define S_TCB_QP_ID 3
#define M_TCB_QP_ID 0xffffULL
#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
#define W_TCB_PD_ID 28
#define S_TCB_PD_ID 19
#define M_TCB_PD_ID 0xffffULL
#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
#define W_TCB_STAG 29
#define S_TCB_STAG 3
#define M_TCB_STAG 0xffffffffULL
#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
#define W_TCB_RQ_START 30
#define S_TCB_RQ_START 3
#define M_TCB_RQ_START 0x3ffffffULL
#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
#define W_TCB_RQ_MSN 30
#define S_TCB_RQ_MSN 29
#define M_TCB_RQ_MSN 0x3ffULL
#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
#define W_TCB_RQ_MAX_OFFSET 31
#define S_TCB_RQ_MAX_OFFSET 7
#define M_TCB_RQ_MAX_OFFSET 0xfULL
#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
#define W_TCB_RQ_WRITE_PTR 31
#define S_TCB_RQ_WRITE_PTR 11
#define M_TCB_RQ_WRITE_PTR 0x3ffULL
#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
#define W_TCB_INB_WRITE_PERM 31
#define S_TCB_INB_WRITE_PERM 21
#define M_TCB_INB_WRITE_PERM 0x1ULL
#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
#define W_TCB_INB_READ_PERM 31
#define S_TCB_INB_READ_PERM 22
#define M_TCB_INB_READ_PERM 0x1ULL
#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
#define W_TCB_ORD_L_BIT_VLD 31
#define S_TCB_ORD_L_BIT_VLD 23
#define M_TCB_ORD_L_BIT_VLD 0x1ULL
#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
#define W_TCB_RDMAP_OPCODE 31
#define S_TCB_RDMAP_OPCODE 24
#define M_TCB_RDMAP_OPCODE 0xfULL
#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
#define W_TCB_TX_FLUSH 31
#define S_TCB_TX_FLUSH 28
#define M_TCB_TX_FLUSH 0x1ULL
#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
#define W_TCB_TX_OOS_RXMT 31
#define S_TCB_TX_OOS_RXMT 29
#define M_TCB_TX_OOS_RXMT 0x1ULL
#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
#define W_TCB_TX_OOS_TXMT 31
#define S_TCB_TX_OOS_TXMT 30
#define M_TCB_TX_OOS_TXMT 0x1ULL
#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
#define W_TCB_SLUSH_AUX2 31
#define S_TCB_SLUSH_AUX2 31
#define M_TCB_SLUSH_AUX2 0x1ULL
#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
#define W_TCB_RX_FRAG1_PTR_RAW2 25
#define S_TCB_RX_FRAG1_PTR_RAW2 30
#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
#define W_TCB_RX_DDP_FLAGS 26
#define S_TCB_RX_DDP_FLAGS 15
#define M_TCB_RX_DDP_FLAGS 0xffffULL
#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
#define W_TCB_SLUSH_AUX3 26
#define S_TCB_SLUSH_AUX3 31
#define M_TCB_SLUSH_AUX3 0x1ffULL
#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
#define W_TCB_RX_DDP_BUF0_OFFSET 27
#define S_TCB_RX_DDP_BUF0_OFFSET 8
#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
#define W_TCB_RX_DDP_BUF0_LEN 27
#define S_TCB_RX_DDP_BUF0_LEN 30
#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
#define W_TCB_RX_DDP_BUF1_OFFSET 28
#define S_TCB_RX_DDP_BUF1_OFFSET 20
#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
#define W_TCB_RX_DDP_BUF1_LEN 29
#define S_TCB_RX_DDP_BUF1_LEN 10
#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
#define W_TCB_RX_DDP_BUF0_TAG 30
#define S_TCB_RX_DDP_BUF0_TAG 0
#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
#define W_TCB_RX_DDP_BUF1_TAG 31
#define S_TCB_RX_DDP_BUF1_TAG 0
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
#define S_TF_DACK 10
#define V_TF_DACK(x) ((x) << S_TF_DACK)
#define S_TF_NAGLE 11
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
#define S_TF_RECV_SCALE 12
#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
#define S_TF_RECV_TSTMP 13
#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
#define S_TF_RECV_SACK 14
#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
#define S_TF_TURBO 15
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
#define S_TF_KEEPALIVE 16
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
#define S_TF_TCAM_BYPASS 17
#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
#define S_TF_CORE_FIN 18
#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
#define S_TF_CORE_MORE 19
#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
#define S_TF_MIGRATING 20
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
#define S_TF_ACTIVE_OPEN 21
#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
#define S_TF_ASK_MODE 22
#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
#define S_TF_NON_OFFLOAD 23
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
#define S_TF_MOD_SCHD 24
#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
#define S_TF_MOD_SCHD_REASON0 25
#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
#define S_TF_MOD_SCHD_REASON1 26
#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
#define S_TF_MOD_SCHD_RX 27
#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
#define S_TF_CORE_PUSH 28
#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
#define S_TF_RCV_COALESCE_ENABLE 29
#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
#define S_TF_RCV_COALESCE_PUSH 30
#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
#define S_TF_RCV_COALESCE_LAST_PSH 31
#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
#define S_TF_LOCK_TID 33
#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
#define S_TF_DACK_MSS 34
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
#define S_TF_CCTRL_SEL0 35
#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
#define S_TF_CCTRL_SEL1 36
#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
#define S_TF_TX_PACE_AUTO 38
#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
#define S_TF_PEER_FIN_HELD 39
#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
#define S_TF_CORE_URG 40
#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
#define S_TF_RDMA_ERROR 41
#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
#define S_TF_SSWS_DISABLED 42
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
#define S_TF_DUPACK_COUNT_ODD 43
#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
#define S_TF_TX_CHANNEL 44
#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
#define S_TF_RX_CHANNEL 45
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
#define S_TF_TX_PACE_FIXED 46
#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
#define S_TF_RDMA_FLM_ERROR 47
#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
#define S_TF_RX_FLOW_CONTROL_DISABLE 48
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
#define S_TF_DDP_INDICATE_OUT 15
#define V_TF_DDP_INDICATE_OUT(x) ((x) << S_TF_DDP_INDICATE_OUT)
#define S_TF_DDP_ACTIVE_BUF 16
#define V_TF_DDP_ACTIVE_BUF(x) ((x) << S_TF_DDP_ACTIVE_BUF)
#define S_TF_DDP_BUF0_VALID 17
#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
#define S_TF_DDP_BUF1_VALID 18
#define V_TF_DDP_BUF1_VALID(x) ((x) << S_TF_DDP_BUF1_VALID)
#define S_TF_DDP_BUF0_INDICATE 19
#define V_TF_DDP_BUF0_INDICATE(x) ((x) << S_TF_DDP_BUF0_INDICATE)
#define S_TF_DDP_BUF1_INDICATE 20
#define V_TF_DDP_BUF1_INDICATE(x) ((x) << S_TF_DDP_BUF1_INDICATE)
#define S_TF_DDP_PUSH_DISABLE_0 21
#define V_TF_DDP_PUSH_DISABLE_0(x) ((x) << S_TF_DDP_PUSH_DISABLE_0)
#define S_TF_DDP_PUSH_DISABLE_1 22
#define V_TF_DDP_PUSH_DISABLE_1(x) ((x) << S_TF_DDP_PUSH_DISABLE_1)
#define S_TF_DDP_OFF 23
#define V_TF_DDP_OFF(x) ((x) << S_TF_DDP_OFF)
#define S_TF_DDP_WAIT_FRAG 24
#define V_TF_DDP_WAIT_FRAG(x) ((x) << S_TF_DDP_WAIT_FRAG)
#define S_TF_DDP_BUF_INF 25
#define V_TF_DDP_BUF_INF(x) ((x) << S_TF_DDP_BUF_INF)
#define S_TF_DDP_RX2TX 26
#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
#define S_TF_DDP_BUF0_FLUSH 27
#define V_TF_DDP_BUF0_FLUSH(x) ((x) << S_TF_DDP_BUF0_FLUSH)
#define S_TF_DDP_BUF1_FLUSH 28
#define V_TF_DDP_BUF1_FLUSH(x) ((x) << S_TF_DDP_BUF1_FLUSH)
#define S_TF_DDP_PSH_NO_INVALIDATE 29
#define V_TF_DDP_PSH_NO_INVALIDATE(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE)
#endif /* _TCB_DEFS_H */
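Every TCB field above is described by the same triple: W_* is the index of the 32-bit word inside a connection's TCB, S_* the bit offset within that word, and M_*/V_* the mask and shifted-value helpers. A hedged sketch of how a field update would be parameterized; l2t_idx is hypothetical and the actual update primitive (a firmware work request) is not part of this header:

/* Sketch only: the word/mask/value triple for changing the L2T index. */
unsigned int word = W_TCB_L2T_IX;		/* TCB word 0 */
u64 mask = V_TCB_L2T_IX(M_TCB_L2T_IX);		/* bits 7..17 of that word */
u64 val  = V_TCB_L2T_IX((u64)l2t_idx);		/* caller-supplied new index */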

@ -1,173 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/ulp/toecore/toedev.h,v 1.1 2007/05/25 16:17:59 kmacy Exp $
***************************************************************************/
#ifndef _OFFLOAD_DEV_H_
#define _OFFLOAD_DEV_H_
#include <net/route.h>
/* Parameter values for offload_get_phys_egress() */
enum {
TOE_OPEN,
TOE_FAILOVER,
};
/* Parameter values for toe_failover() */
enum {
TOE_ACTIVE_SLAVE,
TOE_LINK_DOWN,
TOE_LINK_UP,
TOE_RELEASE,
TOE_RELEASE_ALL,
};
#define TOENAMSIZ 16
/* belongs in linux/netdevice.h */
#define NETIF_F_TCPIP_OFFLOAD (1 << 15)
/* Get the toedev associated with a ifnet */
#define TOEDEV(netdev) (*(struct toedev **)&(netdev)->if_softc)
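/*
 * Note: this reads the if_softc pointer itself as a struct toedev pointer,
 * so it assumes the attached driver stores its toedev at the very start of
 * whatever if_softc points to; that is a convention of this driver, not a
 * general property of struct ifnet.
 */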
/* offload type ids */
enum {
TOE_ID_CHELSIO_T1 = 1,
TOE_ID_CHELSIO_T1C,
TOE_ID_CHELSIO_T2,
TOE_ID_CHELSIO_T3,
TOE_ID_CHELSIO_T3B,
};
struct offload_id {
unsigned int id;
unsigned long data;
};
struct ifnet;
struct rtentry;
struct tom_info;
struct sysctl_oid;
struct socket;
struct mbuf;
enum toetype {
T3A = 0,
T3B
};
struct toedev {
char name[TOENAMSIZ]; /* TOE device name */
enum toetype type;
struct adapter *adapter;
unsigned int ttid; /* TOE type id */
unsigned long flags; /* device flags */
unsigned int mtu; /* max size of TX offloaded data */
unsigned int nconn; /* max # of offloaded connections */
struct ifnet *lldev; /* LL device associated with TOE messages */
const struct tom_info *offload_mod; /* attached TCP offload module */
struct sysctl_oid *sysctl_root; /* root of proc dir for this TOE */
TAILQ_ENTRY(toedev) ofld_entry; /* for list linking */
int (*open)(struct toedev *dev);
int (*close)(struct toedev *dev);
int (*can_offload)(struct toedev *dev, struct socket *so);
int (*connect)(struct toedev *dev, struct socket *so,
struct ifnet *egress_ifp);
int (*send)(struct toedev *dev, struct mbuf *m);
int (*recv)(struct toedev *dev, struct mbuf **m, int n);
int (*ctl)(struct toedev *dev, unsigned int req, void *data);
void (*neigh_update)(struct toedev *dev, struct rtentry *neigh);
void (*failover)(struct toedev *dev, struct ifnet *bond_ifp,
struct ifnet *ndev, int event);
void *priv; /* driver private data */
void *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
};
struct tom_info {
int (*attach)(struct toedev *dev, const struct offload_id *entry);
int (*detach)(struct toedev *dev);
const char *name;
const struct offload_id *id_table;
TAILQ_ENTRY(tom_info) entry;
};
static inline void init_offload_dev(struct toedev *dev)
{
}
extern int register_tom(struct tom_info *t);
extern int unregister_tom(struct tom_info *t);
extern int register_toedev(struct toedev *dev, const char *name);
extern int unregister_toedev(struct toedev *dev);
extern int activate_offload(struct toedev *dev);
extern int toe_send(struct toedev *dev, struct mbuf *m);
extern struct ifnet *offload_get_phys_egress(struct ifnet *dev,
struct socket *so,
int context);
#if defined(CONFIG_TCP_OFFLOAD_MODULE)
static inline int toe_receive_mbuf(struct toedev *dev, struct mbuf **m,
int n)
{
return dev->recv(dev, m, n);
}
extern int prepare_tcp_for_offload(void);
extern void restore_tcp_to_nonoffload(void);
#elif defined(CONFIG_TCP_OFFLOAD)
extern int toe_receive_mbuf(struct toedev *dev, struct mbuf **m, int n);
#endif
#if defined(CONFIG_TCP_OFFLOAD) || \
(defined(CONFIG_TCP_OFFLOAD_MODULE) && defined(MODULE))
extern void toe_neigh_update(struct rtentry *neigh);
extern void toe_failover(struct ifnet *bond_ifp,
struct ifnet *fail_ifp, int event);
extern int toe_enslave(struct ifnet *bond_ifp,
struct ifnet *slave_ifp);
#else
static inline void toe_neigh_update(struct rtentry *neigh) {}
static inline void toe_failover(struct ifnet *bond_ifp,
struct ifnet *fail_ifp, int event)
{}
static inline int toe_enslave(struct ifnet *bond_ifp,
struct ifnet *slave_ifp)
{
return 0;
}
#endif /* CONFIG_TCP_OFFLOAD */
#endif /* _OFFLOAD_DEV_H_ */
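For orientation, a hedged sketch of how a TCP offload module plugs into the interfaces above: it fills in a tom_info whose id_table lists the TOE type ids it can drive and hands it to register_tom(). All names below are illustrative, and the zero entry terminating id_table is an assumption about how matching stops:

/* Illustrative only; not part of the driver. */
static const struct offload_id example_ids[] = {
	{ TOE_ID_CHELSIO_T3, 0 },
	{ TOE_ID_CHELSIO_T3B, 0 },
	{ 0, 0 }			/* assumed terminator */
};

static int example_attach(struct toedev *dev, const struct offload_id *entry)
{
	return 0;			/* set up per-device state here */
}

static int example_detach(struct toedev *dev)
{
	return 0;
}

static struct tom_info example_tom = {
	.attach   = example_attach,
	.detach   = example_detach,
	.name     = "example_tom",
	.id_table = example_ids,
};

/* ... then register_tom(&example_tom) at module load. */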

@ -1,41 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
$FreeBSD: src/sys/dev/cxgb/common/cxgb_version.h,v 1.4 2007/05/28 22:57:26 kmacy Exp $
***************************************************************************/
/*
* Note that although this driver doesn't contain all of the functionality of the Linux driver,
* the common code is 99% the same. Hence we keep the same version number, to indicate which
* Linux driver the common code corresponds to.
*/
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "1.0.086"
#endif

@ -1,375 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_vsc7323.c,v 1.3 2008/01/17 06:03:21 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_vsc7323.c,v 1.3 2007/09/09 01:28:03 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <common/cxgb_common.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/common/cxgb_common.h>
#endif
#ifdef __NetBSD__
#include "cxgb_common.h"
#endif
#endif
enum {
ELMR_ADDR = 0,
ELMR_STAT = 1,
ELMR_DATA_LO = 2,
ELMR_DATA_HI = 3,
ELMR_THRES0 = 0xe000,
ELMR_BW = 0xe00c,
ELMR_FIFO_SZ = 0xe00d,
ELMR_STATS = 0xf000,
ELMR_MDIO_ADDR = 10
};
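/*
 * The ELMR block is reached indirectly over MDIO at address ELMR_MDIO_ADDR:
 * the target register number is written to ELMR_ADDR, and 32-bit data then
 * moves 16 bits at a time through ELMR_DATA_LO/ELMR_DATA_HI.  Reads also
 * poll ELMR_STAT for a value of 1 before the data is valid, and the address
 * appears to auto-increment across block transfers, judging from the loops
 * in t3_elmr_blk_write/t3_elmr_blk_read below.
 */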
#define VSC_REG(block, subblock, reg) \
((reg) | ((subblock) << 8) | ((block) << 12))
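/*
 * Example: VSC_REG(2, 1, 0x10) = 0x10 | (1 << 8) | (2 << 12) = 0x2110,
 * i.e. register 0x10 of subblock 1 of block 2 (apparently the egress FIFO,
 * going by the ingress/egress register pairs used in t3_vsc7323_init).
 */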
int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n)
{
int ret;
const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
ELMR_LOCK(adap);
ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
for ( ; !ret && n; n--, vals++) {
ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO,
*vals & 0xffff);
if (!ret)
ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
*vals >> 16);
}
ELMR_UNLOCK(adap);
return ret;
}
static int elmr_write(adapter_t *adap, int addr, u32 val)
{
return t3_elmr_blk_write(adap, addr, &val, 1);
}
int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n)
{
int i, ret;
unsigned int v;
const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
ELMR_LOCK(adap);
ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
if (ret)
goto out;
for (i = 0; i < 5; i++) {
ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_STAT, &v);
if (ret)
goto out;
if (v == 1)
break;
udelay(5);
}
if (v != 1) {
ret = -ETIMEDOUT;
goto out;
}
for ( ; !ret && n; n--, vals++) {
ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO, vals);
if (!ret) {
ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
&v);
*vals |= v << 16;
}
}
out: ELMR_UNLOCK(adap);
return ret;
}
int t3_vsc7323_init(adapter_t *adap, int nports)
{
static struct addr_val_pair sys_avp[] = {
{ VSC_REG(7, 15, 0xf), 2 },
{ VSC_REG(7, 15, 0x19), 0xd6 },
{ VSC_REG(7, 15, 7), 0xc },
{ VSC_REG(7, 1, 0), 0x220 },
};
static struct addr_val_pair fifo_avp[] = {
{ VSC_REG(2, 0, 0x2f), 0 },
{ VSC_REG(2, 0, 0xf), 0xa0010291 },
{ VSC_REG(2, 1, 0x2f), 1 },
{ VSC_REG(2, 1, 0xf), 0xa026301 }
};
static struct addr_val_pair xg_avp[] = {
{ VSC_REG(1, 10, 0), 0x600b },
{ VSC_REG(1, 10, 1), 0x70600 }, /* QUANTA = 96*1024*8/512 */
{ VSC_REG(1, 10, 2), 0x2710 },
{ VSC_REG(1, 10, 5), 0x65 },
{ VSC_REG(1, 10, 7), 0x23 },
{ VSC_REG(1, 10, 0x23), 0x800007bf },
{ VSC_REG(1, 10, 0x23), 0x000007bf },
{ VSC_REG(1, 10, 0x23), 0x800007bf },
{ VSC_REG(1, 10, 0x24), 4 }
};
int i, ret, ing_step, egr_step, ing_bot, egr_bot;
for (i = 0; i < ARRAY_SIZE(sys_avp); i++)
if ((ret = t3_elmr_blk_write(adap, sys_avp[i].reg_addr,
&sys_avp[i].val, 1)))
return ret;
ing_step = 0xc0 / nports;
egr_step = 0x40 / nports;
ing_bot = egr_bot = 0;
// ing_wm = ing_step * 64;
// egr_wm = egr_step * 64;
/* {ING,EGR}_CONTROL.CLR = 1 here */
for (i = 0; i < nports; i++) {
if (
(ret = elmr_write(adap, VSC_REG(2, 0, 0x10 + i),
((ing_bot + ing_step) << 16) | ing_bot)) ||
(ret = elmr_write(adap, VSC_REG(2, 0, 0x40 + i),
0x6000bc0)) ||
(ret = elmr_write(adap, VSC_REG(2, 0, 0x50 + i), 1)) ||
(ret = elmr_write(adap, VSC_REG(2, 1, 0x10 + i),
((egr_bot + egr_step) << 16) | egr_bot)) ||
(ret = elmr_write(adap, VSC_REG(2, 1, 0x40 + i),
0x2000280)) ||
(ret = elmr_write(adap, VSC_REG(2, 1, 0x50 + i), 0)))
return ret;
ing_bot += ing_step;
egr_bot += egr_step;
}
for (i = 0; i < ARRAY_SIZE(fifo_avp); i++)
if ((ret = t3_elmr_blk_write(adap, fifo_avp[i].reg_addr,
&fifo_avp[i].val, 1)))
return ret;
for (i = 0; i < ARRAY_SIZE(xg_avp); i++)
if ((ret = t3_elmr_blk_write(adap, xg_avp[i].reg_addr,
&xg_avp[i].val, 1)))
return ret;
for (i = 0; i < nports; i++)
if ((ret = elmr_write(adap, VSC_REG(1, i, 0), 0xa59c)) ||
(ret = elmr_write(adap, VSC_REG(1, i, 5),
(i << 12) | 0x63)) ||
(ret = elmr_write(adap, VSC_REG(1, i, 0xb), 0x96)) ||
(ret = elmr_write(adap, VSC_REG(1, i, 0x15), 0x21)) ||
(ret = elmr_write(adap, ELMR_THRES0 + i, 768)))
return ret;
if ((ret = elmr_write(adap, ELMR_BW, 7)))
return ret;
return ret;
}
int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port)
{
int mode, clk, r;
if (speed >= 0) {
if (speed == SPEED_10)
mode = clk = 1;
else if (speed == SPEED_100)
mode = 1, clk = 2;
else if (speed == SPEED_1000)
mode = clk = 3;
else
return -EINVAL;
if ((r = elmr_write(adap, VSC_REG(1, port, 0),
0xa590 | (mode << 2))) ||
(r = elmr_write(adap, VSC_REG(1, port, 0xb),
0x91 | (clk << 1))) ||
(r = elmr_write(adap, VSC_REG(1, port, 0xb),
0x90 | (clk << 1))) ||
(r = elmr_write(adap, VSC_REG(1, port, 0),
0xa593 | (mode << 2))))
return r;
}
r = (fc & PAUSE_RX) ? 0x60200 : 0x20200; /* QUANTA = 32*1024*8/512 */
if (fc & PAUSE_TX)
r |= (1 << 19);
return elmr_write(adap, VSC_REG(1, port, 1), r);
}
int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port)
{
return elmr_write(adap, VSC_REG(1, port, 2), mtu);
}
int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port)
{
int ret;
ret = elmr_write(adap, VSC_REG(1, port, 3),
(addr[0] << 16) | (addr[1] << 8) | addr[2]);
if (!ret)
ret = elmr_write(adap, VSC_REG(1, port, 4),
(addr[3] << 16) | (addr[4] << 8) | addr[5]);
return ret;
}
int t3_vsc7323_enable(adapter_t *adap, int port, int which)
{
int ret;
unsigned int v, orig;
ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
if (!ret) {
orig = v;
if (which & MAC_DIRECTION_TX)
v |= 1;
if (which & MAC_DIRECTION_RX)
v |= 2;
if (v != orig)
ret = elmr_write(adap, VSC_REG(1, port, 0), v);
}
return ret;
}
int t3_vsc7323_disable(adapter_t *adap, int port, int which)
{
int ret;
unsigned int v, orig;
ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
if (!ret) {
orig = v;
if (which & MAC_DIRECTION_TX)
v &= ~1;
if (which & MAC_DIRECTION_RX)
v &= ~2;
if (v != orig)
ret = elmr_write(adap, VSC_REG(1, port, 0), v);
}
return ret;
}
#define STATS0_START 1
#define STATS1_START 0x24
#define NSTATS0 (0x1d - STATS0_START + 1)
#define NSTATS1 (0x2a - STATS1_START + 1)
#define ELMR_STAT(port, reg) (ELMR_STATS + (port) * 0x40 + (reg))
const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac)
{
int ret;
u64 rx_ucast, tx_ucast;
u32 stats0[NSTATS0], stats1[NSTATS1];
ret = t3_elmr_blk_read(mac->adapter,
ELMR_STAT(mac->ext_port, STATS0_START),
stats0, NSTATS0);
if (!ret)
ret = t3_elmr_blk_read(mac->adapter,
ELMR_STAT(mac->ext_port, STATS1_START),
stats1, NSTATS1);
if (ret)
goto out;
/*
* HW counts Rx/Tx unicast frames but we want all the frames.
*/
rx_ucast = mac->stats.rx_frames - mac->stats.rx_mcast_frames -
mac->stats.rx_bcast_frames;
rx_ucast += (u64)(stats0[6 - STATS0_START] - (u32)rx_ucast);
tx_ucast = mac->stats.tx_frames - mac->stats.tx_mcast_frames -
mac->stats.tx_bcast_frames;
tx_ucast += (u64)(stats0[27 - STATS0_START] - (u32)tx_ucast);
#define RMON_UPDATE(mac, name, hw_stat) \
mac->stats.name += (u64)((hw_stat) - (u32)(mac->stats.name))
RMON_UPDATE(mac, rx_octets, stats0[4 - STATS0_START]);
RMON_UPDATE(mac, rx_frames, stats0[6 - STATS0_START]);
RMON_UPDATE(mac, rx_frames, stats0[7 - STATS0_START]);
RMON_UPDATE(mac, rx_frames, stats0[8 - STATS0_START]);
RMON_UPDATE(mac, rx_mcast_frames, stats0[7 - STATS0_START]);
RMON_UPDATE(mac, rx_bcast_frames, stats0[8 - STATS0_START]);
RMON_UPDATE(mac, rx_fcs_errs, stats0[9 - STATS0_START]);
RMON_UPDATE(mac, rx_pause, stats0[2 - STATS0_START]);
RMON_UPDATE(mac, rx_jabber, stats0[16 - STATS0_START]);
RMON_UPDATE(mac, rx_short, stats0[11 - STATS0_START]);
RMON_UPDATE(mac, rx_symbol_errs, stats0[1 - STATS0_START]);
RMON_UPDATE(mac, rx_too_long, stats0[15 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_64, stats0[17 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_65_127, stats0[18 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_128_255, stats0[19 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_256_511, stats0[20 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_512_1023, stats0[21 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_1024_1518, stats0[22 - STATS0_START]);
RMON_UPDATE(mac, rx_frames_1519_max, stats0[23 - STATS0_START]);
RMON_UPDATE(mac, tx_octets, stats0[26 - STATS0_START]);
RMON_UPDATE(mac, tx_frames, stats0[27 - STATS0_START]);
RMON_UPDATE(mac, tx_frames, stats0[28 - STATS0_START]);
RMON_UPDATE(mac, tx_frames, stats0[29 - STATS0_START]);
RMON_UPDATE(mac, tx_mcast_frames, stats0[28 - STATS0_START]);
RMON_UPDATE(mac, tx_bcast_frames, stats0[29 - STATS0_START]);
RMON_UPDATE(mac, tx_pause, stats0[25 - STATS0_START]);
RMON_UPDATE(mac, tx_underrun, 0);
RMON_UPDATE(mac, tx_frames_64, stats1[36 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_65_127, stats1[37 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_128_255, stats1[38 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_256_511, stats1[39 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_512_1023, stats1[40 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_1024_1518, stats1[41 - STATS1_START]);
RMON_UPDATE(mac, tx_frames_1519_max, stats1[42 - STATS1_START]);
#undef RMON_UPDATE
mac->stats.rx_frames = rx_ucast + mac->stats.rx_mcast_frames +
mac->stats.rx_bcast_frames;
mac->stats.tx_frames = tx_ucast + mac->stats.tx_mcast_frames +
mac->stats.tx_bcast_frames;
out: return &mac->stats;
}
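The RMON_UPDATE macro above is the usual wrap-safe way to fold a free-running 32-bit hardware counter into a 64-bit software total: the subtraction happens in 32-bit arithmetic, so it yields the delta modulo 2^32 and stays correct across counter wraparound as long as the counter is sampled more often than it wraps. Restated as a standalone helper (a sketch, not driver code):

/* 'hw' is the current 32-bit hardware count, 'total' the 64-bit total. */
static inline void rmon_fold(u64 *total, u32 hw)
{
	*total += (u64)(u32)(hw - (u32)*total);
}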

@ -1,262 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_vsc8211.c,v 1.3 2008/01/17 06:03:21 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_vsc8211.c,v 1.3 2007/08/25 21:07:37 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_include.h>
#endif
#endif
/* VSC8211 PHY specific registers. */
enum {
VSC8211_INTR_ENABLE = 25,
VSC8211_INTR_STATUS = 26,
VSC8211_AUX_CTRL_STAT = 28,
};
enum {
VSC_INTR_RX_ERR = 1 << 0,
VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
VSC_INTR_CABLE = 1 << 2, /* cable impairment */
VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
VSC_INTR_LINK_CHG = 1 << 13, /* link change */
VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
};
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
VSC_INTR_NEG_DONE)
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
VSC_INTR_ENABLE)
/* PHY specific auxiliary control & status register fields */
#define S_ACSR_ACTIPHY_TMR 0
#define M_ACSR_ACTIPHY_TMR 0x3
#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
#define S_ACSR_SPEED 3
#define M_ACSR_SPEED 0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
#define S_ACSR_DUPLEX 5
#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
#define S_ACSR_ACTIPHY 6
#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
/*
* Reset the PHY. This PHY completes reset immediately so we never wait.
*/
static int vsc8211_reset(struct cphy *cphy, int wait)
{
return t3_phy_reset(cphy, 0, 0);
}
static int vsc8211_intr_enable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
}
static int vsc8211_intr_disable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
}
static int vsc8211_intr_clear(struct cphy *cphy)
{
u32 val;
/* Clear PHY interrupts by reading the register. */
return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
}
static int vsc8211_autoneg_enable(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int vsc8211_autoneg_restart(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
if (!err)
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
if (link_ok) {
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
}
if (!(bmcr & BMCR_ANENABLE)) {
dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
sp = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
sp = SPEED_100;
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
if (err)
return err;
dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_ACSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
if (fc && dplx == DUPLEX_FULL) {
err = mdio_read(cphy, 0, MII_LPA, &lpa);
if (!err)
err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
if (err)
return err;
if (lpa & adv & ADVERTISE_PAUSE_CAP)
pause = PAUSE_RX | PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_CAP) &&
(lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_ASYM))
pause = PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_CAP))
pause = PAUSE_RX;
}
}
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
static int vsc8211_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
enable ? BMCR_PDOWN : 0);
}
static int vsc8211_intr_handler(struct cphy *cphy)
{
unsigned int cause;
int err, cphy_cause = 0;
err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
if (err)
return err;
cause &= INTR_MASK;
if (cause & CFG_CHG_INTR_MASK)
cphy_cause |= cphy_cause_link_change;
if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
cphy_cause |= cphy_cause_fifo_error;
return cphy_cause;
}
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops vsc8211_ops = {
NULL,
vsc8211_reset,
vsc8211_intr_enable,
vsc8211_intr_disable,
vsc8211_intr_clear,
vsc8211_intr_handler,
vsc8211_autoneg_enable,
vsc8211_autoneg_restart,
t3_phy_advertise,
NULL,
t3_set_phy_speed_duplex,
vsc8211_get_link_status,
vsc8211_power_down,
};
#else
static struct cphy_ops vsc8211_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
.intr_clear = vsc8211_intr_clear,
.intr_handler = vsc8211_intr_handler,
.autoneg_enable = vsc8211_autoneg_enable,
.autoneg_restart = vsc8211_autoneg_restart,
.advertise = t3_phy_advertise,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = vsc8211_get_link_status,
.power_down = vsc8211_power_down,
};
#endif
void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
t3_os_sleep(20); /* PHY needs ~10ms to start responding to MDIO */
}
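The pause handling in vsc8211_get_link_status above is the standard IEEE 802.3 autonegotiation resolution of the PAUSE and asymmetric-pause bits from the local advertisement and the link partner ability registers. Restated on its own (a sketch; lpa and adv hold the MII_LPA and MII_ADVERTISE values):

static int resolve_pause(unsigned int lpa, unsigned int adv)
{
	if (lpa & adv & ADVERTISE_PAUSE_CAP)	/* both sides symmetric */
		return PAUSE_RX | PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
	    (adv & ADVERTISE_PAUSE_ASYM))
		return PAUSE_TX;		/* asymmetric: TX only */
	if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
		return PAUSE_RX;		/* asymmetric: RX only */
	return 0;
}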

@ -1,703 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_xgmac.c,v 1.3 2008/01/17 06:03:21 jklos Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_xgmac.c,v 1.7 2007/09/09 01:28:03 kmacy Exp $");
#endif
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#endif
#undef msleep
#define msleep t3_os_sleep
/*
* # of exact address filters. The first one is used for the station address,
* the rest are available for multicast addresses.
*/
#define EXACT_ADDR_FILTERS 8
static inline int macidx(const struct cmac *mac)
{
return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}
static void xaui_serdes_reset(struct cmac *mac)
{
static const unsigned int clear[] = {
F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
};
int i;
adapter_t *adap = mac->adapter;
u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
F_RESETPLL23 | F_RESETPLL01);
(void)t3_read_reg(adap, ctrl);
udelay(15);
for (i = 0; i < ARRAY_SIZE(clear); i++) {
t3_set_reg_field(adap, ctrl, clear[i], 0);
udelay(15);
}
}
void t3b_pcs_reset(struct cmac *mac)
{
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
udelay(20);
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
F_PCS_RESET_);
}
int t3_mac_reset(struct cmac *mac)
{
static struct addr_val_pair mac_reset_avp[] = {
{ A_XGM_TX_CTRL, 0 },
{ A_XGM_RX_CTRL, 0 },
{ A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
{ A_XGM_RX_HASH_LOW, 0 },
{ A_XGM_RX_HASH_HIGH, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
{ A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
{ A_XGM_STAT_CTRL, F_CLRSTATS }
};
u32 val;
adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
F_RXSTRFRWRD | F_DISERRFRAMES,
uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
if (uses_xaui(adap)) {
if (adap->params.rev == 0) {
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_RXENABLE | F_TXENABLE);
if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
F_CMULOCK, 1, 5, 2)) {
CH_ERR(adap,
"MAC %d XAUI SERDES CMU lock failed\n",
macidx(mac));
return -1;
}
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_SERDESRESET_);
} else
xaui_serdes_reset(mac);
}
if (mac->multiport) {
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
MAX_FRAME_SIZE - 4);
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0,
F_DISPREAMBLE);
t3_set_reg_field(adap, A_XGM_RX_CFG + oft, 0, F_COPYPREAMBLE |
F_ENNON802_3PREAMBLE);
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft,
V_TXFIFOTHRESH(M_TXFIFOTHRESH),
V_TXFIFOTHRESH(64));
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
}
val = F_MAC_RESET_;
if (is_10G(adap) || mac->multiport)
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
memset(&mac->stats, 0, sizeof(mac->stats));
return 0;
}
static int t3b2_mac_reset(struct cmac *mac)
{
u32 val;
adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
/* Stop egress traffic to xgm */
if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
else
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
/* PCS in reset */
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
msleep(10);
/* Check for xgm Rx fifo empty */
if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
0x80000000, 1, 5, 2)) {
CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
macidx(mac));
return -1;
}
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); /* MAC in reset */
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
t3_write_reg(adap, A_XGM_RX_CFG + oft,
F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST );
/* Resume egress traffic to xgm */
if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
else
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
return 0;
}
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
u32 addr_lo, addr_hi;
unsigned int oft = mac->offset + idx * 8;
addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
addr_hi = (addr[5] << 8) | addr[4];
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
if (mac->multiport)
idx = mac->ext_port + idx * mac->adapter->params.nports;
if (idx >= mac->nucast)
return -EINVAL;
set_addr_filter(mac, idx, addr);
if (mac->multiport && idx < mac->adapter->params.nports)
t3_vsc7323_set_addr(mac->adapter, addr, idx);
return 0;
}
/*
* Specify the number of exact address filters that should be reserved for
* unicast addresses. Caller should reload the unicast and multicast addresses
* after calling this.
*/
int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n)
{
if (n > EXACT_ADDR_FILTERS)
return -EINVAL;
mac->nucast = n;
return 0;
}
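/*
 * The read-back-and-rewrite in the two helpers below looks like a no-op,
 * but the exact-match entries apparently arm only when their HIGH word is
 * written: rewriting the LOW words (disable_exact_filters) takes entries
 * out of service and rewriting the HIGH words (enable_exact_filters) puts
 * them back, without disturbing the stored addresses.
 */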
static void disable_exact_filters(struct cmac *mac)
{
unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
static void enable_exact_filters(struct cmac *mac)
{
unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
/* Calculate the RX hash filter index of an Ethernet address */
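/*
 * The loop below folds the 48 address bits (LSB of each octet first) six at
 * a time: hash bit k ends up as the XOR of address bits k, k+6, ..., k+42,
 * giving a 6-bit index into the 64-entry hash spread across the
 * RX_HASH_LOW/RX_HASH_HIGH registers.
 */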
static int hash_hw_addr(const u8 *addr)
{
int hash = 0, octet, bit, i = 0, c;
for (octet = 0; octet < 6; ++octet)
for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
hash ^= (c & 1) << i;
if (++i == 6)
i = 0;
}
return hash;
}
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
u32 hash_lo, hash_hi;
adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
if (promisc_rx_mode(rm))
mac->promisc_map |= 1 << mac->ext_port;
else
mac->promisc_map &= ~(1 << mac->ext_port);
t3_set_reg_field(adap, A_XGM_RX_CFG + oft, F_COPYALLFRAMES,
mac->promisc_map ? F_COPYALLFRAMES : 0);
if (allmulti_rx_mode(rm) || mac->multiport)
hash_lo = hash_hi = 0xffffffff;
else {
u8 *addr;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
while ((addr = t3_get_next_mcaddr(rm)))
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++, addr);
else {
int hash = hash_hw_addr(addr);
if (hash < 32)
hash_lo |= (1 << hash);
else
hash_hi |= (1 << (hash - 32));
}
}
t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
return 0;
}
static int rx_fifo_hwm(int mtu)
{
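/*
 * Worked example, assuming the 32 KB MAC RX FIFO this driver uses
 * elsewhere: mtu = 1514 gives max(32768 - 4542, 12451) = 28226, which the
 * min() below caps at 32768 - 8192 = 24576 bytes for the PAUSE high
 * watermark.
 */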
int hwm;
hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
return min(hwm, MAC_RXFIFO_SIZE - 8192);
}
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
int hwm, lwm;
unsigned int thres, v;
adapter_t *adap = mac->adapter;
/*
* MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
* packet size register includes header, but not FCS.
*/
mtu += 14;
if (mac->multiport)
mtu += 8; /* for preamble */
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
if (mac->multiport)
return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port);
if (adap->params.rev == T3_REV_B2 &&
(t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
disable_exact_filters(mac);
v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
/* drain rx FIFO */
if (t3_wait_op_done(adap,
A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + mac->offset,
1 << 31, 1, 20, 5)) {
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
enable_exact_filters(mac);
return -EIO;
}
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
enable_exact_filters(mac);
} else
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
hwm = rx_fifo_hwm(mtu);
lwm = min(3 * (int) mtu, MAC_RXFIFO_SIZE / 4);
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
if (G_RXFIFOPAUSEHWM(v))
v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
V_RXFIFOPAUSEHWM(hwm / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
/* Adjust the TX FIFO threshold based on the MTU */
thres = (adap->params.vpd.cclk * 1000) / 15625;
thres = (thres * mtu) / 1000;
if (is_10G(adap))
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
V_TXFIFOTHRESH(thres) | V_TXIPG(1));
/* Assuming a minimum drain rate of 2.5 Gb/s... */
if (adap->params.rev > 0)
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
(hwm - lwm) * 4 / 8);
t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
MAC_RXFIFO_SIZE * 4 * 8 / 512);
return 0;
}
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
u32 val;
adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -EINVAL;
if (mac->multiport) {
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
F_TXPAUSEEN);
return t3_vsc7323_set_speed_fc(adap, speed, fc, mac->ext_port);
}
if (speed >= 0) {
if (speed == SPEED_10)
val = V_PORTSPEED(0);
else if (speed == SPEED_100)
val = V_PORTSPEED(1);
else if (speed == SPEED_1000)
val = V_PORTSPEED(2);
else if (speed == SPEED_10000)
val = V_PORTSPEED(3);
else
return -EINVAL;
t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
V_PORTSPEED(M_PORTSPEED), val);
}
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
if (fc & PAUSE_TX)
val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
(fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
}
int t3_mac_enable(struct cmac *mac, int which)
{
int idx = macidx(mac);
adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
struct mac_stats *s = &mac->stats;
if (mac->multiport)
return t3_vsc7323_enable(adap, mac->ext_port, which);
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
mac->tx_mcnt = s->tx_frames;
mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
A_TP_PIO_DATA)));
mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
oft)));
mac->rx_mcnt = s->rx_frames;
mac->rx_pause = s->rx_pause;
mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
oft)));
mac->rx_ocnt = s->rx_fifo_ovfl;
mac->txen = F_TXEN;
mac->toggle_cnt = 0;
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
return 0;
}
int t3_mac_disable(struct cmac *mac, int which)
{
adapter_t *adap = mac->adapter;
if (mac->multiport)
return t3_vsc7323_disable(adap, mac->ext_port, which);
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
mac->txen = 0;
}
if (which & MAC_DIRECTION_RX) {
int val = F_MAC_RESET_;
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
msleep(100);
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
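/* Pick which reset domains to assert along with the MAC core reset. */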
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
}
return 0;
}
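/*
 * Periodic health check for the T3B2 MAC.  Returns 0 if the MAC looks
 * healthy, 1 if TX enable was toggled to nudge a stalled MAC, and 2 if
 * a full t3b2_mac_reset() was required.
 */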
int t3b2_mac_watchdog_task(struct cmac *mac)
{
int status;
unsigned int tx_tcnt, tx_xcnt;
adapter_t *adap = mac->adapter;
struct mac_stats *s = &mac->stats;
unsigned int tx_mcnt, rx_mcnt, rx_xcnt;
if (mac->multiport) {
tx_mcnt = t3_read_reg(adap, A_XGM_STAT_TX_FRAME_LOW);
rx_mcnt = t3_read_reg(adap, A_XGM_STAT_RX_FRAMES_LOW);
} else {
tx_mcnt = (unsigned int)s->tx_frames;
rx_mcnt = (unsigned int)s->rx_frames;
}
status = 0;
tx_xcnt = 1; /* by default, tx_xcnt is making progress */
tx_tcnt = mac->tx_tcnt; /* if tx_mcnt is progressing, ignore tx_tcnt */
rx_xcnt = 1; /* by default, rx_xcnt is making progress */
if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
mac->offset)));
if (tx_xcnt == 0) {
t3_write_reg(adap, A_TP_PIO_ADDR,
A_TP_TX_DROP_CNT_CH0 + macidx(mac));
tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
A_TP_PIO_DATA)));
} else {
goto rxcheck;
}
} else {
mac->toggle_cnt = 0;
goto rxcheck;
}
if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
status = (mac->toggle_cnt > 4) ? 2 : 1;
goto out;
} else {
mac->toggle_cnt = 0;
goto rxcheck;
}
rxcheck:
if (rx_mcnt != mac->rx_mcnt) {
rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
mac->offset))) +
(s->rx_fifo_ovfl - mac->rx_ocnt);
mac->rx_ocnt = s->rx_fifo_ovfl;
} else
goto out;
if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) {
if (!mac->multiport)
status = 2;
goto out;
}
out:
mac->tx_tcnt = tx_tcnt;
mac->tx_xcnt = tx_xcnt;
mac->tx_mcnt = s->tx_frames;
mac->rx_xcnt = rx_xcnt;
mac->rx_mcnt = s->rx_frames;
mac->rx_pause = s->rx_pause;
if (status == 1) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
mac->toggle_cnt++;
} else if (status == 2) {
t3b2_mac_reset(mac);
mac->toggle_cnt = 0;
}
return status;
}
/*
 * This function is called periodically to accumulate the current values of
 * the RMON counters into the port statistics.  Since the packet counters
 * are only 32 bits wide, they can overflow in ~286 seconds at 10G, so the
 * function should be called more frequently than that.  The byte counters
 * are 45 bits wide and would overflow in ~7.8 hours.
 */
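/*
 * Worked numbers, assuming a 10 Gb/s stream of minimum-size frames
 * (64B + 20B overhead, ~14.88 Mpps): 2^32 / 14.88e6 ~= 288 s for the
 * packet counters, and 2^45 / 1.25e9 B/s ~= 28147 s ~= 7.8 h for the
 * byte counters.
 */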
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
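/*
 * The RMON counters appear to be clear-on-read (the congestion-drop
 * MIB handled at the end explicitly is not), so each read is a delta
 * that gets accumulated into the 64-bit software stats.
 */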
u32 v, lo;
if (mac->multiport)
return t3_vsc7323_update_stats(mac);
RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
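/* On rev B2 the top bit of this counter is presumably unreliable. */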
v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
if (mac->adapter->params.rev == T3_REV_B2)
v &= 0x7fffffff;
mac->stats.rx_too_long += v;
RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
RMON_UPDATE(mac, tx_pause, TX_PAUSE);
/* This counts error frames in general (bad FCS, underrun, etc.). */
RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
/* The next stat isn't clear-on-read. */
t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
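/*
 * Unsigned 32-bit wraparound makes (v - lo) the number of new drops
 * since the last accumulation, correct across a single counter
 * overflow.
 */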
lo = (u32)mac->stats.rx_cong_drops;
mac->stats.rx_cong_drops += (u64)(v - lo);
return &mac->stats;
}