Updating Chelsio files. Not quite compiling yet.

This commit is contained in:
jklos 2007-07-07 20:48:16 +00:00
parent 947224b2b3
commit 37c2139a38
11 changed files with 1482 additions and 56 deletions

View File

@ -44,10 +44,11 @@ __FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_adapter.h,v 1.10 2007/05/28 22:57:26 k
#include <sys/mutex.h>
#ifdef __NetBSD__
#define mtx kmutex
#define mtx_init mutex_init
#define mtx_init(a, b, c, d) mutex_init(a, MUTEX_DEFAULT, IPL_NONE)
#define mtx_destroy mutex_destroy
#define mtx_lock mutex_enter
#define mtx_unlock mutex_exit
#define mtx_assert(x, y) // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
#ifdef __FreeBSD__
#include <sys/rman.h>
@ -60,8 +61,7 @@ __FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_adapter.h,v 1.10 2007/05/28 22:57:26 k
#include <net/ethernet.h>
#endif
#ifdef __NetBSD__
#define ETHER_ADDR_LEN 6
#define ETHER_HDR_LEN 14
#include <net/if_ether.h>
#endif
#include <net/if.h>
#include <net/if_media.h>
@ -348,9 +348,7 @@ struct adapter {
uint32_t open_device_map;
uint32_t registered_device_map;
struct mtx lock;
#ifdef __FreeBSD__
driver_intr_t *cxgb_intr;
#endif
void (*cxgb_intr)(void *);
int msi_count;
};

View File

@ -40,6 +40,7 @@ $FreeBSD: src/sys/dev/cxgb/common/cxgb_common.h,v 1.4 2007/05/28 22:57:26 kmacy
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_osdep.h>
#include "cxgb_toedev.h"
#endif
#endif

View File

@ -45,7 +45,7 @@
#include <dev/pci/cxgb_osdep.h>
#include <dev/pci/cxgb_common.h>
#include <dev/pci/cxgb_ioctl.h>
// #include <dev/pci/cxgb_offload.h>
#include <dev/pci/cxgb_offload.h>
#include <dev/pci/cxgb_regs.h>
#include <dev/pci/cxgb_t3_cpl.h>
#include <dev/pci/cxgb_ctl_defs.h>
@ -59,16 +59,16 @@
#include <dev/pci/cxgb_osdep.h>
#include <dev/pci/cxgb_common.h>
#include <dev/pci/cxgb_ioctl.h>
// #include <dev/pci/cxgb_offload.h>
#include <dev/pci/cxgb_offload.h>
#include <dev/pci/cxgb_regs.h>
#include <dev/pci/cxgb_t3_cpl.h>
#include <dev/pci/cxgb_ctl_defs.h>
#include <dev/pci/cxgb_sge_defs.h>
#include <dev/pci/cxgb_firmware_exports.h>
// #include <sys/mvec.h>
#include "toedev.h"
#include "cxgb_toedev.h"
// #include <sys/mbufq.h>
#include <dev/pci/jhash.h>
#include "cxgb_jhash.h"
#endif
#endif

680
sys/dev/pci/cxgb_l2t.c Normal file
View File

@ -0,0 +1,680 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_l2t.c,v 1.2 2007/05/28 22:57:26 kmacy Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#include <sys/bus.h>
#endif
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef __FreeBSD__
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#endif
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __FreeBSD__
#include <netinet/if_ether.h>
#endif
#ifdef __FreeBSD__
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#define VLAN_NONE 0xfff
#define SDL(s) ((struct sockaddr_dl *)s)
#define RT_ENADDR(rt) ((char *)LLADDR(SDL((rt))))
#define rt_expire rt_rmx.rmx_expire
/*
 * Local mirror of the ARP layer's per-route link-level state.
 * NOTE(review): this duplicates the kernel's private struct llinfo_arp
 * so the L2T code can inspect la_hold/la_asked through rt_llinfo; it
 * must be kept in sync with the ARP implementation — confirm layout.
 */
struct llinfo_arp {
        struct callout la_timer;        /* retransmit/expiry timer */
        struct rtentry *la_rt;          /* back-pointer to the route */
        struct mbuf *la_hold;   /* last packet until resolved/timeout */
        u_short la_preempt;     /* countdown for pre-expiry arps */
        u_short la_asked;       /* # requests sent */
};
/*
* Module locking notes: There is a RW lock protecting the L2 table as a
* whole plus a spinlock per L2T entry. Entry lookups and allocations happen
* under the protection of the table lock, individual entry changes happen
* while holding that entry's spinlock. The table lock nests outside the
* entry locks. Allocations of new entries take the table lock as writers so
* no other lookups can happen while allocating new entries. Entry updates
* take the table lock as readers so multiple entries can be updated in
* parallel. An L2T entry can be dropped by decrementing its reference count
* and therefore can happen in parallel with entry allocation but no entry
* can change state or increment its ref count during allocation as both of
* these perform lookups.
*/
/*
 * Extract the 3-bit 802.1p priority from an entry's stored VLAN tag
 * (bits 15:13 of the TCI).
 */
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
        unsigned int tci = e->vlan;

        return (tci >> 13);
}
/*
 * Hash an (IPv4 address, interface index) pair into an L2T hash
 * bucket.  d->nentries is a power of two, so the mask selects a
 * valid bucket index.
 */
static inline unsigned int
arp_hash(u32 key, int ifindex, const struct l2t_data *d)
{
        unsigned int bucket;

        bucket = jhash_2words(key, ifindex, 0);
        return (bucket & (d->nentries - 1));
}
/*
 * Point an L2T entry at a new route, taking a reference on the new
 * route before dropping the reference on the old one (safe even when
 * rt == e->neigh).  Caller must serialize updates to the entry.
 */
static inline void
neigh_replace(struct l2t_entry *e, struct rtentry *rt)
{
        /* Reference the new route first... */
        RT_LOCK(rt);
        RT_ADDREF(rt);
        RT_UNLOCK(rt);

        /* ...then release the old one. */
        if (e->neigh) {
                RT_LOCK(e->neigh);
                RT_REMREF(e->neigh);
                RT_UNLOCK(e->neigh);
        }
        e->neigh = rt;
}
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied mbuf is used for the CPL_L2T_WRITE_REQ. Must be called with the
* entry locked.
*/
/*
 * Set up an L2T entry and send any packets waiting in the arp queue. The
 * supplied mbuf is used for the CPL_L2T_WRITE_REQ. Must be called with the
 * entry locked.
 *
 * Returns 0 on success or ENOMEM if no mbuf was supplied and none could
 * be allocated.
 */
static int
setup_l2e_send_pending(struct toedev *dev, struct mbuf *m,
    struct l2t_entry *e)
{
        struct cpl_l2t_write_req *req;

        if (!m) {
                if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return (ENOMEM);
        }
        /*
         * XXX MH_ALIGN
         * NOTE(review): the request is written via mtod() without setting
         * m_len/m_pkthdr.len — presumably cxgb_ofld_send() fixes the
         * lengths; confirm.
         */
        req = mtod(m, struct cpl_l2t_write_req *);
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
        req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
            V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
            V_L2T_W_PRIO(vlan_prio(e)));

        /* Snapshot the resolved MAC into the entry and the request. */
        memcpy(e->dmac, RT_ENADDR(e->neigh), sizeof(e->dmac));
        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
        m_set_priority(m, CPL_PRIORITY_CONTROL);
        cxgb_ofld_send(dev, m);

        /* Drain packets that were queued awaiting resolution. */
        while (e->arpq_head) {
                m = e->arpq_head;
                e->arpq_head = m->m_next;
                m->m_next = NULL;
                cxgb_ofld_send(dev, m);
        }
        e->arpq_tail = NULL;
        e->state = L2T_STATE_VALID;

        return 0;
}
/*
* Add a packet to the an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
*/
/*
 * Append an mbuf to an entry's queue of packets awaiting ARP
 * resolution (singly linked via m_next with head/tail pointers).
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
        m->m_next = NULL;
        if (e->arpq_head == NULL)
                e->arpq_head = m;
        else
                e->arpq_tail->m_next = m;
        e->arpq_tail = m;
}
/*
 * Slow path for sending a packet through an L2T entry that is not in
 * the VALID state.  Returns 0 or ENOMEM.
 *
 * FIX: the previous version allocated a scratch mbuf (m0) up front and
 * leaked it on the VALID fast path, and leaked it again by overwriting
 * it with a second m_gethdr() in the RESOLVING case.  The scratch mbuf
 * is now allocated only where arpresolve() actually consumes it.
 */
int
t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
    struct l2t_entry *e)
{
        struct rtentry *rt;
        struct mbuf *m0;

        rt = e->neigh;
again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return (ENOMEM);
                /* arpresolve() takes ownership of m0 (holds or frees it). */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
                mtx_lock(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                mtx_unlock(&e->lock);
                /* FALLTHROUGH */
        case L2T_STATE_VALID:     /* fast-path, send the packet on */
                return cxgb_ofld_send(dev, m);
        case L2T_STATE_RESOLVING:
                mtx_lock(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed; re-evaluate the state. */
                        mtx_unlock(&e->lock);
                        goto again;
                }
                arpq_enqueue(e, m);
                mtx_unlock(&e->lock);

                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return (ENOMEM);
                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution. However, because the m_gethdr above can fail,
                 * we allow each packet added to the arpq to retry resolution
                 * as a way of recovering from transient memory exhaustion.
                 * A better way would be to use a work request to retry L2T
                 * entries when there's no memory.
                 *
                 * NOTE(review): on success, m (already on the arpq) is
                 * reused as the CPL_L2T_WRITE_REQ buffer by
                 * setup_l2e_send_pending(); this mirrors the imported
                 * FreeBSD code but looks suspect — confirm upstream intent.
                 */
                if (arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt)) == 0) {
                        mtx_lock(&e->lock);
                        if (e->arpq_head)
                                setup_l2e_send_pending(dev, m, e);
                        else
                                m_freem(m);
                        mtx_unlock(&e->lock);
                }
        }
        return 0;
}
/*
 * Kick an L2T entry's state machine without sending a payload packet:
 * revalidates STALE entries and (re)starts ARP for RESOLVING ones.
 *
 * FIX: the previous version allocated a scratch mbuf (m0) up front and
 * leaked it when the entry was VALID (early return) and again by
 * overwriting it with a second m_gethdr() in the RESOLVING case.  The
 * scratch mbuf is now allocated only where arpresolve() consumes it.
 */
void
t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e)
{
        struct rtentry *rt;
        struct mbuf *m0;

        rt = e->neigh;
again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return;
                /* arpresolve() takes ownership of m0 (holds or frees it). */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
                mtx_lock(&e->lock);
                if (e->state == L2T_STATE_STALE) {
                        e->state = L2T_STATE_VALID;
                }
                mtx_unlock(&e->lock);
                return;
        case L2T_STATE_VALID:     /* fast-path, nothing to do */
                return;
        case L2T_STATE_RESOLVING:
                mtx_lock(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed; re-evaluate the state. */
                        mtx_unlock(&e->lock);
                        goto again;
                }
                mtx_unlock(&e->lock);

                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return;
                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution. However, because the allocation above can
                 * fail, we allow each event to retry resolution as a way of
                 * recovering from transient memory exhaustion.  A better
                 * way would be to use a work request to retry L2T entries
                 * when there's no memory.
                 */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
        }
        return;
}
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
/*
 * Allocate a free L2T entry. Must be called with l2t_data.lock held
 * as a writer.  Returns NULL when every entry is referenced.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
        struct l2t_entry *end, *e, **p;

        if (!atomic_load_acq_int(&d->nfree))
                return NULL;

        /* there's definitely a free entry */
        /* First scan from the rover to the end of the table... */
        for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
                if (atomic_load_acq_int(&e->refcnt) == 0)
                        goto found;

        /* ...then wrap and scan from the start; entry 0 is never used,
         * and nfree != 0 guarantees this loop terminates. */
        for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e) ;
found:
        d->rover = e + 1;
        atomic_add_int(&d->nfree, -1);

        /*
         * The entry we found may be an inactive entry that is
         * presently in the hash table. We need to remove it.
         */
        if (e->state != L2T_STATE_UNUSED) {
                int hash = arp_hash(e->addr, e->ifindex, d);

                /* Unlink e from its hash chain. */
                for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                break;
                        }
                e->state = L2T_STATE_UNUSED;
        }
        return e;
}
/*
* Called when an L2T entry has no more users. The entry is left in the hash
* table since it is likely to be reused but we also bump nfree to indicate
* that the entry can be reallocated for a different neighbor. We also drop
* the existing neighbor reference in case the neighbor is going away and is
* waiting on our reference.
*
* Because entries can be reallocated to other neighbors once their ref count
* drops to 0 we need to take the entry's lock to avoid races with a new
* incarnation.
*/
void
t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
        mtx_lock(&e->lock);
        if (atomic_load_acq_int(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        /* Drop our route reference; the entry itself stays
                         * in the hash table for likely reuse. */
                        RT_LOCK(e->neigh);
                        RT_REMREF(e->neigh);
                        RT_UNLOCK(e->neigh);
                        e->neigh = NULL;
                }
        }
        mtx_unlock(&e->lock);
        /* Make the entry eligible for reallocation by alloc_l2e(). */
        atomic_add_int(&d->nfree, 1);
}
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void
reuse_entry(struct l2t_entry *e, struct rtentry *neigh)
{
        struct llinfo_arp *la;

        la = (struct llinfo_arp *)neigh->rt_llinfo;
        mtx_lock(&e->lock);         /* avoid race with t3_l2t_free */
        if (neigh != e->neigh)
                neigh_replace(e, neigh);

        /*
         * Re-derive the entry state from the route's ARP state:
         * re-resolve if the cached MAC no longer matches.
         * NOTE(review): the rt_expire > time_uptime test forcing
         * RESOLVING looks inverted relative to the usual BSD ARP
         * expiry convention (expire in the future == still valid) —
         * confirm against the NetBSD ARP code.
         */
        if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)) ||
            (neigh->rt_expire > time_uptime))
                e->state = L2T_STATE_RESOLVING;
        else if (la->la_hold == NULL)
                e->state = L2T_STATE_VALID;
        else
                e->state = L2T_STATE_STALE;
        mtx_unlock(&e->lock);
}
/*
 * Look up (or allocate) the L2T entry for a route/SMT-index pair and
 * take a reference on it.  Returns NULL when the table is full.
 * Takes the table lock as a writer since it may allocate.
 */
struct l2t_entry *
t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
    unsigned int smt_idx)
{
        struct l2t_entry *e;
        struct l2t_data *d = L2DATA(dev);
        /* Hash key is the route's destination IPv4 address. */
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->rt_ifp->if_index;
        int hash = arp_hash(addr, ifidx, d);

        rw_wlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx &&
                    e->smt_idx == smt_idx) {
                        l2t_hold(d, e);
                        /* First user after the refcnt hit 0: refresh the
                         * entry's neighbour binding and state. */
                        if (atomic_load_acq_int(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
                        goto done;
                }

        /* Need to allocate a new entry */
        e = alloc_l2e(d);
        if (e) {
                mtx_lock(&e->lock);          /* avoid race with t3_l2t_free */
                e->next = d->l2tab[hash].first;
                d->l2tab[hash].first = e;
                e->state = L2T_STATE_RESOLVING;
                e->addr = addr;
                e->ifindex = ifidx;
                e->smt_idx = smt_idx;
                atomic_store_rel_int(&e->refcnt, 1);
                neigh_replace(e, neigh);
#ifdef notyet
                /*
                 * XXX need to add accessor function for vlan tag
                 */
                if (neigh->rt_ifp->if_vlantrunk)
                        e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
                else
#endif
                        e->vlan = VLAN_NONE;
                mtx_unlock(&e->lock);
        }
done:
        rw_wunlock(&d->lock);
        return e;
}
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packets is sent to the TOE.
*
* XXX: maybe we should abandon the latter behavior and just require a failure
* handler.
*/
/*
 * Called when address resolution fails for an L2T entry: dispose of
 * the packets that were queued on its arpq.  If a packet carries a
 * failure handler it is invoked (not yet wired up); otherwise the
 * packet is handed to the TOE anyway.
 */
static void
handle_failed_resolution(struct toedev *dev, struct mbuf *arpq)
{
        struct mbuf *m, *next;

        for (m = arpq; m != NULL; m = next) {
#ifdef notyet
                struct l2t_mbuf_cb *cb = L2T_MBUF_CB(m);
#endif
                next = m->m_next;
                m->m_next = NULL;
#ifdef notyet
                if (cb->arp_failure_handler)
                        cb->arp_failure_handler(dev, m);
                else
#endif
                cxgb_ofld_send(dev, m);
        }
}
#if defined(NETEVENT) || !defined(CONFIG_CHELSIO_T3_MODULE)
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
        struct l2t_entry *e;
        struct mbuf *arpq = NULL;
        struct l2t_data *d = L2DATA(dev);
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->rt_ifp->if_index;
        int hash = arp_hash(addr, ifidx, d);
        struct llinfo_arp *la;

        rw_rlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx) {
                        /* Entry lock taken here; released at the bottom of
                         * the found: path. */
                        mtx_lock(&e->lock);
                        goto found;
                }
        rw_runlock(&d->lock);
        return;

found:
        /* Safe to drop the table lock: we hold the entry lock, and the
         * entry is never freed, only recycled under its lock. */
        rw_runlock(&d->lock);
        if (atomic_load_acq_int(&e->refcnt)) {
                if (neigh != e->neigh)
                        neigh_replace(e, neigh);
                la = (struct llinfo_arp *)neigh->rt_llinfo;
                if (e->state == L2T_STATE_RESOLVING) {
                        /* Resolution gave up: collect the arpq for failure
                         * handling outside the lock. */
                        if (la->la_asked >= 5 /* arp_maxtries */) {
                                arpq = e->arpq_head;
                                e->arpq_head = e->arpq_tail = NULL;
                        } else if (la->la_hold == NULL)
                                setup_l2e_send_pending(dev, NULL, e);
                } else {
                        e->state = (la->la_hold == NULL) ?
                            L2T_STATE_VALID : L2T_STATE_STALE;
                        /* Re-program the hardware if the MAC changed. */
                        if (memcmp(e->dmac, RT_ENADDR(neigh), 6))
                                setup_l2e_send_pending(dev, NULL, e);
                }
        }
        mtx_unlock(&e->lock);
        if (arpq)
                handle_failed_resolution(dev, arpq);
}
#else
/*
* Called from a kprobe, interrupts are off.
*/
/*
 * Called from a kprobe, interrupts are off.
 *
 * NOTE(review): this #else branch is carried over from the Linux
 * driver (neigh->dev->ifindex, mod_timer, jiffies, neigh->lock) and
 * cannot compile on NetBSD; it is dead reference code unless
 * CONFIG_CHELSIO_T3_MODULE without NETEVENT is ever defined.
 */
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
        struct l2t_entry *e;
        struct l2t_data *d = L2DATA(dev);
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->dev->ifindex;
        int hash = arp_hash(addr, ifidx, d);

        rw_rlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx) {
                        mtx_lock(&e->lock);
                        if (atomic_load_acq_int(&e->refcnt)) {
                                if (neigh != e->neigh)
                                        neigh_replace(e, neigh);
                                /* Defer the real work to update_timer_cb. */
                                e->tdev = dev;
                                mod_timer(&e->update_timer, jiffies + 1);
                        }
                        mtx_unlock(&e->lock);
                        break;
                }
        rw_runlock(&d->lock);
}
/*
 * Deferred half of the kprobe-based t3_l2t_update() above: runs in
 * timer context and applies the neighbour change to the entry.
 * NOTE(review): Linux-derived dead code (nud_state, NUD_FAILED,
 * neigh_is_connected, rwlock in the neighbour) — will not compile on
 * NetBSD as-is.
 */
static void
update_timer_cb(unsigned long data)
{
        struct mbuf *arpq = NULL;
        struct l2t_entry *e = (struct l2t_entry *)data;
        struct rtentry *neigh = e->neigh;
        struct toedev *dev = e->tdev;

        barrier();
        /* Racy pre-check before taking locks. */
        if (!atomic_load_acq_int(&e->refcnt))
                return;

        rw_rlock(&neigh->lock);
        mtx_lock(&e->lock);

        if (atomic_load_acq_int(&e->refcnt)) {
                if (e->state == L2T_STATE_RESOLVING) {
                        if (neigh->nud_state & NUD_FAILED) {
                                /* Resolution failed: hand the queued packets
                                 * to the failure path outside the locks. */
                                arpq = e->arpq_head;
                                e->arpq_head = e->arpq_tail = NULL;
                        } else if (neigh_is_connected(neigh) && e->arpq_head)
                                setup_l2e_send_pending(dev, NULL, e);
                } else {
                        e->state = neigh_is_connected(neigh) ?
                            L2T_STATE_VALID : L2T_STATE_STALE;
                        if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)))
                                setup_l2e_send_pending(dev, NULL, e);
                }
        }
        mtx_unlock(&e->lock);
        rw_runlock(&neigh->lock);

        if (arpq)
                handle_failed_resolution(dev, arpq);
}
#endif
/*
 * Allocate and initialize an L2 table with l2t_capacity entries.
 * Entry 0 is reserved/never used, so only capacity-1 entries are free.
 * Returns NULL on allocation failure.
 */
struct l2t_data *
t3_init_l2t(unsigned int l2t_capacity)
{
        struct l2t_data *d;
        int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

        d = cxgb_alloc_mem(size);
        if (!d)
                return NULL;

        /* NOTE(review): fields such as arpq_head/arpq_tail/first are not
         * explicitly initialized — this assumes cxgb_alloc_mem() returns
         * zeroed memory; confirm. */
        d->nentries = l2t_capacity;
        d->rover = &d->l2tab[1];        /* entry 0 is not used */
        atomic_store_rel_int(&d->nfree, l2t_capacity - 1);
        rw_init(&d->lock, "L2T");

        for (i = 0; i < l2t_capacity; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                mtx_init(&d->l2tab[i].lock, "L2TAB", NULL, MTX_DEF);
                atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
                setup_timer(&d->l2tab[i].update_timer, update_timer_cb,
                    (unsigned long)&d->l2tab[i]);
#endif
#endif
        }
        return d;
}
/*
 * Tear down an L2 table created by t3_init_l2t(), quiescing any
 * per-entry update timers first (Linux module configuration only).
 */
void
t3_free_l2t(struct l2t_data *d)
{
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
        int idx;

        /* Stop all L2T timers */
        for (idx = 0; idx < d->nentries; ++idx)
                del_timer_sync(&d->l2tab[idx].update_timer);
#endif
#endif
        cxgb_free_mem(d);
}
#ifdef CONFIG_PROC_FS
/*
 * NOTE(review): Linux /proc seq_file support carried over verbatim from
 * the Linux Chelsio driver (seq_file, NIPQUAD, e->neigh->dev->name).
 * CONFIG_PROC_FS is never defined on NetBSD, so this whole section is
 * dead code kept for reference; it would not compile if enabled.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Map a seq_file position to an L2T entry, or NULL past the end. */
static inline void *
l2t_get_idx(struct seq_file *seq, loff_t pos)
{
        struct l2t_data *d = seq->private;

        return pos >= d->nentries ? NULL : &d->l2tab[pos];
}

static void *
l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN;
}

static void *
l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        v = l2t_get_idx(seq, *pos + 1);
        if (v)
                ++*pos;
        return v;
}

static void
l2t_seq_stop(struct seq_file *seq, void *v)
{
}

/* One-character state code for the /proc listing. */
static char
l2e_state(const struct l2t_entry *e)
{
        switch (e->state) {
        case L2T_STATE_VALID: return 'V';  /* valid, fast-path entry */
        case L2T_STATE_STALE: return 'S';  /* needs revalidation, but usable */
        case L2T_STATE_RESOLVING:
                return e->arpq_head ? 'A' : 'R';
        default:
                return 'U';
        }
}

static int
l2t_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Index IP address Ethernet address VLAN "
                    "Prio State Users SMTIDX Port\n");
        else {
                char ip[20];
                struct l2t_entry *e = v;

                mtx_lock(&e->lock);
                sprintf(ip, "%u.%u.%u.%u", NIPQUAD(e->addr));
                seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
                    " %3u %c %7u %4u %s\n",
                    e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
                    e->dmac[3], e->dmac[4], e->dmac[5],
                    e->vlan & EVL_VLID_MASK, vlan_prio(e),
                    l2e_state(e), atomic_load_acq_int(&e->refcnt), e->smt_idx,
                    e->neigh ? e->neigh->dev->name : "");
                mtx_unlock(&e->lock);
        }
        return 0;
}
#endif

View File

@ -138,6 +138,25 @@ void t3_l2t_proc_free(struct proc_dir_entry *dir);
int cxgb_ofld_send(struct toedev *dev, struct mbuf *m);
#ifdef __NetBSD__
/*
 * NetBSD stand-in for FreeBSD's atomic_fetchadd_int(): add v to *p and
 * return the previous value.
 *
 * BUG FIX: the previous placeholder returned a call to itself
 * (return atomic_fetchadd_int(p, v);) — unconditional infinite
 * recursion that would overflow the kernel stack on first use.
 * Implemented directly instead.
 *
 * NOTE(review): this read-modify-write is NOT atomic; it is only safe
 * if callers already serialize access (as the original XXX markers in
 * this header suggest) — replace with a real atomic op when available.
 */
static inline u_int atomic_fetchadd_int(volatile u_int *p, u_int v)
{
        u_int prev;

        prev = *p;
        *p = prev + v;
        return (prev);
}
/*
 * NetBSD stand-in for FreeBSD's atomic_add_int().
 * NOTE(review): plain read-modify-write, not atomic (hence the XXX
 * marker) — only safe while all callers are serialized; confirm before
 * use from interrupt or MP context.
 */
static inline void atomic_add_int(volatile int *p, int v)
{
*p += v; // XXXXXXXXXXXXXXXXXXXXXXXXXX
}
#endif
static inline int l2t_send(struct toedev *dev, struct mbuf *m,
struct l2t_entry *e)
{

407
sys/dev/pci/cxgb_lro.c Normal file
View File

@ -0,0 +1,407 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_lro.c,v 1.7 2007/06/13 05:35:59 kmacy Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#include <sys/bus.h>
#endif
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#ifdef __FreeBSD__
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif
#endif
#ifdef __NetBSD__
#include "cxgb_include.h"
#endif
#include <machine/in_cksum.h>
#ifndef M_LRO
#define M_LRO 0x0200
#endif
#ifdef DEBUG
#define MBUF_HEADER_CHECK(m) do { \
if ((m->m_len == 0) || (m->m_pkthdr.len == 0) \
|| ((m->m_flags & M_PKTHDR) == 0)) \
panic("lro_flush_session - mbuf len=%d pktlen=%d flags=0x%x\n", \
m->m_len, m->m_pkthdr.len, m->m_flags); \
if ((m->m_flags & M_PKTHDR) == 0) \
panic("first mbuf is not packet header - flags=0x%x\n", \
m->m_flags); \
if ((m->m_len < ETHER_HDR_LEN) || (m->m_pkthdr.len < ETHER_HDR_LEN)) \
panic("packet too small len=%d pktlen=%d\n", \
m->m_len, m->m_pkthdr.len);\
} while (0)
#else
#define MBUF_HEADER_CHECK(m)
#endif
#define IPH_OFFSET (2 + sizeof (struct cpl_rx_pkt) + ETHER_HDR_LEN)
#define LRO_SESSION_IDX_HINT_HASH(hash) (hash & (MAX_LRO_SES - 1))
#define LRO_IDX_INC(idx) idx = (idx + 1) & (MAX_LRO_SES - 1)
/*
 * Return nonzero when the new segment (ih/th) belongs to the same TCP
 * 4-tuple as the session's stored head mbuf.  All fields are compared
 * in network byte order, so no byte swapping is needed.
 */
static __inline int
lro_match(struct mbuf *m, struct ip *ih, struct tcphdr *th)
{
        struct ip *sih;
        struct tcphdr *sth;

        sih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
        sth = (struct tcphdr *)(sih + 1);

        if (th->th_sport != sth->th_sport)
                return (0);
        if (th->th_dport != sth->th_dport)
                return (0);
        if (ih->ip_src.s_addr != sih->ip_src.s_addr)
                return (0);
        return (ih->ip_dst.s_addr == sih->ip_dst.s_addr);
}
/*
 * Find the active LRO session matching the segment's 4-tuple, starting
 * the search at the hash-hinted slot.  Returns NULL when no session
 * matches.
 *
 * BUG FIX: the previous version returned the *last examined* session
 * when the scan exhausted all active sessions without a match, so the
 * caller would merge or flush an unrelated flow's session.  It now
 * returns NULL in that case, which the caller already handles by
 * creating a new session.
 */
static __inline struct t3_lro_session *
lro_lookup(struct lro_state *l, int idx, struct ip *ih, struct tcphdr *th)
{
        struct t3_lro_session *s;
        int active = l->nactive;

        while (active) {
                s = &l->sess[idx];
                if (s->head) {
                        if (lro_match(s->head, ih, th))
                                return (s);
                        /* Counted one active session as inspected. */
                        active--;
                }
                LRO_IDX_INC(idx);
        }
        return (NULL);
}
/*
 * Decide whether a received frame is a candidate for LRO: it must be a
 * 4-tuple RSS-hashed, hardware-checksummed IPv4 packet with no IP
 * options.
 */
static __inline int
can_lro_packet(struct cpl_rx_pkt *cpl, unsigned int rss_hi)
{
        struct ether_header *eh = (struct ether_header *)(cpl + 1);
        struct ip *ih = (struct ip *)(eh + 1);

        /*
         * XXX VLAN support?
         *
         * NOTE(review): the (*((uint8_t *)cpl + 1) & 0x90) != 0x10 test
         * inspects flag bits in the second byte of the CPL header —
         * presumably "checksum valid and not fragmented"; confirm
         * against the T3 CPL documentation.
         */
        if (__predict_false(G_HASHTYPE(ntohl(rss_hi)) != RSS_HASH_4_TUPLE ||
            (*((uint8_t *)cpl + 1) & 0x90) != 0x10 ||
            cpl->csum != 0xffff || eh->ether_type != ntohs(ETHERTYPE_IP) ||
            ih->ip_hl != (sizeof (*ih) >> 2))) {
                return 0;
        }
        return 1;
}
/*
 * Decide whether a TCP segment can be merged by LRO: it must be a pure
 * ACK-only segment (no FIN/SYN/RST/URG/CWR; PSH and ECE are ignored by
 * the 0xB7 mask), and any TCP options must be exactly the standard
 * NOP-NOP-timestamp block.
 */
static int
can_lro_tcpsegment(struct tcphdr *th)
{
        int olen = (th->th_off << 2) - sizeof (*th);
        /* Byte 13 of the TCP header is the flags byte. */
        u8 control_bits = *((u8 *)th + 13);

        if (__predict_false((control_bits & 0xB7) != 0x10))
                goto no_lro;

        if (olen) {
                uint32_t *ptr = (u32 *)(th + 1);

                /* Only the "appendix A" aligned timestamp option form
                 * (NOP, NOP, TIMESTAMP, length) is accepted. */
                if (__predict_false(olen != TCPOLEN_TSTAMP_APPA ||
                    *ptr != ntohl((TCPOPT_NOP << 24) |
                    (TCPOPT_NOP << 16) |
                    (TCPOPT_TIMESTAMP << 8) |
                    TCPOLEN_TIMESTAMP)))
                        goto no_lro;
        }

        return 1;
no_lro:
        return 0;
}
/*
 * Initialize an LRO session around its first mbuf: record the IP
 * datagram length and compute the next expected TCP sequence number.
 */
static __inline void
lro_new_session_init(struct t3_lro_session *s, struct mbuf *m)
{
        struct ip *ih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
        struct tcphdr *th = (struct tcphdr *) (ih + 1);
        int ip_len = ntohs(ih->ip_len);

        DPRINTF("%s(s=%p, m=%p)\n", __FUNCTION__, s, m);

        s->head = m;
        MBUF_HEADER_CHECK(m);
        s->ip_len = ip_len;
        /* Next expected seq = this seq + TCP payload length
         * (ip_len minus IP header minus TCP header). */
        s->seq = ntohl(th->th_seq) + ip_len - sizeof(*ih) - (th->th_off << 2);
}
/*
 * Deliver a session's accumulated packet up the stack.  The stored IP
 * length is written back and the IP checksum recomputed before the
 * packet is handed to t3_rx_eth() (ethpad 2).  If m is non-NULL the
 * session is immediately re-initialized around m; otherwise the slot
 * is released and the active count dropped.
 */
static void
lro_flush_session(struct sge_qset *qs, struct t3_lro_session *s, struct mbuf *m)
{
        struct lro_state *l = &qs->lro;
        struct mbuf *sm = s->head;
        struct ip *ih = (struct ip *)(mtod(sm, uint8_t *) + IPH_OFFSET);

        DPRINTF("%s(qs=%p, s=%p, ", __FUNCTION__,
            qs, s);
        if (m)
                DPRINTF("m=%p)\n", m);
        else
                DPRINTF("m=NULL)\n");

        /* Finalize the merged header: length changed, so redo the sum. */
        ih->ip_len = htons(s->ip_len);
        ih->ip_sum = 0;
        ih->ip_sum = in_cksum_hdr(ih);

        MBUF_HEADER_CHECK(sm);

        sm->m_flags |= M_LRO;
        t3_rx_eth(qs->port, &qs->rspq, sm, 2);

        if (m) {
                /* Slot is immediately reused for the new flow head. */
                s->head = m;
                lro_new_session_init(s, m);
        } else {
                s->head = NULL;
                l->nactive--;
        }

        qs->port_stats[SGE_PSTATS_LRO_FLUSHED]++;
}
/*
 * Start a new LRO session for mbuf m, preferring the hash-hinted slot.
 * If every slot is occupied, the hinted slot is flushed and reused for
 * m (nactive is unchanged in that case since the flush re-initializes
 * the slot with m as its new head).
 */
static __inline struct t3_lro_session *
lro_new_session(struct sge_qset *qs, struct mbuf *m, uint32_t rss_hash)
{
        struct lro_state *l = &qs->lro;
        int idx = LRO_SESSION_IDX_HINT_HASH(rss_hash);
        struct t3_lro_session *s = &l->sess[idx];

        DPRINTF("%s(qs=%p, m=%p, rss_hash=0x%x)\n", __FUNCTION__,
            qs, m, rss_hash);

        if (__predict_true(!s->head))
                goto done;

        /* Invariant check: nactive can never exceed the slot count. */
        if (l->nactive > MAX_LRO_SES)
                panic("MAX_LRO_PER_QSET exceeded");

        if (l->nactive == MAX_LRO_SES) {
                /* Table full: evict the hinted slot and reuse it for m. */
                lro_flush_session(qs, s, m);
                qs->port_stats[SGE_PSTATS_LRO_X_STREAMS]++;
                return s;
        }

        /* Probe forward for a free slot; one must exist since
         * nactive < MAX_LRO_SES. */
        while (1) {
                LRO_IDX_INC(idx);
                s = &l->sess[idx];
                if (!s->head)
                        break;
        }
done:
        lro_new_session_init(s, m);
        l->nactive++;

        return s;
}
/*
 * Try to merge segment m into session s.  Returns 0 on success, -1
 * when the segment cannot be merged (VLAN mismatch, out-of-order
 * sequence, or unacceptable timestamp option), in which case the
 * caller flushes the session.
 *
 * NOTE(review): on the merge path, m's headers are trimmed with
 * m_adj() but m is never linked onto the session head (see the
 * disabled frag_list/nr_frags code below) and never freed here — the
 * merged payload appears to be dropped/leaked.  Consistent with the
 * commit message ("not quite compiling yet"); confirm before enabling
 * LRO.
 */
static __inline int
lro_update_session(struct t3_lro_session *s, struct mbuf *m)
{
        struct mbuf *sm = s->head;
        struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(sm, uint8_t *) + 2);
        struct cpl_rx_pkt *ncpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + 2);
        struct ip *nih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
        struct tcphdr *th, *nth = (struct tcphdr *)(nih + 1);
        uint32_t seq = ntohl(nth->th_seq);
        int plen, tcpiphlen, olen = (nth->th_off << 2) - sizeof (*nth);

        DPRINTF("%s(s=%p, m=%p)\n", __FUNCTION__, s, m);

        /* Reject when the session is VLAN-tagged and the tag differs.
         * NOTE(review): no check when only the NEW segment is tagged. */
        if (cpl->vlan_valid && cpl->vlan != ncpl->vlan) {
                return -1;
        }
        /* Only strictly in-order segments are merged. */
        if (__predict_false(seq != s->seq)) {
                DPRINTF("sequence mismatch\n");
                return -1;
        }

        MBUF_HEADER_CHECK(sm);

        th = (struct tcphdr *)(mtod(sm, uint8_t *) + IPH_OFFSET + sizeof (struct ip));

        if (olen) {
                uint32_t *ptr = (uint32_t *)(th + 1);
                uint32_t *nptr = (uint32_t *)(nth + 1);

                /* Timestamp must not move backwards (PAWS-style) and the
                 * echo reply must be non-zero; then adopt the new
                 * TSval/TSecr into the merged header. */
                if (__predict_false(ntohl(*(ptr + 1)) > ntohl(*(nptr + 1)) ||
                    !*(nptr + 2))) {
                        return -1;
                }
                *(ptr + 1) = *(nptr + 1);
                *(ptr + 2) = *(nptr + 2);
        }
        /* Carry the latest ACK/window into the merged header. */
        th->th_ack = nth->th_ack;
        th->th_win = nth->th_win;

        /* Account the new payload into the session totals. */
        tcpiphlen = (nth->th_off << 2) + sizeof (*nih);
        plen = ntohs(nih->ip_len) - tcpiphlen;
        s->seq += plen;
        s->ip_len += plen;
        sm->m_pkthdr.len += plen;

        /*
         * XXX FIX ME
         *
         *
         */

#if 0
        /* XXX this I *do not* understand */
        if (plen > skb_shinfo(s->skb)->gso_size)
                skb_shinfo(s->skb)->gso_size = plen;
#endif
#if __FreeBSD_version > 700000
        if (plen > sm->m_pkthdr.tso_segsz)
                sm->m_pkthdr.tso_segsz = plen;
#endif
        DPRINTF("m_adj(%d)\n", (int)(IPH_OFFSET + tcpiphlen));
        /* Strip CPL + Ethernet + IP + TCP headers from the new segment. */
        m_adj(m, IPH_OFFSET + tcpiphlen);
#if 0
        if (__predict_false(!skb_shinfo(s->skb)->frag_list))
                skb_shinfo(s->skb)->frag_list = skb;

#endif

#if 0
        /*
         * XXX we really need to be able to
         * support vectors of buffers in FreeBSD
         */
        int nr = skb_shinfo(s->skb)->nr_frags;
        skb_shinfo(s->skb)->frags[nr].page = frag->page;
        skb_shinfo(s->skb)->frags[nr].page_offset =
            frag->page_offset + IPH_OFFSET + tcpiphlen;
        skb_shinfo(s->skb)->frags[nr].size = plen;
        skb_shinfo(s->skb)->nr_frags = ++nr;

#endif
        return (0);
}
/*
 * Receive entry point with LRO: either merges the frame into an
 * existing session, starts a new one, or falls through to the normal
 * t3_rx_eth() path when the frame is not LRO-eligible.
 */
void
t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro)
{
        struct sge_qset *qs = rspq_to_qset(rq);
        struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
        struct ether_header *eh = (struct ether_header *)(cpl + 1);
        struct ip *ih;
        struct tcphdr *th;
        struct t3_lro_session *s = NULL;
        struct port_info *pi = qs->port;

        if (lro == 0)
                goto no_lro;

        if (!can_lro_packet(cpl, rss_csum))
                goto no_lro;

        /* Sanity: the CPL's interface index must map to this qset's port. */
        if (&adap->port[cpl->iff] != pi)
                panic("bad port index %d\n", cpl->iff);

        ih = (struct ip *)(eh + 1);
        th = (struct tcphdr *)(ih + 1);

        s = lro_lookup(&qs->lro,
            LRO_SESSION_IDX_HINT_HASH(rss_hash), ih, th);

        if (__predict_false(!can_lro_tcpsegment(th))) {
                /* Not mergeable (flags/options): flush any matching
                 * session below and deliver this frame normally. */
                goto no_lro;
        } else if (__predict_false(!s)) {
                s = lro_new_session(qs, m, rss_hash);
        } else {
                /* Merge; on failure the session is flushed and restarted
                 * with m as its new head. */
                if (lro_update_session(s, m)) {
                        lro_flush_session(qs, s, m);
                }
                /* Keep the aggregate below the 64KB IP length limit. */
                if (__predict_false(s->head->m_pkthdr.len + pi->ifp->if_mtu > 65535)) {
                        lro_flush_session(qs, s, NULL);
                }
        }

        qs->port_stats[SGE_PSTATS_LRO_QUEUED]++;
        return;
no_lro:
        if (s)
                lro_flush_session(qs, s, NULL);

        if (m->m_len == 0 || m->m_pkthdr.len == 0 || (m->m_flags & M_PKTHDR) == 0)
                DPRINTF("rx_eth_lro mbuf len=%d pktlen=%d flags=0x%x\n",
                    m->m_len, m->m_pkthdr.len, m->m_flags);

        t3_rx_eth(pi, rq, m, ethpad);
}
/*
 * Flush every active LRO session on a queue set, scanning from the
 * last active slot.  lro_flush_session() decrements nactive for each
 * live session, which terminates the loop.
 */
void
t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state)
{
        unsigned int slot = state->active_idx;

        while (state->nactive != 0) {
                struct t3_lro_session *s = &state->sess[slot];

                if (s->head != NULL)
                        lro_flush_session(qs, s, NULL);
                LRO_IDX_INC(slot);
        }
}

View File

@ -68,9 +68,6 @@ __FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_main.c,v 1.21 2007/05/29 04:18:21 kmac
#ifdef __FreeBSD__
#include <net/ethernet.h>
#endif
#ifdef __NetBSD__
#define ETHER_ADDR_LEN 6
#endif
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
@ -79,6 +76,7 @@ __FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_main.c,v 1.21 2007/05/29 04:18:21 kmac
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#ifdef __FreeBSD__
#include <netinet/if_ether.h>
#endif
@ -147,6 +145,11 @@ static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
#endif
#ifdef __NetBSD__
static int cxgb_controller_match(device_t dev, struct cfdata *match, void *context);
static void cxgb_controller_attach(device_t dev, device_t self, void *context);
static int cxgb_controller_detach(device_t dev, int flags);
#endif
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
unsigned int end);
@ -180,15 +183,28 @@ static driver_t cxgb_controller_driver = {
static devclass_t cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
#endif
#ifdef __NetBSD__
CFATTACH_DECL(cxgbc, sizeof(struct adapter), cxgb_controller_match, cxgb_controller_attach, cxgb_controller_detach, NULL);
#endif
/*
* Attachment glue for the ports. Attachment is done directly to the
* controller device.
*/
#ifdef __FreeBSD__
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);
#endif
#ifdef __NetBSD__
static int cxgb_port_match(device_t dev, struct cfdata *match, void *context);
static void cxgb_port_attach(device_t dev, device_t self, void *context);
static int cxgb_port_detach(device_t dev, int flags);
#endif
#ifdef __FreeBSD__
static device_method_t cxgb_port_methods[] = {
DEVMETHOD(device_probe, cxgb_port_probe),
DEVMETHOD(device_attach, cxgb_port_attach),
@ -206,6 +222,10 @@ static devclass_t cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
#endif
#ifdef __NetBSD__
CFATTACH_DECL(cxgbp, 0, cxgb_port_match, cxgb_port_attach, cxgb_port_detach, NULL);
#endif
#define SGE_MSIX_COUNT (SGE_QSETS + 1)
extern int collapse_mbufs;
@ -245,6 +265,20 @@ SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
"use a single queue-set per port");
#endif
#ifdef __NetBSD__
/*
* The driver enables offload as a default.
* To disable it, use ofld_disable = 1.
*/
static int ofld_disable = 0;
/*
* The driver uses an auto-queue algorithm by default.
* To disable it and force a single queue-set per port, use singleq = 1.
*/
static int singleq = 1;
#endif
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
@ -341,6 +375,57 @@ cxgb_controller_probe(device_t dev)
}
#endif
#ifdef __NetBSD__
static struct cxgb_ident *cxgb_get_ident(struct pci_attach_args *pa)
{
struct cxgb_ident *id;
int vendorid, deviceid;
vendorid = PCI_VENDOR(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG));
deviceid = PCI_PRODUCT(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG));
for (id = cxgb_identifiers; id->desc != NULL; id++) {
if ((id->vendor == vendorid) &&
(id->device == deviceid)) {
return (id);
}
}
return (NULL);
}
/*
 * Map a PCI attach candidate to the const adapter_info record describing
 * that board, or NULL when the device is not one of ours.
 */
static const struct adapter_info *cxgb_get_adapter_info(struct pci_attach_args *pa)
{
    struct cxgb_ident *id = cxgb_get_ident(pa);

    if (id == NULL)
        return (NULL);

    return (t3_get_adapter_info(id->index));
}
/*
 * autoconf(9) match hook for the cxgbc controller.
 *
 * Returns 0 for devices we do not recognize; otherwise announces the
 * adapter (once) and returns a high match priority.
 */
static int cxgb_controller_match(device_t dev, struct cfdata *match, void *context)
{
    struct pci_attach_args *pa = context;
    static int printed = 0;
    const struct adapter_info *ai = cxgb_get_adapter_info(pa);

    if (ai == NULL)
        return (0);

    /* Announce the hardware only on the first successful match. */
    if (printed++ == 0)
        printf("%s RNIC, %d port(s)", ai->desc, ai->nports);

    /* XXX match priority chosen arbitrarily during the port; revisit. */
    return (100);
}
#endif
#ifdef __FreeBSD__
static int
upgrade_fw(adapter_t *sc)
@ -374,18 +459,36 @@ upgrade_fw(adapter_t *sc)
#ifdef __FreeBSD__
static int
cxgb_controller_attach(device_t dev)
#endif
#ifdef __NetBSD__
static void
cxgb_controller_attach(device_t dev, device_t self, void *context)
#endif
{
device_t child;
const struct adapter_info *ai;
struct adapter *sc;
int i, reg, msi_needed, error = 0;
#ifdef __FreeBSD__
struct adapter *sc = device_get_softc(dev);
#endif
#ifdef __NetBSD__
struct adapter *sc = (struct adapter *)self; // device is first thing in adapter
struct pci_attach_args *pa = context;
#endif
int i, error = 0;
#ifdef MSI_SUPPORTED
int reg, msi_needed;
#endif
uint32_t vers;
int port_qsets = 1;
sc = device_get_softc(dev);
sc->dev = dev;
#ifdef __NetBSD__
sc->chip = pa->pa_pc;
sc->tag = pa->pa_tag;
#endif
sc->msi_count = 0;
#ifdef __FreeBSD__
/* find the PCIe link width and set max read request to 4KB*/
if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
uint16_t lnk, pectl;
@ -401,7 +504,7 @@ cxgb_controller_attach(device_t dev)
"PCIe x%d Link, expect reduced performance\n",
sc->link_width);
}
pci_enable_busmaster(dev);
/*
@ -414,16 +517,24 @@ cxgb_controller_attach(device_t dev)
device_printf(dev, "Cannot allocate BAR\n");
return (ENXIO);
}
#endif
mtx_init(&sc->sge.reg_lock, "SGE reg lock", NULL, MTX_DEF);
mtx_init(&sc->lock, "cxgb controller lock", NULL, MTX_DEF);
mtx_init(&sc->mdio_lock, "cxgb mdio", NULL, MTX_DEF);
#ifdef __FreeBSD__
sc->bt = rman_get_bustag(sc->regs_res);
sc->bh = rman_get_bushandle(sc->regs_res);
sc->mmio_len = rman_get_size(sc->regs_res);
#endif
#ifdef __FreeBSD__
ai = cxgb_get_adapter_info(dev);
#endif
#ifdef __NetBSD__
ai = cxgb_get_adapter_info(pa);
#endif
if (t3_prep_adapter(sc, ai, 1) < 0) {
error = ENODEV;
goto out;
@ -499,7 +610,12 @@ cxgb_controller_attach(device_t dev)
#endif
/* Create a periodic callout for checking adapter status */
#ifdef __FreeBSD__
callout_init_mtx(&sc->cxgb_tick_ch, &sc->lock, CALLOUT_RETURNUNLOCKED);
#endif
#ifdef __NetBSD__
callout_init(&sc->cxgb_tick_ch);
#endif
if (t3_check_fw_version(sc) != 0) {
/*
@ -513,27 +629,44 @@ cxgb_controller_attach(device_t dev)
}
if ((sc->flags & USING_MSIX) && !singleq)
#ifdef __FreeBSD__
port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
#endif
#ifdef __NetBSD__
port_qsets = (SGE_QSETS/(sc)->params.nports);
#endif
/*
* Create a child device for each MAC. The ethernet attachment
* will be done in these children.
*/
for (i = 0; i < (sc)->params.nports; i++) {
#ifdef __FreeBSD__
if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
device_printf(dev, "failed to add child port\n");
error = EINVAL;
goto out;
}
#endif
#ifdef __NetBSD__
// XXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
sc->portdev[i] = child;
sc->port[i].adapter = sc;
sc->port[i].nqsets = port_qsets;
sc->port[i].first_qset = i*port_qsets;
sc->port[i].port = i;
#ifdef __FreeBSD__
device_set_softc(child, &sc->port[i]);
#endif
}
#ifdef __FreeBSD__
if ((error = bus_generic_attach(dev)) != 0)
goto out;
#endif
#ifdef __NetBSD__
// XXXXXXXXXXXXXXXXXXXXXXXXX
#endif
/*
* XXX need to poll for link status
@ -562,16 +695,21 @@ cxgb_controller_attach(device_t dev)
out:
if (error)
cxgb_free(sc);
#ifdef __FreeBSD__
return (error);
#endif
}
#ifdef __FreeBSD__
static int
cxgb_controller_detach(device_t dev)
{
struct adapter *sc;
sc = device_get_softc(dev);
#ifdef __FreeBSD__
struct adapter *sc = device_get_softc(dev);
#endif
#ifdef __NetBSD__
struct adapter *sc = (struct adapter *)dev; // device is first thing in adapter
#endif
cxgb_free(sc);
@ -601,13 +739,18 @@ cxgb_free(struct adapter *sc)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
sc->msix_regs_res);
}
#endif
/*
* XXX need to drain the ifq by hand until
* it is taught about mbuf iovecs
*/
#ifdef __FreeBSD__
callout_drain(&sc->cxgb_tick_ch);
#endif
#ifdef __NetBSD__
callout_stop(&sc->cxgb_tick_ch);
#endif
t3_sge_deinit_sw(sc);
@ -748,24 +891,32 @@ cxgb_setup_msix(adapter_t *sc, int msix_count)
return (0);
}
#ifdef __FreeBSD__
static int
cxgb_port_probe(device_t dev)
{
#ifdef __FreeBSD__
struct port_info *p;
struct port_info *p = device_get_softc(dev);
char buf[80];
p = device_get_softc(dev);
snprintf(buf, sizeof(buf), "Port %d %s", p->port, p->port_type->desc);
device_set_desc_copy(dev, buf);
#endif
#ifdef __NetBSD__
printf("Huh?!?\n");
#endif
return (0);
}
#endif
#ifdef __NetBSD__
/*
 * autoconf(9) match hook for a cxgb port child device.
 *
 * Fix: the original had a stray ';' after the parameter list, turning the
 * definition into a prototype followed by an orphaned compound statement
 * (a hard compile error).
 *
 * NOTE(review): returning 0 tells autoconf the device does NOT match, so
 * ports would never attach.  Presumably this should return a positive
 * priority (cf. cxgb_controller_match) -- confirm before enabling.
 */
static int cxgb_port_match(device_t dev, struct cfdata *match, void *context)
{
    struct port_info *p = (struct port_info *)dev; /* device is first member of port_info */
    char buf[80];

    /* Description string; setting it is disabled while the port is WIP. */
    snprintf(buf, sizeof(buf), "Port %d %s", p->port, p->port_type->desc);
    // device_set_desc_copy(dev, buf);
    return (0);
}
#endif
#ifdef __FreeBSD__
static int
@ -778,7 +929,9 @@ cxgb_makedev(struct port_info *pi)
cxgb_cdevsw->d_version = D_VERSION;
cxgb_cdevsw->d_name = strdup(pi->ifp->if_xname, M_DEVBUF);
#ifdef __FreeBSD__
cxgb_cdevsw->d_ioctl = cxgb_extension_ioctl;
#endif
pi->port_cdev = make_dev(cxgb_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
pi->ifp->if_xname);
@ -792,7 +945,7 @@ cxgb_makedev(struct port_info *pi)
}
#endif
#ifdef __FreeBSD__
#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
@ -804,20 +957,43 @@ cxgb_makedev(struct port_info *pi)
#define IFCAP_TSO4 0x0
#define CSUM_TSO 0x0
#endif
#endif
#ifdef __NetBSD__
#define IFCAP_HWCSUM (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx)
#define IFCAP_RXCSUM IFCAP_CSUM_IPv4_Rx
#define IFCAP_TXCSUM IFCAP_CSUM_IPv4_Tx
#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_HWCSUM | IFCAP_TSO)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_HWCSUM | IFCAP_TSO4)
#else
#define CXGB_CAP (IFCAP_HWCSUM)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_HWCSUM)
#define IFCAP_TSO4 0x0
#define CSUM_TSO 0x0
#endif
#endif
#ifdef __FreeBSD__
static int
cxgb_port_attach(device_t dev)
static int cxgb_port_attach(device_t dev)
#endif
#ifdef __NetBSD__
static void cxgb_port_attach(device_t dev, device_t self, void *context)
#endif
{
struct port_info *p;
#ifdef __FreeBSD__
struct port_info *p = device_get_softc(dev);
#endif
#ifdef __NetBSD__
struct port_info *p = (struct port_info *)dev; // device is first thing in adapter
#endif
struct ifnet *ifp;
int media_flags;
int err;
char buf[64];
p = device_get_softc(dev);
snprintf(buf, sizeof(buf), "cxgb port %d", p->port);
mtx_init(&p->lock, buf, 0, MTX_DEF);
@ -844,10 +1020,17 @@ cxgb_port_attach(device_t dev)
IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
IFQ_SET_READY(&ifp->if_snd);
#ifdef __FreeBSD__
ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
#endif
#ifdef __NetBSD__
ifp->if_capabilities = ifp->if_capenable = 0;
#endif
ifp->if_capabilities |= CXGB_CAP;
ifp->if_capenable |= CXGB_CAP_ENABLE;
#ifdef __FreeBSD__
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
#endif
ifp->if_baudrate = 100000000;
ether_ifattach(ifp, p->hw_addr);
@ -904,9 +1087,12 @@ cxgb_port_attach(device_t dev)
static int
cxgb_port_detach(device_t dev)
{
struct port_info *p;
p = device_get_softc(dev);
#ifdef __FreeBSD__
struct port_info *p = device_get_softc(dev);
#endif
#ifdef __NetBSD__
struct port_info *p = (struct port_info *)dev; // device is first thing in adapter
#endif
PORT_LOCK(p);
cxgb_stop_locked(p);
@ -929,7 +1115,6 @@ cxgb_port_detach(device_t dev)
return (0);
}
#endif
void
t3_fatal_err(struct adapter *sc)
@ -1257,12 +1442,13 @@ cxgb_up(struct adapter *sc)
int err = 0;
if ((sc->flags & FULL_INIT_DONE) == 0) {
#ifdef __FreeBSD__
if ((sc->flags & FW_UPTODATE) == 0)
err = upgrade_fw(sc);
if (err)
goto out;
#endif
err = t3_init_hw(sc, 0);
if (err)
@ -1280,6 +1466,7 @@ cxgb_up(struct adapter *sc)
t3_intr_clear(sc);
#ifdef __FreeBSD__ // XXXXXXXXXXXXXXXXXXXXXXXXXXX
/* If it's MSI or INTx, allocate a single interrupt for everything */
if ((sc->flags & USING_MSIX) == 0) {
if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
@ -1302,6 +1489,7 @@ cxgb_up(struct adapter *sc)
} else {
cxgb_setup_msix(sc, sc->msi_count);
}
#endif
t3_sge_start(sc);
t3_intr_enable(sc);
@ -1311,7 +1499,9 @@ cxgb_up(struct adapter *sc)
sc->flags |= QUEUES_BOUND;
out:
return (err);
#ifdef __FreeBSD__
irq_err:
#endif
CH_ERR(sc, "request_irq failed, err %d\n", err);
goto out;
}
@ -1330,40 +1520,51 @@ cxgb_down(struct adapter *sc)
for (i = 0; i < SGE_QSETS; i++) {
if (sc->msix_intr_tag[i] != NULL) {
#ifdef __FreeBSD__ // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_intr_tag[i]);
#endif
sc->msix_intr_tag[i] = NULL;
}
if (sc->msix_irq_res[i] != NULL) {
#ifdef __FreeBSD__ // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
bus_release_resource(sc->dev, SYS_RES_IRQ,
sc->msix_irq_rid[i], sc->msix_irq_res[i]);
#endif
sc->msix_irq_res[i] = NULL;
}
}
if (sc->intr_tag != NULL) {
#ifdef __FreeBSD__ // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
#endif
sc->intr_tag = NULL;
}
if (sc->irq_res != NULL) {
device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
sc->irq_rid, sc->irq_res);
#ifdef __FreeBSD__ // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
#endif
sc->irq_res = NULL;
}
callout_drain(&sc->sge_timer_ch);
#ifdef __FreeBSD__
callout_drain(&sc->sge_timer_ch);
taskqueue_drain(sc->tq, &sc->slow_intr_task);
taskqueue_drain(sc->tq, &sc->timer_reclaim_task);
#endif
#ifdef __NetBSD__
callout_stop(&sc->sge_timer_ch);
#endif
}
#ifdef __FreeBSD__
static int
offload_open(struct port_info *pi)
{
#ifdef __FreeBSD__
struct adapter *adapter = pi->adapter;
struct toedev *tdev = TOEDEV(pi->ifp);
int adap_up = adapter->open_device_map & PORT_MASK;
@ -1405,11 +1606,16 @@ out:
cxgb_set_dummy_ops(tdev);
}
return (err);
#endif
#ifdef __NetBSD__
return (0);
#endif
}
static int
offload_close(struct toedev *tdev)
{
#ifdef __FreeBSD__
struct adapter *adapter = tdev2adap(tdev);
if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
@ -1428,9 +1634,9 @@ offload_close(struct toedev *tdev)
ADAPTER_UNLOCK(adapter);
cxgb_offload_deactivate(adapter);
#endif
return 0;
}
#endif
static void
cxgb_init(void *arg)
@ -1466,9 +1672,11 @@ cxgb_init_locked(struct port_info *p)
ADAPTER_UNLOCK(p->adapter);
if (is_offload(sc) && !ofld_disable) {
err = offload_open(p);
#ifdef __FreeBSD__
if (err)
log(LOG_WARNING,
"Could not initialize offload capabilities\n");
#endif
}
cxgb_link_start(p);
t3_port_intr_enable(sc, p->port);
@ -1477,8 +1685,14 @@ cxgb_init_locked(struct port_info *p)
cxgb_tick, sc);
PORT_LOCK(p);
#ifdef __FreeBSD__
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#endif
#ifdef __NetBSD__
ifp->if_flags |= IFF_RUNNING;
ifp->if_flags &= ~IFF_OACTIVE;
#endif
PORT_UNLOCK(p);
}
@ -1506,7 +1720,12 @@ cxgb_stop_locked(struct port_info *p)
t3_port_intr_disable(p->adapter, p->port);
PORT_LOCK(p);
#ifdef __FreeBSD__
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#endif
#ifdef __NetBSD__
ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#endif
PORT_UNLOCK(p);
p->phy.ops->power_down(&p->phy, 1);
t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
@ -1549,7 +1768,13 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
case SIOCGIFADDR:
if (ifa->ifa_addr->sa_family == AF_INET) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
#ifdef __FreeBSD__
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
#endif
#ifdef __NetBSD__
if (!(ifp->if_flags & IFF_RUNNING))
#endif
{
cxgb_init(p);
}
arp_ifinit(ifp, ifa);
@ -1559,7 +1784,13 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
PORT_LOCK(p);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
#ifdef __FreeBSD__
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#endif
#ifdef __NetBSD__
if (ifp->if_flags & IFF_RUNNING)
#endif
{
flags = p->if_flags;
if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
((ifp->if_flags ^ flags) & IFF_ALLMULTI))
@ -1570,9 +1801,20 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
p->if_flags = ifp->if_flags;
PORT_UNLOCK(p);
} else {
#ifdef __FreeBSD__
callout_drain(&p->adapter->cxgb_tick_ch);
#endif
#ifdef __NetBSD__
callout_stop(&p->adapter->cxgb_tick_ch);
#endif
PORT_LOCK(p);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
#ifdef __FreeBSD__
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#endif
#ifdef __NetBSD__
if (ifp->if_flags & IFF_RUNNING)
#endif
{
cxgb_stop_locked(p);
} else {
adapter_t *sc = p->adapter;
@ -1595,11 +1837,15 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
if (mask & IFCAP_TXCSUM) {
if (IFCAP_TXCSUM & ifp->if_capenable) {
ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
#ifdef __FreeBSD__
ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
| CSUM_TSO);
#endif
} else {
ifp->if_capenable |= IFCAP_TXCSUM;
#ifdef __FreeBSD__
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#endif
}
} else if (mask & IFCAP_RXCSUM) {
if (IFCAP_RXCSUM & ifp->if_capenable) {
@ -1611,10 +1857,14 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
if (mask & IFCAP_TSO4) {
if (IFCAP_TSO4 & ifp->if_capenable) {
ifp->if_capenable &= ~IFCAP_TSO4;
#ifdef __FreeBSD__
ifp->if_hwassist &= ~CSUM_TSO;
#endif
} else if (IFCAP_TXCSUM & ifp->if_capenable) {
ifp->if_capenable |= IFCAP_TSO4;
#ifdef __FreeBSD__
ifp->if_hwassist |= CSUM_TSO;
#endif
} else {
if (cxgb_debug)
printf("cxgb requires tx checksum offload"
@ -1644,7 +1894,12 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
if (!p->link_config.link_ok)
return (ENXIO);
#ifdef __FreeBSD__
if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
#ifdef __NetBSD__
if (IFQ_IS_EMPTY(&ifp->if_snd))
#endif
return (ENOBUFS);
qs = &p->adapter->sge.qs[p->first_qset];
@ -1655,13 +1910,23 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
in_use_init = txq->in_use;
while ((txq->in_use - in_use_init < txmax) &&
(txq->size > txq->in_use + TX_MAX_DESC)) {
#ifdef __FreeBSD__
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
#endif
#ifdef __NetBSD__
IFQ_DEQUEUE(&ifp->if_snd, m);
#endif
if (m == NULL)
break;
/*
* Convert chain to M_IOVEC
*/
#ifdef __FreeBSD__
KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early"));
#endif
#ifdef __NetBSD__
KASSERT((m->m_flags & M_IOVEC) == 0);
#endif
m0 = m;
#ifdef INVARIANTS
/*
@ -1687,13 +1952,23 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
m = m0;
if ((err = t3_encap(p, &m)) != 0)
break;
#ifdef __FreeBSD__
BPF_MTAP(ifp, m);
#endif
#ifdef __NetBSD__
bpf_mtap((ifp)->if_bpf, (m));
#endif
}
mtx_unlock(&txq->lock);
if (__predict_false(err)) {
if (err == ENOMEM) {
#ifdef __FreeBSD__
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#endif
#ifdef __NetBSD__
ifp->if_flags |= IFF_OACTIVE;
#endif
IFQ_LOCK(&ifp->if_snd);
IFQ_DRV_PREPEND(&ifp->if_snd, m);
IFQ_UNLOCK(&ifp->if_snd);
@ -1702,11 +1977,20 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
if (err == 0 && m == NULL) {
return (ENOBUFS);
}
#ifdef __FreeBSD__
if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
(ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return (ENOSPC);
}
#endif
#ifdef __NetBSD__
if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
(ifp->if_flags & IFF_OACTIVE) == 0) {
ifp->if_flags |= IFF_OACTIVE;
return (ENOSPC);
}
#endif
return (err);
}
@ -1840,12 +2124,22 @@ check_t3b2_mac(struct adapter *adapter)
struct ifnet *ifp = p->ifp;
int status;
#ifdef __FreeBSD__
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
#endif
#ifdef __NetBSD__
if ((ifp->if_flags & IFF_RUNNING) == 0)
#endif
continue;
status = 0;
PORT_LOCK(p);
#ifdef __FreeBSD__
if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
#endif
#ifdef __NetBSD__
if ((ifp->if_flags & IFF_RUNNING))
#endif
status = t3b2_mac_watchdog_task(&p->mac);
if (status == 1)
p->mac.stats.num_toggled++;
@ -1891,6 +2185,7 @@ in_range(int val, int lo, int hi)
return val < 0 || (val <= hi && val >= lo);
}
#ifdef __FreeBSD__
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
int fflag, struct thread *td)
@ -2289,6 +2584,7 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
return (error);
}
#endif
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,

View File

@ -46,11 +46,11 @@ $FreeBSD: src/sys/dev/cxgb/cxgb_offload.h,v 1.1 2007/05/25 09:48:19 kmacy Exp $
#include <dev/cxgb/common/cxgb_t3_cpl.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_tcb.h>
// #include <dev/pci/cxgb_l2t.h>
#include "cxgb_tcb.h"
#include "cxgb_l2t.h"
// #include <ulp/toecore/toedev.h>
#include <dev/pci/cxgb_t3_cpl.h>
#include "cxgb_toedev.h"
#include "cxgb_t3_cpl.h"
#endif
struct adapter;

View File

@ -30,6 +30,9 @@ $FreeBSD: src/sys/dev/cxgb/cxgb_osdep.h,v 1.10 2007/05/27 22:07:47 kmacy Exp $
***************************************************************************/
#ifndef _CXGB_OSDEP_H_
#define _CXGB_OSDEP_H_
#include <sys/param.h>
#include <sys/systm.h>
#ifdef __FreeBSD__
@ -49,10 +52,20 @@ $FreeBSD: src/sys/dev/cxgb/cxgb_osdep.h,v 1.10 2007/05/27 22:07:47 kmacy Exp $
#ifdef __NetBSD__
#include <dev/pci/cxgb_version.h>
#include <dev/pci/cxgb_config.h>
#endif
#ifndef _CXGB_OSDEP_H_
#define _CXGB_OSDEP_H_
/*
 * FreeBSD-compatibility shims for the NetBSD build.
 *
 * `struct task` stands in for FreeBSD's taskqueue task; only a context
 * pointer is carried here.  NOTE(review): FreeBSD's struct also holds the
 * handler and queue linkage -- confirm NetBSD callers need only `context`.
 */
struct task
{
    void *context;
};
/* No-op stand-in for FreeBSD critical_enter(); NetBSD side does nothing. */
static inline void critical_enter(void)
{
}
/* No-op stand-in for FreeBSD critical_exit(). */
static inline void critical_exit(void)
{
}
#endif
struct sge_rspq;

View File

@ -714,7 +714,12 @@ int
t3_sge_init_sw(adapter_t *sc)
{
#ifdef __FreeBSD__
callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
#endif
#ifdef __NetBSD__
callout_init(&sc->sge_timer_ch);
#endif
callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
TASK_INIT(&sc->timer_reclaim_task, 0, sge_timer_reclaim, sc);
TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
@ -2139,7 +2144,14 @@ t3_rx_eth(struct port_info *pi, struct sge_rspq *rq, struct mbuf *m, int ethpad)
if (&pi->adapter->port[cpl->iff] != pi)
panic("bad port index %d m->m_data=%p\n", cpl->iff, m->m_data);
if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
if (
#ifdef __FreeBSD__
(ifp->if_capenable & IFCAP_RXCSUM) &&
#endif
#ifdef __NetBSD__
(ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) &&
#endif
!cpl->fragment &&
cpl->csum_valid && cpl->csum == 0xffff) {
m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);
rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;

View File

@ -33,6 +33,7 @@ $FreeBSD: src/sys/dev/cxgb/ulp/toecore/toedev.h,v 1.1 2007/05/25 16:17:59 kmacy
#ifndef _OFFLOAD_DEV_H_
#define _OFFLOAD_DEV_H_
#include <net/route.h>
/* Parameter values for offload_get_phys_egress() */
enum {
@ -84,7 +85,6 @@ enum toetype {
T3B
};
}}}
struct toedev {
char name[TOENAMSIZ]; /* TOE device name */
enum toetype type;