Add proper detach support for Xen3 xennet.

bouyer 2006-04-02 15:32:41 +00:00
parent 02e9db01de
commit e35565d1d4


@@ -1,4 +1,4 @@
-/*      $NetBSD: if_xennet_xenbus.c,v 1.5 2006/04/02 13:56:12 bouyer Exp $      */
+/*      $NetBSD: if_xennet_xenbus.c,v 1.6 2006/04/02 15:32:41 bouyer Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.5 2006/04/02 13:56:12 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.6 2006/04/02 15:32:41 bouyer Exp $");
 
 #include "opt_xen.h"
 #include "opt_nfs_boot.h"
@@ -122,7 +122,8 @@ int xennet_debug = 0xff;
 #define DPRINTFN(n,x)
 #endif
 
-#define GRANT_INVALID_REF -1
+#define GRANT_INVALID_REF -1 /* entry is free */
+#define GRANT_STACK_REF   -2 /* entry owned by the network stack */
 
 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
@@ -180,9 +181,12 @@ static paddr_t xennet_pages[NET_RX_RING_SIZE];
 
 static int xennet_xenbus_match(struct device *, struct cfdata *, void *);
 static void xennet_xenbus_attach(struct device *, struct device *, void *);
+static int xennet_xenbus_detach(struct device *, int);
+static void xennet_backend_changed(struct device *, XenbusState);
 static int xennet_xenbus_resume(void *);
 static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
+static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
 static void xennet_tx_complete(struct xennet_xenbus_softc *);
 static void xennet_rx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
 static int xennet_handler(void *);
@@ -199,7 +203,7 @@ static int xennet_ioctl(struct ifnet *, u_long, caddr_t);
 static void xennet_watchdog(struct ifnet *);
 
 CFATTACH_DECL(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
-   xennet_xenbus_match, xennet_xenbus_attach, NULL, NULL);
+   xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);
 
 static int
 xennet_xenbus_match(struct device *parent, struct cfdata *match, void *aux)
@@ -258,6 +262,8 @@ xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
 	}
 #endif /* XENNET_DEBUG */
 	sc->sc_xbusd = xa->xa_xbusd;
+	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
+
 	/* initialize free RX and RX request lists */
 	SLIST_INIT(&sc->sc_txreq_head);
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
@@ -278,7 +284,7 @@ xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
 		if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
 		    &rxreq->rxreq_pa))
 			panic("xennet: no pa for mapped va ?");
+		rxreq->rxreq_gntref = GRANT_INVALID_REF;
 		SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
 	}
 	splx(s);
@@ -332,6 +338,54 @@ xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
 	xennet_xenbus_resume(sc);
 }
 
+static int
+xennet_xenbus_detach(struct device *self, int flags)
+{
+	struct xennet_xenbus_softc *sc = (void *)self;
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	int s0, s1;
+	RING_IDX i;
+
+	DPRINTF(("%s: xennet_xenbus_detach\n", sc->sc_dev.dv_xname));
+	s0 = splnet();
+	xennet_stop(ifp, 1);
+	/* wait for pending TX to complete, and collect pending RX packets */
+	xennet_handler(sc);
+	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
+		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
+		xennet_handler(sc);
+	}
+	xennet_free_rx_buffer(sc);
+
+	s1 = splvm();
+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
+		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
+		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
+		    UVM_KMF_WIRED);
+	}
+	splx(s1);
+
+	ether_ifdetach(ifp);
+	if_detach(ifp);
+	while (xengnt_status(sc->sc_tx_ring_gntref)) {
+		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
+	}
+	xengnt_revoke_access(sc->sc_tx_ring_gntref);
+	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
+	    UVM_KMF_WIRED);
+	while (xengnt_status(sc->sc_rx_ring_gntref)) {
+		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
+	}
+	xengnt_revoke_access(sc->sc_rx_ring_gntref);
+	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
+	    UVM_KMF_WIRED);
+	softintr_disestablish(sc->sc_softintr);
+	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
+	splx(s0);
+	DPRINTF(("%s: xennet_xenbus_detach done\n", sc->sc_dev.dv_xname));
+	return 0;
+}
+
 static int
 xennet_xenbus_resume(void *p)
 {
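
Both shared rings are released with the same idiom in xennet_xenbus_detach() above: poll xengnt_status() until the backend has dropped its reference, then revoke the grant and free the page. A minimal sketch of that idiom, assuming the driver's usual context; the helper name example_release_ring is illustrative and not part of this commit:

	/* Sketch only: the "wait, revoke, free" pattern used for both rings. */
	static void
	example_release_ring(grant_ref_t gntref, vaddr_t ring_va)
	{
		/* wait until the other end no longer holds the grant */
		while (xengnt_status(gntref))
			tsleep(example_release_ring, PRIBIO, "xnetref", hz/2);
		xengnt_revoke_access(gntref);
		uvm_km_free(kernel_map, ring_va, PAGE_SIZE, UVM_KMF_WIRED);
	}
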
@@ -411,6 +465,7 @@ again:
 		xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
 		return -1;
 	}
+	xennet_alloc_rx_buffer(sc);
 	sc->sc_backend_status = BEST_CONNECTED;
 	return 0;
@@ -420,6 +475,27 @@ abort_transaction:
 	return error;
 }
 
+static void xennet_backend_changed(struct device *dev, XenbusState new_state)
+{
+	struct xennet_xenbus_softc *sc = (void *)dev;
+	DPRINTF(("%s: new backend state %d\n", sc->sc_dev.dv_xname, new_state));
+
+	switch (new_state) {
+	case XenbusStateUnknown:
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+	case XenbusStateClosing:
+		sc->sc_backend_status = BEST_CLOSED;
+		xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
+		break;
+	case XenbusStateConnected:
+		break;
+	default:
+		panic("bad backend state %d", new_state);
+	}
+}
+
 static void
 xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
 {
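
Once the backend leaves the Connected state, the frontend records BEST_CLOSED and acknowledges by switching its own xenbus state to Closed. That flag also gates RX refill elsewhere in this change: xennet_rx_mbuf_free() (further down) only hands free buffers back to the ring while still connected. A sketch of that check for illustration, with example_maybe_refill a made-up name:

	/* Sketch only: refill the RX ring only while the backend is connected. */
	static void
	example_maybe_refill(struct xennet_xenbus_softc *sc)
	{
		if (sc->sc_free_rxreql >= NET_RX_RING_SIZE / 2 &&
		    __predict_true(sc->sc_backend_status == BEST_CONNECTED))
			xennet_alloc_rx_buffer(sc);
	}
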
@@ -490,6 +566,81 @@ xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
 	return;
 }
 
+static void
+xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
+{
+	paddr_t ma, pa;
+	vaddr_t va;
+	RING_IDX i;
+	mmu_update_t mmu[1];
+	multicall_entry_t mcl[2];
+
+	int s = splbio();
+
+	DPRINTF(("%s: xennet_free_rx_buffer\n", sc->sc_dev.dv_xname));
+	/* get back memory from RX ring */
+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
+		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
+
+		/*
+		 * if the buffer is in transit in the network stack, wait for
+		 * the network stack to free it.
+		 */
+		while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
+		    GRANT_STACK_REF)
+			tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
+
+		if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
+			/*
+			 * this req is still granted. Get back the page or
+			 * allocate a new one, and remap it.
+			 */
+			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
+			    rxreq_next);
+			sc->sc_free_rxreql++;
+			ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
+			rxreq->rxreq_gntref = GRANT_INVALID_REF;
+			if (ma == 0) {
+				struct xen_memory_reservation xenres;
+				/*
+				 * transfer not complete, we lost the page.
+				 * Get one from hypervisor
+				 */
+				xenres.extent_start = &ma;
+				xenres.nr_extents = 1;
+				xenres.extent_order = 0;
+				xenres.address_bits = 31;
+				xenres.domid = DOMID_SELF;
+				if (HYPERVISOR_memory_op(
+				    XENMEM_increase_reservation, &xenres) < 0) {
+					panic("xennet_free_rx_buffer: "
+					    "can't get memory back");
+				}
+				KASSERT(ma != 0);
+			}
+			pa = rxreq->rxreq_pa;
+			va = rxreq->rxreq_va;
+			/* remap the page */
+			mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+			mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
+			MULTI_update_va_mapping(&mcl[0], va,
+			    (ma << PAGE_SHIFT) | PG_V | PG_KW,
+			    UVMF_TLB_FLUSH|UVMF_ALL);
+			xpmap_phys_to_machine_mapping[
+			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
+			mcl[1].op = __HYPERVISOR_mmu_update;
+			mcl[1].args[0] = (unsigned long)mmu;
+			mcl[1].args[1] = 1;
+			mcl[1].args[2] = 0;
+			mcl[1].args[3] = DOMID_SELF;
+			HYPERVISOR_multicall(mcl, 2);
+		}
+	}
+	splx(s);
+	DPRINTF(("%s: xennet_free_rx_buffer done\n", sc->sc_dev.dv_xname));
+}
+
 static void
 xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
 {
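
With this change, rxreq_gntref doubles as an ownership tag: GRANT_INVALID_REF means the request is free, a live grant reference means the backend holds the page, and GRANT_STACK_REF means an mbuf somewhere in the network stack still points at it. xennet_free_rx_buffer() above therefore has to wait for stack-owned buffers before it can reclaim their pages. A compact sketch of that wait, assuming the driver's context; example_wait_for_stack is an illustrative name only:

	/* Sketch only: the real loop is at the top of xennet_free_rx_buffer(). */
	static void
	example_wait_for_stack(struct xennet_rxreq *rxreq)
	{
		/*
		 * GRANT_STACK_REF is set when the page is attached to an mbuf
		 * in xennet_handler() and cleared in xennet_rx_mbuf_free().
		 */
		while ((volatile grant_ref_t)rxreq->rxreq_gntref == GRANT_STACK_REF)
			tsleep(example_wait_for_stack, PRIBIO, "xnetwait", hz/2);
	}
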
@@ -499,8 +650,11 @@ xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
 	SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
 	sc->sc_free_rxreql++;
 
-	if (sc->sc_free_rxreql >= NET_RX_RING_SIZE / 2)
+	req->rxreq_gntref = GRANT_INVALID_REF;
+	if (sc->sc_free_rxreql >= NET_RX_RING_SIZE / 2 &&
+	    __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
 		xennet_alloc_rx_buffer(sc);
+	}
 
 	if (m)
 		pool_cache_put(&mbpool_cache, m);
@@ -508,7 +662,7 @@ xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
 
 static void
-xennet_tx_complete(struct xennet_xenbus_softc * sc)
+xennet_tx_complete(struct xennet_xenbus_softc *sc)
 {
 	struct xennet_txreq *req;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
@@ -596,14 +750,7 @@ again:
 			RING_PUSH_REQUESTS(&sc->sc_rx_ring);
 			continue;
 		}
-		req->rxreq_gntref = GRANT_INVALID_REF;
-
-		MGETHDR(m, M_DONTWAIT, MT_DATA);
-		if (__predict_false(m == NULL)) {
-			printf("xennet: rx no mbuf\n");
-			ifp->if_ierrors++;
-			sc->sc_rx_ring.rsp_cons = i;
-			return 1;
-		}
 
 		pa = req->rxreq_pa;
 		va = req->rxreq_va;
@@ -632,16 +779,24 @@ again:
 				/* packet not for us */
 				xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
 				    req);
-				m_freem(m);
 				continue;
 			}
 		}
+		MGETHDR(m, M_DONTWAIT, MT_DATA);
+		if (__predict_false(m == NULL)) {
+			printf("xennet: rx no mbuf\n");
+			ifp->if_ierrors++;
+			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
+			continue;
+		}
+
 		m->m_pkthdr.rcvif = ifp;
 		if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
 		    sc->sc_rx_ring.sring->rsp_prod)) {
 			m->m_len = m->m_pkthdr.len = rx->status;
 			MEXTADD(m, pktp, rx->status,
 			    M_DEVBUF, xennet_rx_mbuf_free, req);
+			req->rxreq_gntref = GRANT_STACK_REF;
 		} else {
 			/*
 			 * This was our last receive buffer, allocate
@@ -921,22 +1076,16 @@ xennet_init(struct ifnet *ifp)
 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));
 
-	if (ifp->if_flags & IFF_UP) {
-		if ((ifp->if_flags & IFF_RUNNING) == 0) {
-			xennet_alloc_rx_buffer(sc);
-			sc->sc_rx_ring.sring->rsp_event =
-			    sc->sc_rx_ring.rsp_cons + 1;
-			hypervisor_enable_event(sc->sc_evtchn);
-			hypervisor_notify_via_evtchn(sc->sc_evtchn);
-			xennet_reset(sc);
-		}
-		ifp->if_flags |= IFF_RUNNING;
-		ifp->if_flags &= ~IFF_OACTIVE;
-		ifp->if_timer = 0;
-	} else {
-		ifp->if_flags &= ~IFF_RUNNING;
+	if ((ifp->if_flags & IFF_RUNNING) == 0) {
+		sc->sc_rx_ring.sring->rsp_event =
+		    sc->sc_rx_ring.rsp_cons + 1;
+		hypervisor_enable_event(sc->sc_evtchn);
+		hypervisor_notify_via_evtchn(sc->sc_evtchn);
 		xennet_reset(sc);
 	}
+	ifp->if_flags |= IFF_RUNNING;
+	ifp->if_flags &= ~IFF_OACTIVE;
+	ifp->if_timer = 0;
 
 	splx(s);
 	return 0;
 }
@@ -944,7 +1093,13 @@ xennet_init(struct ifnet *ifp)
 void
 xennet_stop(struct ifnet *ifp, int disable)
 {
+	struct xennet_xenbus_softc *sc = ifp->if_softc;
+	int s = splnet();
+
 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+	hypervisor_mask_event(sc->sc_evtchn);
+	xennet_reset(sc);
+	splx(s);
 }
 
 void
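
With a detach path in place, xennet_stop() has to really quiesce the device: it now masks the event channel so no further RX/TX interrupts are delivered, and xennet_init() re-enables it when the interface comes back up. A rough sketch of how the two pair up after this change, assuming the driver's context; example_cycle is illustrative only:

	/* Sketch only: bringing the interface down and back up. */
	static void
	example_cycle(struct ifnet *ifp)
	{
		xennet_stop(ifp, 1);	/* hypervisor_mask_event() + xennet_reset() */
		(void)xennet_init(ifp);	/* hypervisor_enable_event() + ring kick */
	}
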