Replace __insn_barrier() with x86_lfence() where appropriate.

While here, replace a panic() with a return of error code in xbdback.c.
This commit is contained in:
bouyer 2005-03-26 21:22:45 +00:00
parent 57d0b7f835
commit 13e1355ec4
5 changed files with 41 additions and 34 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: ctrl_if.c,v 1.3 2005/03/17 15:31:17 bouyer Exp $ */
/* $NetBSD: ctrl_if.c,v 1.4 2005/03/26 21:22:45 bouyer Exp $ */
/******************************************************************************
* ctrl_if.c
@ -9,7 +9,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ctrl_if.c,v 1.3 2005/03/17 15:31:17 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: ctrl_if.c,v 1.4 2005/03/26 21:22:45 bouyer Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -101,7 +101,7 @@ static void __ctrl_if_tx_tasklet(unsigned long data)
CONTROL_RING_IDX rp;
rp = ctrl_if->tx_resp_prod;
__insn_barrier(); /* Ensure we see all requests up to 'rp'. */
x86_lfence(); /* Ensure we see all requests up to 'rp'. */
while ( ctrl_if_tx_resp_cons != rp )
{
@ -163,7 +163,7 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
dp = ctrl_if_rxmsg_deferred_prod;
rp = ctrl_if->rx_req_prod;
__insn_barrier(); /* Ensure we see all requests up to 'rp'. */
x86_lfence(); /* Ensure we see all requests up to 'rp'. */
while ( ctrl_if_rx_req_cons != rp )
{
@ -253,7 +253,7 @@ ctrl_if_send_message_noblock(
memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
msg, sizeof(*msg));
__insn_barrier(); /* Write the message before letting the controller peek at it. */
x86_lfence(); /* Write the message before letting the controller peek at it. */
ctrl_if->tx_req_prod++;
simple_unlock(&ctrl_if_lock);
@ -300,7 +300,7 @@ static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
struct rsp_wait *wait = (struct rsp_wait *)id;
memcpy(wait->msg, msg, sizeof(*msg));
__insn_barrier();
x86_lfence();
wait->done = 1;
wakeup(wait);
@ -351,7 +351,7 @@ ctrl_if_enqueue_space_callback(
* the task is not executed despite the ring being non-full then we will
* certainly return 'not full'.
*/
__insn_barrier();
x86_lfence();
return TX_FULL(ctrl_if);
}
#endif
@ -379,7 +379,7 @@ ctrl_if_send_response(
if ( dmsg != msg )
memcpy(dmsg, msg, sizeof(*msg));
__insn_barrier(); /* Write the message before letting the controller peek at it. */
x86_lfence(); /* Write the message before letting the controller peek at it. */
ctrl_if->rx_resp_prod++;
simple_unlock(&ctrl_if_lock);

View File

@ -1,4 +1,4 @@
/* $NetBSD: if_xennet.c,v 1.15 2005/03/18 11:15:40 cube Exp $ */
/* $NetBSD: if_xennet.c,v 1.16 2005/03/26 21:22:45 bouyer Exp $ */
/*
*
@ -33,7 +33,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xennet.c,v 1.15 2005/03/18 11:15:40 cube Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_xennet.c,v 1.16 2005/03/26 21:22:45 bouyer Exp $");
#include "opt_inet.h"
#include "rnd.h"
@ -824,7 +824,7 @@ network_tx_buf_gc(struct xennet_softc *sc)
*/
sc->sc_tx->event = /* atomic */
prod + (sc->sc_tx_entries >> 1) + 1;
__insn_barrier();
x86_lfence();
} while (prod != sc->sc_tx->resp_prod);
if (sc->sc_tx->resp_prod == sc->sc_tx->req_prod)
@ -1060,11 +1060,12 @@ xennet_start(struct ifnet *ifp)
txreq->addr = xpmap_ptom(pa);
txreq->size = m->m_pkthdr.len;
__insn_barrier();
x86_lfence();
idx++;
sc->sc_tx->req_prod = idx;
sc->sc_tx_entries++; /* XXX atomic */
x86_lfence();
#ifdef XENNET_DEBUG
DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
@ -1087,7 +1088,7 @@ xennet_start(struct ifnet *ifp)
network_tx_buf_gc(sc);
__insn_barrier();
x86_lfence();
if (sc->sc_tx->resp_prod != idx)
hypervisor_notify_via_evtchn(sc->sc_evtchn);

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbd.c,v 1.14 2005/03/11 15:51:25 bouyer Exp $ */
/* $NetBSD: xbd.c,v 1.15 2005/03/26 21:22:45 bouyer Exp $ */
/*
*
@ -33,7 +33,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xbd.c,v 1.14 2005/03/11 15:51:25 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: xbd.c,v 1.15 2005/03/26 21:22:45 bouyer Exp $");
#include "xbd.h"
#include "rnd.h"
@ -1412,7 +1412,7 @@ xbd_response_handler(void *arg)
BLKIF_RING_IDX i, rp;
rp = blk_ring->resp_prod;
__insn_barrier(); /* Ensure we see queued responses up to 'rp'. */
x86_lfence(); /* Ensure we see queued responses up to 'rp'. */
for (i = resp_cons; i != rp; i++) {
ring_resp = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbdback.c,v 1.4 2005/03/15 23:39:18 bouyer Exp $ */
/* $NetBSD: xbdback.c,v 1.5 2005/03/26 21:22:45 bouyer Exp $ */
/*
* Copyright (c) 2005 Manuel Bouyer.
@ -496,7 +496,7 @@ xbdback_evthandler(void *arg)
int error;
req_prod = xbdi->blk_ring->req_prod;
__insn_barrier(); /* ensure we see all requests up to req_prod */
x86_lfence(); /* ensure we see all requests up to req_prod */
/*
* note that we'll eventually get a full ring of request.
* in this case, MASK_BLKIF_IDX(req_cons) == MASK_BLKIF_IDX(req_prod)
@ -711,7 +711,7 @@ xbdback_probe(struct xbdback_instance *xbdi, blkif_request_t *req)
}
xbd_req = pool_get(&xbdback_request_pool, PR_NOWAIT);
if (xbd_req == NULL) {
panic("xbd_req"); /* XXX */
return ENOMEM;
}
xbd_req->rq_xbdi = xbdi;
if (xbdback_map_shm(req, xbd_req) != 0) {
@ -756,7 +756,7 @@ xbdback_send_reply(struct xbdback_instance *xbdi, int id, int op, int status)
resp->operation = op;
resp->status = status;
xbdi->resp_prod++;
__insn_barrier(); /* ensure guest see all our replies */
x86_lfence(); /* ensure guest see all our replies */
xbdi->blk_ring->resp_prod = xbdi->resp_prod;
hypervisor_notify_via_evtchn(xbdi->evtchn);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: xennetback.c,v 1.4 2005/03/11 11:34:32 bouyer Exp $ */
/* $NetBSD: xennetback.c,v 1.5 2005/03/26 21:22:45 bouyer Exp $ */
/*
* Copyright (c) 2005 Manuel Bouyer.
@ -466,10 +466,8 @@ xennetback_evthandler(void *arg)
{
struct xnetback_instance *xneti = arg;
struct ifnet *ifp = &xneti->xni_if;
netif_tx_request_t *txreq;
netif_tx_response_t *txresp;
NETIF_RING_IDX req_prod;
NETIF_RING_IDX req_cons, resp_prod, i;
vaddr_t pkt;
@ -477,10 +475,9 @@ xennetback_evthandler(void *arg)
struct mbuf *m;
int do_event = 0;
again:
req_prod = xneti->xni_txring->req_prod;
__insn_barrier(); /* ensure we see all requests up to req_prod */
x86_lfence(); /* ensure we see all requests up to req_prod */
resp_prod = xneti->xni_txring->resp_prod;
req_cons = xneti->xni_txring->req_cons;
XENPRINTF(("%s event req_prod %d resp_prod %d req_cons %d event %d\n",
@ -569,13 +566,17 @@ again:
}
if (xneti->xni_txring->event == resp_prod)
do_event = 1;
__insn_barrier(); /* make sure the guest see out responses */
x86_lfence(); /* make sure the guest see our responses */
xneti->xni_txring->req_cons = req_cons;
xneti->xni_txring->resp_prod = resp_prod;
/*
* make sure the guest will see our replies before testing for more
* work.
*/
x86_lfence(); /* ensure we see all requests up to req_prod */
if (i > 0)
goto again; /* more work to do ? */
if (do_event) {
__insn_barrier();
XENPRINTF(("%s send event\n", xneti->xni_if.if_xname));
hypervisor_notify_via_evtchn(xneti->xni_evtchn);
}
@ -613,9 +614,11 @@ xennetback_ifstart(struct ifnet *ifp)
NETIF_RING_IDX resp_prod = xneti->xni_rxring->resp_prod;
int need_event = 0;
if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
return;
__insn_barrier();
x86_lfence();
while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
mmup = xstart_mmu;
mclp = xstart_mcl;
@ -686,17 +689,17 @@ xennetback_ifstart(struct ifnet *ifp)
printf("%s: xstart_mcl[%d] failed\n",
ifp->if_xname, j);
}
__insn_barrier();
x86_lfence();
/* update pointer */
xneti->xni_rxring->resp_prod += i;
__insn_barrier();
x86_lfence();
/* check if we need to allocate new xmit pages */
if (xmit_pages_alloc < 0)
xennetback_get_new_xmit_pages();
}
/* send event, if needed */
if (do_event) {
__insn_barrier();
x86_lfence();
XENPRINTF(("%s receive event\n", xneti->xni_if.if_xname));
hypervisor_notify_via_evtchn(xneti->xni_evtchn);
}
@ -716,8 +719,6 @@ xennetback_ifinit(struct ifnet *ifp)
struct xnetback_instance *xneti = ifp->if_softc;
int s = splnet();
/* cancel pending I/O - possible ? */
if ((ifp->if_flags & IFF_UP) == 0) {
splx(s);
return 0;
@ -731,7 +732,12 @@ xennetback_ifinit(struct ifnet *ifp)
static void
xennetback_ifstop(struct ifnet *ifp, int disable)
{
//struct xnetback_instance *xneti = ifp->if_softc;
struct xnetback_instance *xneti = ifp->if_softc;
int s = splnet();
ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
if (xneti->status == CONNECTED) {
xennetback_evthandler(ifp->if_softc); /* flush pending RX requests */
}
splx(s);
}