Apply the patch from XSA-155: make sure that the backend won't read parts of
the request again (possibly because of compiler optimisations), by using
local copies and barriers.

From XSA-155:
The compiler can emit optimizations in the PV backend drivers which
can lead to double fetch vulnerabilities. Specifically the shared
memory between the frontend and backend can be fetched twice (during
which time the frontend can alter the contents) possibly leading to
arbitrary code execution in backend.
commit 13ee92e7ec (parent 2f89dbfb73)
Author: bouyer
Date:   2016-01-06 15:28:40 +0000

4 changed files with 66 additions and 43 deletions
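For context, the bug class being closed is a double fetch: the backend reads a
field from guest-writable shared memory twice, and the frontend can change the
value between the two reads. Below is a minimal sketch of the hazard and of the
copy-plus-barrier fix applied by this commit; the struct layout, the bound, and
the function names are invented for illustration. __insn_barrier() is NetBSD's
compiler barrier from <sys/cdefs.h>.

#include <sys/cdefs.h>	/* __insn_barrier() */
#include <string.h>

/* Hypothetical request layout; the frontend can write this page at any time. */
struct fake_req {
	unsigned int len;
	char data[4096];
};

/* BAD: two fetches of len; the check and the use may see different values. */
void
backend_double_fetch(struct fake_req *shared, char *buf)
{
	if (shared->len > sizeof(shared->data))		/* fetch #1 */
		return;
	memcpy(buf, shared->data, shared->len);		/* fetch #2 */
}

/* GOOD: snapshot len once, then keep the compiler from re-reading it. */
void
backend_single_fetch(struct fake_req *shared, char *buf)
{
	unsigned int len = ((volatile struct fake_req *)shared)->len;

	__insn_barrier();
	if (len > sizeof(shared->data))
		return;
	memcpy(buf, shared->data, len);
}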

Index: ring.h

@@ -1,4 +1,4 @@
-/* $NetBSD: ring.h,v 1.2 2011/12/07 15:40:15 cegger Exp $ */
+/* $NetBSD: ring.h,v 1.3 2016/01/06 15:28:40 bouyer Exp $ */
 /******************************************************************************
  * ring.h
  *
@@ -236,6 +236,20 @@ typedef struct __name##_back_ring __name##_back_ring_t
 #define RING_GET_REQUEST(_r, _idx) \
     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
+	/* Use volatile to force the copy into _req. */ \
+	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+} while (0)
 #define RING_GET_RESPONSE(_r, _idx) \
     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
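The intended usage pattern: a backend that used to hold a pointer into the
shared ring now works on a stack copy. A minimal sketch, using the stock Xen
blkif ring types; process_request() is a placeholder, and real consumers also
handle ring overflow and batching, which are elided here.

static void
consume(blkif_back_ring_t *ring)
{
	RING_IDX cons = ring->req_cons;
	RING_IDX prod = ring->sring->req_prod;

	xen_rmb();	/* read req_prod before any request body */
	while (cons != prod) {
		blkif_request_t req;

		/*
		 * Before XSA-155 a backend would do
		 *	blkif_request_t *req = RING_GET_REQUEST(ring, cons);
		 * and every req->field access was a fresh fetch from memory
		 * the frontend can still write.
		 */
		RING_COPY_REQUEST(ring, cons, &req);
		cons++;
		process_request(&req);	/* operates on the private copy */
	}
	ring->req_cons = cons;
}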

Index: pciback.c

@@ -1,4 +1,4 @@
-/* $NetBSD: pciback.c,v 1.9 2014/03/29 19:28:30 christos Exp $ */
+/* $NetBSD: pciback.c,v 1.10 2016/01/06 15:28:40 bouyer Exp $ */
 /*
  * Copyright (c) 2009 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pciback.c,v 1.9 2014/03/29 19:28:30 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pciback.c,v 1.10 2016/01/06 15:28:40 bouyer Exp $");
 #include "opt_xen.h"
@@ -188,6 +188,7 @@ struct pb_xenbus_instance {
 	/* communication with the domU */
 	unsigned int pbx_evtchn; /* our even channel */
 	struct xen_pci_sharedinfo *pbx_sh_info;
+	struct xen_pci_op op;
 	grant_handle_t pbx_shinfo_handle; /* to unmap shared page */
 };
@@ -721,13 +722,16 @@ pciback_xenbus_evthandler(void * arg)
 {
 	struct pb_xenbus_instance *pbxi = arg;
 	struct pciback_pci_dev *pbd;
-	struct xen_pci_op *op = &pbxi->pbx_sh_info->op;
+	struct xen_pci_op *op = &pbxi->op;
 	u_int bus, dev, func;
 	hypervisor_clear_event(pbxi->pbx_evtchn);
 	if (xen_atomic_test_bit(&pbxi->pbx_sh_info->flags,
 	    _XEN_PCIF_active) == 0)
 		return 0;
+	memcpy(op, &pbxi->pbx_sh_info->op, sizeof (struct xen_pci_op));
+	__insn_barrier();
 	if (op->domain != 0) {
 		aprint_error("pciback: domain %d != 0", op->domain);
 		op->err = XEN_PCI_ERR_dev_not_found;
@@ -794,6 +798,8 @@ pciback_xenbus_evthandler(void * arg)
 		aprint_error("pciback: unknown cmd %d\n", op->cmd);
 		op->err = XEN_PCI_ERR_not_implemented;
 	}
+	pbxi->pbx_sh_info->op.value = op->value;
+	pbxi->pbx_sh_info->op.err = op->err;
 end:
 	xen_atomic_clear_bit(&pbxi->pbx_sh_info->flags, _XEN_PCIF_active);
 	hypervisor_notify_via_evtchn(pbxi->pbx_evtchn);
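The shape of the pciback fix is copy-in, compute, copy-out: snapshot the whole
request into the backend-private pbxi->op, place a compiler barrier so all
later accesses go through the copy, and write back only the two result fields
rather than the whole structure. Condensed to its essentials (validation and
the command dispatch elided):

	memcpy(op, &pbxi->pbx_sh_info->op, sizeof(*op));	/* copy in */
	__insn_barrier();	/* every check below uses the private copy */
	/* ... check op->domain, look up the device, dispatch on op->cmd ... */
	pbxi->pbx_sh_info->op.value = op->value;	/* copy out results */
	pbxi->pbx_sh_info->op.err = op->err;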

Index: xbdback_xenbus.c

@@ -1,4 +1,4 @@
-/* $NetBSD: xbdback_xenbus.c,v 1.61 2015/11/15 14:42:03 bouyer Exp $ */
+/* $NetBSD: xbdback_xenbus.c,v 1.62 2016/01/06 15:28:40 bouyer Exp $ */
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.61 2015/11/15 14:42:03 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.62 2016/01/06 15:28:40 bouyer Exp $");
 #include <sys/atomic.h>
 #include <sys/buf.h>
@@ -1022,6 +1022,7 @@ xbdback_co_main_loop(struct xbdback_instance *xbdi, void *obj)
 			req->sector_number = req64->sector_number;
 			break;
 		}
+		__insn_barrier();
 		XENPRINTF(("xbdback op %d req_cons 0x%x req_prod 0x%x "
 		    "resp_prod 0x%x id %" PRIu64 "\n", req->operation,
 		    xbdi->xbdi_ring.ring_n.req_cons,
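xbdback already copied the request fields it needed (including the 64-bit
sector_number seen above) into a private structure, so here the one-line fix
only adds a compiler barrier: without it the compiler could fold those copies
back into re-reads of the shared ring. On NetBSD __insn_barrier() is a pure
compiler barrier, roughly:

/* Roughly the <sys/cdefs.h> definition: no CPU fence instruction; it only
 * tells the compiler that memory may have changed, forbidding cached
 * re-reads or reordering of memory accesses across this point. */
#define __insn_barrier()	__asm __volatile("":::"memory")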

Index: xennetback_xenbus.c

@@ -1,4 +1,4 @@
-/* $NetBSD: xennetback_xenbus.c,v 1.53 2015/11/19 17:01:40 christos Exp $ */
+/* $NetBSD: xennetback_xenbus.c,v 1.54 2016/01/06 15:28:40 bouyer Exp $ */
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.53 2015/11/19 17:01:40 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.54 2016/01/06 15:28:40 bouyer Exp $");
 #include "opt_xen.h"
@@ -733,7 +733,7 @@ xennetback_evthandler(void *arg)
 {
 	struct xnetback_instance *xneti = arg;
 	struct ifnet *ifp = &xneti->xni_if;
-	netif_tx_request_t *txreq;
+	netif_tx_request_t txreq;
 	struct xni_pkt *pkt;
 	vaddr_t pkt_va;
 	struct mbuf *m;
@@ -751,15 +751,15 @@ xennetback_evthandler(void *arg)
 		    receive_pending);
 		if (receive_pending == 0)
 			break;
-		txreq = RING_GET_REQUEST(&xneti->xni_txring, req_cons);
+		RING_COPY_REQUEST(&xneti->xni_txring, req_cons, &txreq);
 		xen_rmb();
 		XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname,
-		    txreq->size));
+		    txreq.size));
 		req_cons++;
 		if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
 		    (IFF_UP | IFF_RUNNING))) {
 			/* interface not up, drop */
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_DROPPED);
 			continue;
 		}
@@ -767,12 +767,12 @@ xennetback_evthandler(void *arg)
 		/*
 		 * Do some sanity checks, and map the packet's page.
 		 */
-		const char *msg = xennetback_tx_check_packet(txreq,
+		const char *msg = xennetback_tx_check_packet(&txreq,
 		    xneti->xni_ec.ec_capenable & ETHERCAP_VLAN_MTU);
 		if (msg) {
 			printf("%s: packet with size %d is %s\n",
-			    ifp->if_xname, txreq->size, msg);
-			xennetback_tx_response(xneti, txreq->id,
+			    ifp->if_xname, txreq.size, msg);
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_ERROR);
 			ifp->if_ierrors++;
 			continue;
@@ -785,15 +785,15 @@ xennetback_evthandler(void *arg)
 			if (ratecheck(&lasttime, &xni_pool_errintvl))
 				printf("%s: mbuf alloc failed\n",
 				    ifp->if_xname);
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_DROPPED);
 			ifp->if_ierrors++;
 			continue;
 		}
 		XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n",
-		    xneti->xni_if.if_xname, txreq->offset,
-		    txreq->size, txreq->id, MASK_NETIF_TX_IDX(req_cons)));
+		    xneti->xni_if.if_xname, txreq.offset,
+		    txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons)));
 		pkt = pool_get(&xni_pkt_pool, PR_NOWAIT);
 		if (__predict_false(pkt == NULL)) {
@@ -801,16 +801,16 @@ xennetback_evthandler(void *arg)
 			if (ratecheck(&lasttime, &xni_pool_errintvl))
 				printf("%s: xnbpkt alloc failed\n",
 				    ifp->if_xname);
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_DROPPED);
 			ifp->if_ierrors++;
 			m_freem(m);
 			continue;
 		}
-		err = xen_shm_map(1, xneti->xni_domid, &txreq->gref, &pkt_va,
+		err = xen_shm_map(1, xneti->xni_domid, &txreq.gref, &pkt_va,
 		    &pkt->pkt_handle, XSHM_RO);
 		if (__predict_false(err == ENOMEM)) {
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_DROPPED);
 			ifp->if_ierrors++;
 			pool_put(&xni_pkt_pool, pkt);
@@ -821,7 +821,7 @@ xennetback_evthandler(void *arg)
 		if (__predict_false(err)) {
 			printf("%s: mapping foreing page failed: %d\n",
 			    xneti->xni_if.if_xname, err);
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_ERROR);
 			ifp->if_ierrors++;
 			pool_put(&xni_pkt_pool, pkt);
@@ -831,13 +831,13 @@ xennetback_evthandler(void *arg)
 		if ((ifp->if_flags & IFF_PROMISC) == 0) {
 			struct ether_header *eh =
-			    (void*)(pkt_va + txreq->offset);
+			    (void*)(pkt_va + txreq.offset);
 			if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
 			    memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
 			    ETHER_ADDR_LEN) != 0) {
 				xni_pkt_unmap(pkt, pkt_va);
 				m_freem(m);
-				xennetback_tx_response(xneti, txreq->id,
+				xennetback_tx_response(xneti, txreq.id,
 				    NETIF_RSP_OKAY);
 				continue; /* packet is not for us */
 			}
@@ -856,31 +856,31 @@ so always copy for now.
 			 * ack it. Delaying it until the mbuf is
 			 * freed will stall transmit.
 			 */
-			m->m_len = min(MHLEN, txreq->size);
+			m->m_len = min(MHLEN, txreq.size);
 			m->m_pkthdr.len = 0;
-			m_copyback(m, 0, txreq->size,
-			    (void *)(pkt_va + txreq->offset));
+			m_copyback(m, 0, txreq.size,
+			    (void *)(pkt_va + txreq.offset));
 			xni_pkt_unmap(pkt, pkt_va);
-			if (m->m_pkthdr.len < txreq->size) {
+			if (m->m_pkthdr.len < txreq.size) {
 				ifp->if_ierrors++;
 				m_freem(m);
-				xennetback_tx_response(xneti, txreq->id,
+				xennetback_tx_response(xneti, txreq.id,
 				    NETIF_RSP_DROPPED);
 				continue;
 			}
-			xennetback_tx_response(xneti, txreq->id,
+			xennetback_tx_response(xneti, txreq.id,
 			    NETIF_RSP_OKAY);
 		} else {
-			pkt->pkt_id = txreq->id;
+			pkt->pkt_id = txreq.id;
 			pkt->pkt_xneti = xneti;
-			MEXTADD(m, pkt_va + txreq->offset,
-			    txreq->size, M_DEVBUF, xennetback_tx_free, pkt);
-			m->m_pkthdr.len = m->m_len = txreq->size;
+			MEXTADD(m, pkt_va + txreq.offset,
+			    txreq.size, M_DEVBUF, xennetback_tx_free, pkt);
+			m->m_pkthdr.len = m->m_len = txreq.size;
 			m->m_flags |= M_EXT_ROMAP;
 		}
-		if ((txreq->flags & NETTXF_csum_blank) != 0) {
+		if ((txreq.flags & NETTXF_csum_blank) != 0) {
 			xennet_checksum_fill(&m);
 			if (m == NULL) {
 				ifp->if_ierrors++;
@@ -964,6 +964,7 @@ xennetback_ifsoftstart_transfer(void *arg)
 	mmu_update_t *mmup;
 	multicall_entry_t *mclp;
 	netif_rx_response_t *rxresp;
+	netif_rx_request_t rxreq;
 	RING_IDX req_prod, resp_prod;
 	int do_event = 0;
 	gnttab_transfer_t *gop;
@@ -1039,10 +1040,10 @@ xennetback_ifsoftstart_transfer(void *arg)
 				nppitems++;
 			}
 			/* start filling ring */
-			gop->ref = RING_GET_REQUEST(&xneti->xni_rxring,
-			    xneti->xni_rxring.req_cons)->gref;
-			id = RING_GET_REQUEST(&xneti->xni_rxring,
-			    xneti->xni_rxring.req_cons)->id;
+			RING_COPY_REQUEST(&xneti->xni_rxring,
+			    xneti->xni_rxring.req_cons, &rxreq);
+			gop->ref = rxreq.gref;
+			id = rxreq.id;
 			xen_rmb();
 			xneti->xni_rxring.req_cons++;
 			rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
@@ -1209,6 +1210,7 @@ xennetback_ifsoftstart_copy(void *arg)
 	paddr_t xmit_ma;
 	int i, j;
 	netif_rx_response_t *rxresp;
+	netif_rx_request_t rxreq;
 	RING_IDX req_prod, resp_prod;
 	int do_event = 0;
 	gnttab_copy_t *gop;
@@ -1320,16 +1322,16 @@ xennetback_ifsoftstart_copy(void *arg)
 			gop->source.domid = DOMID_SELF;
 			gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT;
-			gop->dest.u.ref = RING_GET_REQUEST(&xneti->xni_rxring,
-			    xneti->xni_rxring.req_cons)->gref;
+			RING_COPY_REQUEST(&xneti->xni_rxring,
+			    xneti->xni_rxring.req_cons, &rxreq);
+			gop->dest.u.ref = rxreq.gref;
 			gop->dest.offset = 0;
 			gop->dest.domid = xneti->xni_domid;
 			gop->len = m->m_pkthdr.len;
 			gop++;
-			id = RING_GET_REQUEST(&xneti->xni_rxring,
-			    xneti->xni_rxring.req_cons)->id;
+			id = rxreq.id;
 			xen_rmb();
 			xneti->xni_rxring.req_cons++;
 			rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,