mark with XXXSMP all remaining spl*() and tsleep() calls

This commit is contained in:
jdolecek 2018-06-24 20:28:57 +00:00
parent 555482fa99
commit 8c3f655828
10 changed files with 74 additions and 56 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: x86_xpmap.c,v 1.74 2017/09/16 09:28:38 maxv Exp $ */
/* $NetBSD: x86_xpmap.c,v 1.75 2018/06/24 20:28:57 jdolecek Exp $ */
/*
* Copyright (c) 2017 The NetBSD Foundation, Inc.
@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.74 2017/09/16 09:28:38 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.75 2018/06/24 20:28:57 jdolecek Exp $");
#include "opt_xen.h"
#include "opt_ddb.h"
@ -186,7 +186,7 @@ xen_set_ldt(vaddr_t base, uint32_t entries)
base, entries, ptp));
pmap_pte_clearbits(ptp, PG_RW);
}
s = splvm();
s = splvm(); /* XXXSMP */
xpq_queue_set_ldt(base, entries);
splx(s);
}
@ -320,7 +320,7 @@ xpq_queue_tlb_flush(void)
void
xpq_flush_cache(void)
{
int s = splvm();
int s = splvm(); /* XXXSMP */
xpq_flush_queue();
@ -955,7 +955,7 @@ void
xen_set_user_pgd(paddr_t page)
{
struct mmuext_op op;
int s = splvm();
int s = splvm(); /* XXXSMP */
xpq_flush_queue();
op.cmd = MMUEXT_NEW_USER_BASEPTR;

View File

@ -1,4 +1,4 @@
/* $NetBSD: xen_bus_dma.c,v 1.26 2012/06/30 23:36:20 jym Exp $ */
/* $NetBSD: xen_bus_dma.c,v 1.27 2018/06/24 20:28:57 jdolecek Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
/*-
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.26 2012/06/30 23:36:20 jym Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.27 2018/06/24 20:28:57 jdolecek Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -126,7 +126,7 @@ _xen_alloc_contig(bus_size_t size, bus_size_t alignment,
pg = NULL;
goto failed;
}
s = splvm();
s = splvm(); /* XXXSMP */
/* Map the new extent in place of the old pages */
for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
pgnext = pg->pageq.queue.tqe_next;
@ -160,7 +160,7 @@ failed:
uvm_pagefree(pg);
}
/* replace the pages that we already gave to Xen */
s = splvm();
s = splvm(); /* XXXSMP */
for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
pgnext = pg->pageq.queue.tqe_next;
set_xen_guest_handle(res.extent_start, &mfn);

View File

@ -1,4 +1,4 @@
/* $NetBSD: xen_shm_machdep.c,v 1.10 2011/09/02 22:25:08 dyoung Exp $ */
/* $NetBSD: xen_shm_machdep.c,v 1.11 2018/06/24 20:28:57 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.10 2011/09/02 22:25:08 dyoung Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.11 2018/06/24 20:28:57 jdolecek Exp $");
#include <sys/types.h>
@ -132,6 +132,7 @@ xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
panic("xen_shm_map");
}
#endif
/* XXXSMP */
s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
/*
* if a driver is waiting for resources, don't try to allocate
@ -211,6 +212,7 @@ xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
if (__predict_false(ret))
panic("xen_shm_unmap: unmap failed");
va = va >> PAGE_SHIFT;
/* XXXSMP */
s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
vmem_free(xen_shm_arena, va, nentries);
while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
@ -219,11 +221,11 @@ xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
splx(s);
if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
/* callback succeeded */
s = splvm();
s = splvm(); /* XXXSMP */
pool_put(&xen_shm_callback_pool, xshmc);
} else {
/* callback failed, probably out of resources */
s = splvm();
s = splvm(); /* XXXSMP */
SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
xshmc_entries);
@ -239,7 +241,7 @@ xen_shm_callback(int (*callback)(void *), void *arg)
struct xen_shm_callback_entry *xshmc;
int s;
s = splvm();
s = splvm(); /* XXXSMP */
xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
if (xshmc == NULL) {
splx(s);

View File

@ -1,4 +1,4 @@
/* $NetBSD: xenfunc.c,v 1.17 2017/10/15 10:58:32 maxv Exp $ */
/* $NetBSD: xenfunc.c,v 1.18 2018/06/24 20:28:57 jdolecek Exp $ */
/*
*
@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.17 2017/10/15 10:58:32 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.18 2018/06/24 20:28:57 jdolecek Exp $");
#include <sys/param.h>
@ -53,7 +53,7 @@ void xen_set_ldt(vaddr_t, uint32_t);
void
invlpg(vaddr_t addr)
{
int s = splvm();
int s = splvm(); /* XXXSMP */
xpq_queue_invlpg(addr);
splx(s);
}
@ -101,7 +101,7 @@ rcr0(void)
void
lcr3(vaddr_t val)
{
int s = splvm();
int s = splvm(); /* XXXSMP */
xpq_queue_pt_switch(xpmap_ptom_masked(val));
splx(s);
}
@ -110,7 +110,7 @@ lcr3(vaddr_t val)
void
tlbflush(void)
{
int s = splvm();
int s = splvm(); /* XXXSMP */
xpq_queue_tlb_flush();
splx(s);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: balloon.c,v 1.18 2016/12/23 17:01:10 cherry Exp $ */
/* $NetBSD: balloon.c,v 1.19 2018/06/24 20:28:58 jdolecek Exp $ */
/*-
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
#endif
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: balloon.c,v 1.18 2016/12/23 17:01:10 cherry Exp $");
__KERNEL_RCSID(0, "$NetBSD: balloon.c,v 1.19 2018/06/24 20:28:58 jdolecek Exp $");
#include <sys/inttypes.h>
#include <sys/device.h>
@ -275,7 +275,7 @@ xenmem_get_maxreservation(void)
int s;
unsigned int ret;
s = splvm();
s = splvm(); /* XXXSMP */
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation,
& (domid_t) { DOMID_SELF });
@ -295,7 +295,7 @@ xenmem_get_currentreservation(void)
{
int s, ret;
s = splvm();
s = splvm(); /* XXXSMP */
ret = HYPERVISOR_memory_op(XENMEM_current_reservation,
& (domid_t) { DOMID_SELF });
splx(s);
@ -401,7 +401,7 @@ balloon_inflate(struct balloon_xenbus_softc *sc, size_t tpages)
mfn_list[rpages] = xpmap_ptom(pa) >> PAGE_SHIFT;
s = splvm();
s = splvm(); /* XXXSMP */
/* Invalidate pg */
xpmap_ptom_unmap(pa);
splx(s);
@ -415,7 +415,7 @@ balloon_inflate(struct balloon_xenbus_softc *sc, size_t tpages)
set_xen_guest_handle(reservation.extent_start, mfn_list);
reservation.nr_extents = rpages;
s = splvm();
s = splvm(); /* XXXSMP */
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
splx(s);
@ -481,7 +481,7 @@ balloon_deflate(struct balloon_xenbus_softc *sc, size_t tpages)
set_xen_guest_handle(reservation.extent_start, mfn_list);
reservation.nr_extents = tpages;
s = splvm();
s = splvm(); /* XXXSMP */
ret = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
splx(s);
@ -505,7 +505,7 @@ balloon_deflate(struct balloon_xenbus_softc *sc, size_t tpages)
pa = pmap_pa_end;
/* P2M update */
s = splvm();
s = splvm(); /* XXXSMP */
pmap_pa_end += PAGE_SIZE; /* XXX: TLB flush ?*/
xpmap_ptom_map(pa, ptoa(mfn_list[rpages]));
xpq_queue_machphys_update(ptoa(mfn_list[rpages]), pa);
@ -513,7 +513,7 @@ balloon_deflate(struct balloon_xenbus_softc *sc, size_t tpages)
if (uvm_physseg_plug(atop(pa), 1, NULL) == false) {
/* Undo P2M */
s = splvm();
s = splvm(); /* XXXSMP */
xpmap_ptom_unmap(pa);
xpq_queue_machphys_update(ptoa(mfn_list[rpages]), 0);
pmap_pa_end -= PAGE_SIZE; /* XXX: TLB flush ?*/
@ -540,7 +540,7 @@ balloon_deflate(struct balloon_xenbus_softc *sc, size_t tpages)
/* Update P->M */
pa = VM_PAGE_TO_PHYS(bpg_entry->pg);
s = splvm();
s = splvm(); /* XXXSMP */
xpmap_ptom_map(pa, ptoa(mfn_list[rpages]));
xpq_queue_machphys_update(ptoa(mfn_list[rpages]), pa);

View File

@ -1,4 +1,4 @@
/* $NetBSD: if_xennet_xenbus.c,v 1.75 2018/06/22 04:17:41 msaitoh Exp $ */
/* $NetBSD: if_xennet_xenbus.c,v 1.76 2018/06/24 20:28:58 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -84,7 +84,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.75 2018/06/22 04:17:41 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.76 2018/06/24 20:28:58 jdolecek Exp $");
#include "opt_xen.h"
#include "opt_nfs_boot.h"
@ -322,7 +322,7 @@ xennet_xenbus_attach(device_t parent, device_t self, void *aux)
}
mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
SLIST_INIT(&sc->sc_rxreq_head);
s = splvm();
s = splvm(); /* XXXSMP */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
rxreq->rxreq_id = i;
@ -426,12 +426,13 @@ xennet_xenbus_detach(device_t self, int flags)
/* wait for pending TX to complete, and collect pending RX packets */
xennet_handler(sc);
while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
/* XXXSMP */
tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
xennet_handler(sc);
}
xennet_free_rx_buffer(sc);
s1 = splvm();
s1 = splvm(); /* XXXSMP */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
@ -446,12 +447,14 @@ xennet_xenbus_detach(device_t self, int flags)
rnd_detach_source(&sc->sc_rnd_source);
while (xengnt_status(sc->sc_tx_ring_gntref)) {
/* XXXSMP */
tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
}
xengnt_revoke_access(sc->sc_tx_ring_gntref);
uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
UVM_KMF_WIRED);
while (xengnt_status(sc->sc_rx_ring_gntref)) {
/* XXXSMP */
tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
}
xengnt_revoke_access(sc->sc_rx_ring_gntref);
@ -624,6 +627,7 @@ xennet_xenbus_suspend(device_t dev, const pmf_qual_t *qual)
/* process any outstanding TX responses, then collect RX packets */
xennet_handler(sc);
while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
/* XXXSMP */
tsleep(xennet_xenbus_suspend, PRIBIO, "xnet_suspend", hz/2);
xennet_handler(sc);
}
@ -752,7 +756,7 @@ out_loop:
* outstanding in the page update queue -- make sure we flush
* those first!
*/
s = splvm();
s = splvm(); /* XXXSMP */
xpq_flush_queue();
splx(s);
/* now decrease reservation */

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbd_xenbus.c,v 1.78 2017/11/11 21:03:01 riastradh Exp $ */
/* $NetBSD: xbd_xenbus.c,v 1.79 2018/06/24 20:28:58 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.78 2017/11/11 21:03:01 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.79 2018/06/24 20:28:58 jdolecek Exp $");
#include "opt_xen.h"
@ -323,14 +323,16 @@ xbd_xenbus_detach(device_t dev, int flags)
if (rc != 0)
return rc;
s = splbio();
s = splbio(); /* XXXSMP */
DPRINTF(("%s: xbd_detach\n", device_xname(dev)));
if (sc->sc_shutdown == BLKIF_SHUTDOWN_RUN) {
sc->sc_shutdown = BLKIF_SHUTDOWN_LOCAL;
/* wait for requests to complete */
while (sc->sc_backend_status == BLKIF_STATE_CONNECTED &&
disk_isbusy(&sc->sc_dksc.sc_dkdev))
disk_isbusy(&sc->sc_dksc.sc_dkdev)) {
/* XXXSMP */
tsleep(xbd_xenbus_detach, PRIBIO, "xbddetach", hz/2);
}
xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosing);
}
@ -341,8 +343,10 @@ xbd_xenbus_detach(device_t dev, int flags)
return EALREADY;
}
while (xenbus_read_driver_state(sc->sc_xbusd->xbusd_otherend)
!= XenbusStateClosed)
!= XenbusStateClosed) {
/* XXXSMP */
tsleep(xbd_xenbus_detach, PRIBIO, "xbddetach2", hz/2);
}
splx(s);
/* locate the major number */
@ -373,6 +377,7 @@ xbd_xenbus_detach(device_t dev, int flags)
intr_disestablish(sc->sc_ih);
while (xengnt_status(sc->sc_ring_gntref)) {
/* XXXSMP */
tsleep(xbd_xenbus_detach, PRIBIO, "xbd_ref", hz/2);
}
xengnt_revoke_access(sc->sc_ring_gntref);
@ -392,11 +397,13 @@ xbd_xenbus_suspend(device_t dev, const pmf_qual_t *qual) {
sc = device_private(dev);
s = splbio();
s = splbio(); /* XXXSMP */
/* wait for requests to complete, then suspend device */
while (sc->sc_backend_status == BLKIF_STATE_CONNECTED &&
disk_isbusy(&sc->sc_dksc.sc_dkdev))
disk_isbusy(&sc->sc_dksc.sc_dkdev)) {
/* XXXSMP */
tsleep(xbd_xenbus_suspend, PRIBIO, "xbdsuspend", hz/2);
}
hypervisor_mask_event(sc->sc_evtchn);
sc->sc_backend_status = BLKIF_STATE_SUSPENDED;
@ -529,13 +536,15 @@ static void xbd_backend_changed(void *arg, XenbusState new_state)
case XenbusStateInitialised:
break;
case XenbusStateClosing:
s = splbio();
s = splbio(); /* XXXSMP */
if (sc->sc_shutdown == BLKIF_SHUTDOWN_RUN)
sc->sc_shutdown = BLKIF_SHUTDOWN_REMOTE;
/* wait for requests to complete */
while (sc->sc_backend_status == BLKIF_STATE_CONNECTED &&
disk_isbusy(&sc->sc_dksc.sc_dkdev))
disk_isbusy(&sc->sc_dksc.sc_dkdev)) {
/* XXXSMP */
tsleep(xbd_xenbus_detach, PRIBIO, "xbddetach", hz/2);
}
splx(s);
xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
break;
@ -846,10 +855,11 @@ xbdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
return EOPNOTSUPP;
}
s = splbio();
s = splbio(); /* XXXSMP */
while (RING_FULL(&sc->sc_ring)) {
sc->sc_xbdreq_wait = 1;
/* XXXSMP */
tsleep(&sc->sc_xbdreq_wait, PRIBIO, "xbdreq", 0);
}
sc->sc_xbdreq_wait = 0;
@ -873,6 +883,7 @@ xbdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
hypervisor_notify_via_evtchn(sc->sc_evtchn);
/* request sent, no wait for completion */
while (xbdreq->req_sync.s_done == 0) {
/* XXXSMP */
tsleep(xbdreq, PRIBIO, "xbdsync", 0);
}
if (xbdreq->req_sync.s_error == BLKIF_RSP_EOPNOTSUPP)
@ -1019,7 +1030,7 @@ err:
static int
xbd_map_align(struct xbd_req *req)
{
int s = splvm();
int s = splvm(); /* XXXSMP - bogus? */
int rc;
rc = uvm_km_kmem_alloc(kmem_va_arena,
@ -1041,7 +1052,7 @@ xbd_unmap_align(struct xbd_req *req)
if (req->req_bp->b_flags & B_READ)
memcpy(req->req_bp->b_data, req->req_data,
req->req_bp->b_bcount);
s = splvm();
s = splvm(); /* XXXSMP - bogus? */
uvm_km_kmem_free(kmem_va_arena, (vaddr_t)req->req_data, req->req_bp->b_bcount);
splx(s);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbdback_xenbus.c,v 1.66 2018/06/24 20:15:00 jdolecek Exp $ */
/* $NetBSD: xbdback_xenbus.c,v 1.67 2018/06/24 20:28:58 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.66 2018/06/24 20:15:00 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.67 2018/06/24 20:28:58 jdolecek Exp $");
#include <sys/atomic.h>
#include <sys/buf.h>
@ -1783,7 +1783,7 @@ xbdback_map_shm(struct xbdback_io *xbd_io)
xbd_io->xio_mapped = 1;
return xbdi;
case ENOMEM:
s = splvm();
s = splvm(); /* XXXSMP */
if (!xbdback_shmcb) {
if (xen_shm_callback(xbdback_shm_callback, xbdi)
!= 0) {
@ -1819,7 +1819,7 @@ xbdback_shm_callback(void *arg)
* IPL_BIO and IPL_NET levels. Raise to the lowest priority level
* that can mask both.
*/
s = splvm();
s = splvm(); /* XXXSMP */
while(!SIMPLEQ_EMPTY(&xbdback_shmq)) {
struct xbdback_instance *xbdi;
struct xbdback_io *xbd_io;

View File

@ -1,4 +1,4 @@
/* $NetBSD: xen_machdep.c,v 1.15 2016/06/08 01:59:06 jnemeth Exp $ */
/* $NetBSD: xen_machdep.c,v 1.16 2018/06/24 20:28:58 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -53,7 +53,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.15 2016/06/08 01:59:06 jnemeth Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.16 2018/06/24 20:28:58 jdolecek Exp $");
#include "opt_xen.h"
@ -352,7 +352,7 @@ static void
xen_suspend_domain(void)
{
paddr_t mfn;
int s = splvm();
int s = splvm(); /* XXXSMP */
/*
* console becomes unavailable when suspended, so

View File

@ -1,4 +1,4 @@
/* $NetBSD: xpci_xenbus.c,v 1.15 2017/03/29 09:04:35 msaitoh Exp $ */
/* $NetBSD: xpci_xenbus.c,v 1.16 2018/06/24 20:28:58 jdolecek Exp $ */
/*
* Copyright (c) 2009 Manuel Bouyer.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xpci_xenbus.c,v 1.15 2017/03/29 09:04:35 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: xpci_xenbus.c,v 1.16 2018/06/24 20:28:58 jdolecek Exp $");
#include "opt_xen.h"
@ -256,12 +256,13 @@ xpci_backend_changed(void *arg, XenbusState new_state)
case XenbusStateInitialised:
break;
case XenbusStateClosing:
s = splbio();
s = splbio(); /* XXXSMP */
sc->sc_shutdown = 1;
/* wait for requests to complete */
#if 0
while (sc->sc_backend_status == XPCI_STATE_CONNECTED &&
sc->sc_dksc.sc_dkdev.dk_stats->io_busy > 0)
/* XXXSMP */
tsleep(xpci_xenbus_detach, PRIBIO, "xpcidetach",
hz/2);
#endif