block: convert qemu_aio_flush() calls to bdrv_drain_all()
Many places in QEMU call qemu_aio_flush() to complete all pending asynchronous I/O. Most of these places actually want to drain all block requests, but there is no block layer API to do so. This patch introduces the bdrv_drain_all() API to wait for requests across all BlockDriverStates to complete. As a bonus we perform checks after qemu_aio_wait() to ensure that requests really have finished. Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
5f8b6491f2
commit
922453bca6
@ -387,7 +387,7 @@ static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
|
||||
|
||||
for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
|
||||
if (bmds_aio_inflight(bmds, sector)) {
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
}
|
||||
if (bdrv_get_dirty(bmds->bs, sector)) {
|
||||
|
||||
|
19
block.c
19
block.c
@ -846,6 +846,25 @@ void bdrv_close_all(void)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;

    /* Drain the global AIO context: processes completions for all
     * outstanding asynchronous I/O before returning.  NOTE(review):
     * presumably this also lets queued coroutine requests run to
     * completion -- confirm against qemu_aio_flush()'s contract. */
    qemu_aio_flush();

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        /* No request may still be tracked on this BDS ... */
        assert(QLIST_EMPTY(&bs->tracked_requests));
        /* ... and the I/O-throttling queue must be empty as well. */
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}
|
||||
|
||||
/* make a BlockDriverState anonymous by removing from bdrv_state list.
|
||||
Also, NULL terminate the device_name to prevent double remove */
|
||||
void bdrv_make_anon(BlockDriverState *bs)
|
||||
|
1
block.h
1
block.h
@ -214,6 +214,7 @@ int bdrv_flush(BlockDriverState *bs);
|
||||
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
|
||||
void bdrv_flush_all(void);
|
||||
void bdrv_close_all(void);
|
||||
void bdrv_drain_all(void);
|
||||
|
||||
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
|
||||
int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
|
||||
|
@ -653,7 +653,7 @@ int do_snapshot_blkdev(Monitor *mon, const QDict *qdict, QObject **ret_data)
|
||||
goto out;
|
||||
}
|
||||
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
bdrv_flush(bs);
|
||||
|
||||
bdrv_close(bs);
|
||||
@ -840,7 +840,7 @@ int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data)
|
||||
}
|
||||
|
||||
/* quiesce block driver; prevent further io */
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
bdrv_flush(bs);
|
||||
bdrv_close(bs);
|
||||
|
||||
|
2
cpus.c
2
cpus.c
@ -396,7 +396,7 @@ static void do_vm_stop(RunState state)
|
||||
pause_all_vcpus();
|
||||
runstate_set(state);
|
||||
vm_state_notify(0, state);
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
bdrv_flush_all();
|
||||
monitor_protocol_event(QEVENT_STOP, NULL);
|
||||
}
|
||||
|
@ -200,8 +200,9 @@ static void pmac_ide_flush(DBDMA_io *io)
|
||||
{
|
||||
MACIOIDEState *m = io->opaque;
|
||||
|
||||
if (m->aiocb)
|
||||
qemu_aio_flush();
|
||||
if (m->aiocb) {
|
||||
bdrv_drain_all();
|
||||
}
|
||||
}
|
||||
|
||||
/* PowerMac IDE memory IO */
|
||||
|
@ -309,7 +309,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
|
||||
* aio operation with preadv/pwritev.
|
||||
*/
|
||||
if (bm->bus->dma->aiocb) {
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
assert(bm->bus->dma->aiocb == NULL);
|
||||
assert((bm->status & BM_STATUS_DMAING) == 0);
|
||||
}
|
||||
|
@ -474,7 +474,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
|
||||
* This should cancel pending requests, but can't do nicely until there
|
||||
* are per-device request lists.
|
||||
*/
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
}
|
||||
|
||||
/* coalesce internal state, copy to pci i/o region 0
|
||||
|
@ -120,7 +120,7 @@ static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t v
|
||||
devices, and bit 2 the non-primary-master IDE devices. */
|
||||
if (val & UNPLUG_ALL_IDE_DISKS) {
|
||||
DPRINTF("unplug disks\n");
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
bdrv_flush_all();
|
||||
pci_unplug_disks(s->pci_dev.bus);
|
||||
}
|
||||
|
@ -1853,9 +1853,9 @@ int main(int argc, char **argv)
|
||||
command_loop();
|
||||
|
||||
/*
|
||||
* Make sure all outstanding requests get flushed the program exits.
|
||||
* Make sure all outstanding requests complete before the program exits.
|
||||
*/
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
|
||||
if (bs) {
|
||||
bdrv_delete(bs);
|
||||
|
2
savevm.c
2
savevm.c
@ -2104,7 +2104,7 @@ int load_vmstate(const char *name)
|
||||
}
|
||||
|
||||
/* Flush all IO requests so they don't interfere with the new state. */
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
|
||||
bs = NULL;
|
||||
while ((bs = bdrv_next(bs))) {
|
||||
|
@ -351,7 +351,7 @@ void xen_invalidate_map_cache(void)
|
||||
MapCacheRev *reventry;
|
||||
|
||||
/* Flush pending AIO before destroying the mapcache */
|
||||
qemu_aio_flush();
|
||||
bdrv_drain_all();
|
||||
|
||||
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
|
||||
DPRINTF("There should be no locked mappings at this time, "
|
||||
|
Loading…
Reference in New Issue
Block a user