block: protect tracked_requests and flush_queue with reqs_lock

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170605123908.18777-14-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
This commit is contained in:
Paolo Bonzini 2017-06-05 14:39:02 +02:00 committed by Fam Zheng
parent 47fec59941
commit 3783fa3dd3
3 changed files with 24 additions and 7 deletions

View File

@@ -320,6 +320,7 @@ BlockDriverState *bdrv_new(void)
         QLIST_INIT(&bs->op_blockers[i]);
     }
     notifier_with_return_list_init(&bs->before_write_notifiers);
+    qemu_co_mutex_init(&bs->reqs_lock);
     bs->refcnt = 1;
     bs->aio_context = qemu_get_aio_context();

View File

@@ -378,8 +378,10 @@ static void tracked_request_end(BdrvTrackedRequest *req)
         atomic_dec(&req->bs->serialising_in_flight);
     }

+    qemu_co_mutex_lock(&req->bs->reqs_lock);
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
+    qemu_co_mutex_unlock(&req->bs->reqs_lock);
 }

 /**
@@ -404,7 +406,9 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
     qemu_co_queue_init(&req->wait_queue);

+    qemu_co_mutex_lock(&bs->reqs_lock);
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
+    qemu_co_mutex_unlock(&bs->reqs_lock);
 }

 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
@@ -526,6 +530,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
     do {
         retry = false;
+        qemu_co_mutex_lock(&bs->reqs_lock);
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
             if (req == self || (!req->serialising && !self->serialising)) {
                 continue;
@@ -544,7 +549,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
                  * (instead of producing a deadlock in the former case). */
                 if (!req->waiting_for) {
                     self->waiting_for = req;
-                    qemu_co_queue_wait(&req->wait_queue, NULL);
+                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                     self->waiting_for = NULL;
                     retry = true;
                     waited = true;
@@ -552,6 +557,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
                 }
             }
         }
+        qemu_co_mutex_unlock(&bs->reqs_lock);
     } while (retry);

     return waited;
@@ -2291,14 +2297,17 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
         goto early_exit;
     }

+    qemu_co_mutex_lock(&bs->reqs_lock);
     current_gen = atomic_read(&bs->write_gen);

     /* Wait until any previous flushes are completed */
     while (bs->active_flush_req) {
-        qemu_co_queue_wait(&bs->flush_queue, NULL);
+        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
     }

+    /* Flushes reach this point in nondecreasing current_gen order.  */
     bs->active_flush_req = true;
+    qemu_co_mutex_unlock(&bs->reqs_lock);

     /* Write back all layers by calling one driver function */
     if (bs->drv->bdrv_co_flush) {
@@ -2370,9 +2379,12 @@ out:
     if (ret == 0) {
         bs->flushed_gen = current_gen;
     }
+
+    qemu_co_mutex_lock(&bs->reqs_lock);
     bs->active_flush_req = false;
     /* Return value is ignored - it's ok if wait queue is empty */
     qemu_co_queue_next(&bs->flush_queue);
+    qemu_co_mutex_unlock(&bs->reqs_lock);

 early_exit:
     bdrv_dec_in_flight(bs);

View File

@@ -609,11 +609,6 @@ struct BlockDriverState {
     uint64_t write_threshold_offset;
     NotifierWithReturn write_threshold_notifier;

-    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
-    CoQueue flush_queue;            /* Serializing flush queue */
-    bool active_flush_req;          /* Flush request in flight? */
-    unsigned int flushed_gen;       /* Flushed write generation */
-
     QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

     /* Offset after the highest byte written to */
@@ -647,6 +642,15 @@ struct BlockDriverState {
     /* Accessed with atomic ops.  */
     int quiesce_counter;
     unsigned int write_gen;         /* Current data generation */
+
+    /* Protected by reqs_lock.  */
+    CoMutex reqs_lock;
+    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+    CoQueue flush_queue;            /* Serializing flush queue */
+    bool active_flush_req;          /* Flush request in flight? */
+
+    /* Only read/written by whoever has set active_flush_req to true.  */
+    unsigned int flushed_gen;       /* Flushed write generation */
 };

 struct BlockBackendRootState {