From 04b3fb39c815e6de67c5003e610d1cdecc911980 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Wed, 17 Jun 2020 14:21:57 +0100
Subject: [PATCH] block/nvme: don't access CQE after moving cq.head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Do not access a CQE after incrementing q->cq.head and releasing q->lock.
It is unlikely that this causes problems in practice but it's a latent
bug.

The reason why it should be safe at the moment is that completion
processing is not re-entrant and the CQ doorbell isn't written until the
end of nvme_process_completion().

Make this change now because QEMU expects completion processing to be
re-entrant and later patches will do that.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez
Reviewed-by: Philippe Mathieu-Daudé
Message-id: 20200617132201.1832152-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index d567ece3f4..344893811a 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -321,11 +321,14 @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
     q->busy = true;
     assert(q->inflight >= 0);
     while (q->inflight) {
+        int ret;
         int16_t cid;
+
         c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
         if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
             break;
         }
+        ret = nvme_translate_error(c);
         q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
         if (!q->cq.head) {
             q->cq_phase = !q->cq_phase;
@@ -344,7 +347,7 @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         preq->busy = false;
         preq->cb = preq->opaque = NULL;
         qemu_mutex_unlock(&q->lock);
-        req.cb(req.opaque, nvme_translate_error(c));
+        req.cb(req.opaque, ret);
         qemu_mutex_lock(&q->lock);
         q->inflight--;
         progress = true;
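
The following is a minimal, self-contained sketch (not QEMU code) of the
ordering rule the patch enforces: translate or copy whatever you need from a
completion queue entry while the lock is still held and before the head index
is advanced, then invoke the callback without touching the entry again. All
names here (struct cqe, struct cq, cq_pop, translate_status, QUEUE_SIZE) are
hypothetical stand-ins; only the ordering mirrors the change to
nvme_process_completion() above. Build with, for example,
"gcc -pthread sketch.c".

/*
 * Sketch: read the CQE before moving the head and dropping the lock.
 * Hypothetical types and names, not taken from block/nvme.c.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 8

struct cqe {
    uint16_t cid;     /* command identifier */
    uint16_t status;  /* bit 0 is the phase tag, the rest is the status code */
};

struct cq {
    struct cqe queue[QUEUE_SIZE];
    unsigned head;
    int phase;        /* expected phase tag of newly posted entries */
    pthread_mutex_t lock;
};

/* Stand-in for nvme_translate_error(): map the status field to -errno. */
static int translate_status(const struct cqe *c)
{
    return (c->status >> 1) ? -EIO : 0;
}

/*
 * Pop one completion and invoke cb(opaque, ret) with the lock released.
 * The translated error is captured in a local *before* the head moves and
 * the lock is dropped; after that point the slot may be reused, so the
 * callback never sees the struct cqe pointer.
 */
static bool cq_pop(struct cq *q, void (*cb)(void *opaque, int ret), void *opaque)
{
    pthread_mutex_lock(&q->lock);

    struct cqe *c = &q->queue[q->head];
    if ((c->status & 0x1) != q->phase) {      /* nothing new posted */
        pthread_mutex_unlock(&q->lock);
        return false;
    }

    int ret = translate_status(c);            /* read the CQE now ... */
    q->head = (q->head + 1) % QUEUE_SIZE;     /* ... then move the head ... */
    if (q->head == 0) {
        q->phase = !q->phase;                 /* ring wrapped: flip phase */
    }
    pthread_mutex_unlock(&q->lock);           /* ... and drop the lock */

    cb(opaque, ret);                          /* c must not be used here */
    return true;
}

static void completion_cb(void *opaque, int ret)
{
    printf("%s completed: ret=%d\n", (const char *)opaque, ret);
}

int main(void)
{
    struct cq q = { .head = 0, .phase = 1 };
    pthread_mutex_init(&q.lock, NULL);

    /* Simulate the device posting one successful completion in slot 0. */
    q.queue[0] = (struct cqe){ .cid = 1, .status = 0x1 };

    cq_pop(&q, completion_cb, "req#1");

    pthread_mutex_destroy(&q.lock);
    return 0;
}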