hw/nvme: fix aio cancel in dsm

When the DSM operation is cancelled asynchronously, we set iocb->ret to
-ECANCELED. However, the callback function only checks the return value
of the completed aio, which may have completed successfully prior to the
cancellation, and thus the callback ends up continuing the DSM operation
instead of bailing out. Fix this.

Secondly, fix a potential use-after-free by removing the bottom half and
enqueuing the completion directly.
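
For illustration, a minimal standalone sketch of the fixed callback pattern
(plain C, not QEMU code; all type and function names below are made up): the
sticky per-command status recorded by an asynchronous cancel takes precedence
over the status of the individual aio that just completed, and the user
callback is invoked directly instead of being deferred to a bottom half.

/* Sketch only: simulates the pattern, does not use any QEMU API. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SketchIOCB {
    int ret;                           /* sticky per-command status */
    size_t idx, nr;                    /* ranges processed / total */
    void (*cb)(void *opaque, int ret); /* user completion callback */
    void *opaque;
} SketchIOCB;

/* Asynchronous cancel: only records the cancellation. */
static void sketch_cancel(SketchIOCB *iocb)
{
    iocb->ret = -ECANCELED;
}

/* Per-aio completion callback, mirroring the fixed control flow. */
static void sketch_cb(SketchIOCB *iocb, int ret)
{
    if (iocb->ret < 0) {
        /* command already failed or was cancelled; bail out */
        goto done;
    } else if (ret < 0) {
        iocb->ret = ret;
        goto done;
    }

    if (iocb->idx < iocb->nr) {
        iocb->idx++; /* stand-in for submitting the next range */
        return;
    }

done:
    /* complete directly; no bottom half, nothing touches iocb afterwards */
    iocb->cb(iocb->opaque, iocb->ret);
    free(iocb);
}

static void user_cb(void *opaque, int ret)
{
    (void)opaque;
    printf("completed with status %d\n", ret);
}

int main(void)
{
    SketchIOCB *iocb = calloc(1, sizeof(*iocb));

    if (!iocb) {
        return 1;
    }

    iocb->nr = 2;
    iocb->cb = user_cb;

    sketch_cb(iocb, 0);  /* first range completes successfully */
    sketch_cancel(iocb); /* cancellation arrives between completions */
    sketch_cb(iocb, 0);  /* next completion sees iocb->ret and bails out */

    return 0;
}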

Fixes: d7d1474fd8 ("hw/nvme: reimplement dsm to allow cancellation")
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>

Author: Klaus Jensen
Date:   2022-11-10 07:59:50 +01:00
parent 36a251c346
commit 818b9b8f5e

@@ -2329,7 +2329,6 @@ typedef struct NvmeDSMAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
     NvmeRequest *req;
-    QEMUBH *bh;
     int ret;
 
     NvmeDsmRange *range;
@@ -2351,7 +2350,7 @@ static void nvme_dsm_cancel(BlockAIOCB *aiocb)
     } else {
         /*
          * We only reach this if nvme_dsm_cancel() has already been called or
-         * the command ran to completion and nvme_dsm_bh is scheduled to run.
+         * the command ran to completion.
          */
         assert(iocb->idx == iocb->nr);
     }
@@ -2362,17 +2361,6 @@ static const AIOCBInfo nvme_dsm_aiocb_info = {
     .cancel_async = nvme_dsm_cancel,
 };
 
-static void nvme_dsm_bh(void *opaque)
-{
-    NvmeDSMAIOCB *iocb = opaque;
-
-    iocb->common.cb(iocb->common.opaque, iocb->ret);
-
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-    qemu_aio_unref(iocb);
-}
-
 static void nvme_dsm_cb(void *opaque, int ret);
 
 static void nvme_dsm_md_cb(void *opaque, int ret)
@@ -2384,16 +2372,10 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     uint64_t slba;
     uint32_t nlb;
 
-    if (ret < 0) {
-        iocb->ret = ret;
+    if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
         goto done;
     }
 
-    if (!ns->lbaf.ms) {
-        nvme_dsm_cb(iocb, 0);
-        return;
-    }
-
     range = &iocb->range[iocb->idx - 1];
     slba = le64_to_cpu(range->slba);
     nlb = le32_to_cpu(range->nlb);
@@ -2406,7 +2388,6 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
     if (ret) {
         if (ret < 0) {
-            iocb->ret = ret;
             goto done;
         }
 
@@ -2420,8 +2401,7 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     return;
 
 done:
-    iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    nvme_dsm_cb(iocb, ret);
 }
 
 static void nvme_dsm_cb(void *opaque, int ret)
@@ -2434,7 +2414,9 @@ static void nvme_dsm_cb(void *opaque, int ret)
     uint64_t slba;
     uint32_t nlb;
 
-    if (ret < 0) {
+    if (iocb->ret < 0) {
+        goto done;
+    } else if (ret < 0) {
         iocb->ret = ret;
         goto done;
     }
@@ -2468,7 +2450,8 @@ next:
 
 done:
     iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    iocb->common.cb(iocb->common.opaque, iocb->ret);
+    qemu_aio_unref(iocb);
 }
 
 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
@@ -2486,7 +2469,6 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
                                          nvme_misc_cb, req);
 
     iocb->req = req;
-    iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb);
     iocb->ret = 0;
     iocb->range = g_new(NvmeDsmRange, nr);
     iocb->nr = nr;