hw/block/nvme: drain namespaces on sq deletion
For most commands, when issuing an AIO, the BlockAIOCB is stored in the NvmeRequest aiocb pointer when the AIO is issued. The main use of this is cancelling AIOs when deleting submission queues (it is currently not used for Abort). However, some commands like Dataset Management and Zone Management Send (zone reset) may involve more than one AIO, and here the AIOs are issued without saving a reference to the BlockAIOCB. This is a problem since nvme_del_sq() will attempt to cancel outstanding AIOs, potentially with an invalid BlockAIOCB, since the aiocb pointer is not NULL'ed when the request structure is recycled. Fix this by 1. making sure the aiocb pointer is NULL'ed when requests are recycled, 2. only attempting to cancel the AIO if the aiocb is non-NULL, and 3. if any AIOs could not be cancelled, draining all AIO as a last resort. Fixes: dc04d25e2f
("hw/block/nvme: add support for the format nvm command")
Fixes: c94973288c ("hw/block/nvme: add broadcast nsid support flush command")
Fixes: e4e430b3d6 ("hw/block/nvme: add simple copy command")
Fixes: 5f5dc4c6a9 ("hw/block/nvme: zero out zones on reset")
Fixes: 2605257a26 ("hw/block/nvme: add the dataset management command")
Cc: Gollu Appalanaidu <anaidu.gollu@samsung.com>
Cc: Minwoo Im <minwoo.im@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
This commit is contained in:
parent
5cefe28708
commit
98f84f5a4e
@ -470,6 +470,7 @@ static void nvme_req_clear(NvmeRequest *req)
|
|||||||
{
|
{
|
||||||
req->ns = NULL;
|
req->ns = NULL;
|
||||||
req->opaque = NULL;
|
req->opaque = NULL;
|
||||||
|
req->aiocb = NULL;
|
||||||
memset(&req->cqe, 0x0, sizeof(req->cqe));
|
memset(&req->cqe, 0x0, sizeof(req->cqe));
|
||||||
req->status = NVME_SUCCESS;
|
req->status = NVME_SUCCESS;
|
||||||
}
|
}
|
||||||
@ -3687,6 +3688,7 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
|
|||||||
NvmeSQueue *sq;
|
NvmeSQueue *sq;
|
||||||
NvmeCQueue *cq;
|
NvmeCQueue *cq;
|
||||||
uint16_t qid = le16_to_cpu(c->qid);
|
uint16_t qid = le16_to_cpu(c->qid);
|
||||||
|
uint32_t nsid;
|
||||||
|
|
||||||
if (unlikely(!qid || nvme_check_sqid(n, qid))) {
|
if (unlikely(!qid || nvme_check_sqid(n, qid))) {
|
||||||
trace_pci_nvme_err_invalid_del_sq(qid);
|
trace_pci_nvme_err_invalid_del_sq(qid);
|
||||||
@ -3698,9 +3700,26 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
|
|||||||
sq = n->sq[qid];
|
sq = n->sq[qid];
|
||||||
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
|
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
|
||||||
r = QTAILQ_FIRST(&sq->out_req_list);
|
r = QTAILQ_FIRST(&sq->out_req_list);
|
||||||
assert(r->aiocb);
|
if (r->aiocb) {
|
||||||
blk_aio_cancel(r->aiocb);
|
blk_aio_cancel(r->aiocb);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Drain all namespaces if there are still outstanding requests that we
|
||||||
|
* could not cancel explicitly.
|
||||||
|
*/
|
||||||
|
if (!QTAILQ_EMPTY(&sq->out_req_list)) {
|
||||||
|
for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
|
||||||
|
NvmeNamespace *ns = nvme_ns(n, nsid);
|
||||||
|
if (ns) {
|
||||||
|
nvme_ns_drain(ns);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(QTAILQ_EMPTY(&sq->out_req_list));
|
||||||
|
|
||||||
if (!nvme_check_cqid(n, sq->cqid)) {
|
if (!nvme_check_cqid(n, sq->cqid)) {
|
||||||
cq = n->cq[sq->cqid];
|
cq = n->cq[sq->cqid];
|
||||||
QTAILQ_REMOVE(&cq->sq_list, sq, entry);
|
QTAILQ_REMOVE(&cq->sq_list, sq, entry);
|
||||||
|
Loading…
Reference in New Issue
Block a user