/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    bool        busy;
    int         need_kick;
    int         inflight;
} NVMeQueuePair;

/* Memory mapped registers */
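/*
 * Field offsets follow the NVMe controller register map: CAP at 0x00,
 * CC at 0x14, CSTS at 0x1c, AQA/ASQ/ACQ at 0x24/0x28/0x30, and the
 * doorbell array at 0x1000 (checked by the build-time assert below).
 */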
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specfic[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);

    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
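    /*
     * Doorbells come in submission tail / completion head pairs: queue pair
     * idx uses slots 2 * idx and 2 * idx + 1, scaled by CAP.DSTRD
     * (doorbell_scale).
     */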
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

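/*
 * Translate the low status-code byte of a completion entry into an errno
 * value: 0 is success, 1 and 2 correspond to the generic "invalid opcode"
 * and "invalid field" codes, anything else is reported as -EIO.
 */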
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
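        /*
         * q->cq_phase holds the phase value of already consumed entries; the
         * controller flips the phase bit on every pass through the ring, so
         * an entry whose phase still equals cq_phase has not been posted yet.
         */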
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
                    cid);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        q->inflight--;
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }
    q->busy = false;
    return progress;
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}

static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    NvmeLBAF *lbaf;
    uint8_t *resp;
    uint16_t oncs;
    int r;
    uint64_t iova;
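    /*
     * CNS=1 in CDW10 selects Identify Controller; the same command is reused
     * below with CNS=0 and an NSID to identify the namespace.
     */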
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(idctrl->oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROS);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);
    lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(idns->dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(idns->dlfeat) ==
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * Do an early check for completions. q->lock isn't needed because
         * nvme_process_completion() only runs in the event loop thread and
         * cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
            continue;
        }

        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
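    /*
     * CDW10 carries the 0-based queue size in its upper half and the queue ID
     * in its lower half. For the CQ, CDW11 = 0x3 asks for a physically
     * contiguous queue with interrupts enabled; for the SQ, CDW11 holds the
     * contiguous flag plus the ID of the CQ to bind to in its upper half.
     */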
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);

    trace_nvme_poll_cb(s);
    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto out;
    }

    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

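    /*
     * Decode the CAP fields used below: MPSMIN (bits 51:48) gives the minimum
     * page size as 2^(12 + MPSMIN), DSTRD (bits 35:32) the doorbell stride,
     * and TO (bits 31:24) the worst-case enable timeout in 500 ms units.
     */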
    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable device now. */
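    /* CC.IOCQES/IOSQES are the log2 of the entry sizes; bit 0 (CC.EN) enables
     * the controller. */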
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
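    /* Set Features, Feature Identifier 0x06 (Volatile Write Cache):
     * CDW11 bit 0 turns the cache on or off. */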
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
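    /*
     * PRP rules: a single page goes in PRP1; two pages use PRP1 and PRP2
     * directly; anything larger makes PRP2 point at a PRP list. pagelist[0]
     * is consumed by PRP1, so the list seen by the device starts at the
     * second entry of the per-request page.
     */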
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = pagelist[1];
        break;
    default:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;

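    /*
     * CDW10/CDW11 hold the 64-bit starting LBA; CDW12 bits 15:0 are the
     * 0-based number of logical blocks and bit 30 requests FUA.
     */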
    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}


static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROS,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

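    /* Write Zeroes CDW12: bit 25 is the deallocate (DEAC) hint, bit 30 FUA. */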
    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}


static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeDsmRange *buf;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->nr_queues > 1);

    buf = qemu_try_blockalign0(bs, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }

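    /* Single DSM range: length in logical blocks, starting LBA, no context
     * attributes. */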
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return ret;
}


static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}

static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (i = 1; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);