/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
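
/*
 * A note on the ring indices above: sq.tail is the next submission queue
 * slot the driver will fill and is published to the device through the SQ
 * tail doorbell, while cq.head is the next completion queue slot the driver
 * will read and is acknowledged through the CQ head doorbell. Both wrap
 * modulo NVME_QUEUE_SIZE, which is why at most NVME_NUM_REQS requests can
 * be outstanding on a queue pair at once.
 */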

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};
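
/*
 * The "doorbells" pointer above is expected to map the controller's doorbell
 * region (the registers following the NvmeBar block in BAR0), and
 * doorbell_scale is meant to account for the doorbell stride advertised in
 * CAP.DSTRD: the sq_tail/cq_head pair belonging to queue pair @idx is
 * addressed as doorbells[idx * doorbell_scale], as done in
 * nvme_create_queue_pair() below.
 */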

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    ERRP_GUARD();
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (q->free_req_head != -1 &&
           qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry waiting requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    ERRP_GUARD();
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size());
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
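
/*
 * Note that command identifiers are assigned as req->cid = i + 1 above, so
 * the CIDs this driver issues are 1..NVME_NUM_REQS and CID 0 is never used.
 * nvme_process_completion() relies on this to map a completion entry back to
 * q->reqs[cid - 1] and to reject out-of-range CIDs.
 */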

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (!q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
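
/*
 * nvme_kick() publishes the new SQ tail: the smp_wmb() orders the SQE copies
 * made in nvme_submit_command() before the doorbell MMIO write, so the
 * controller never observes a tail pointer that covers entries still being
 * written.
 */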

static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
    NVMeRequest *req;

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    return req;
}

/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);
    if (q->free_req_head == -1) {
        return NULL;
    }
    return nvme_get_free_req_nofail_locked(q);
}

/*
 * Wait for a free request to become available if necessary, then
 * return it.
 */
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);

    while (q->free_req_head == -1) {
        trace_nvme_free_req_queue_wait(q->s, q->index);
        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
    }

    return nvme_get_free_req_nofail_locked(q);
}
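
/*
 * Request slots are recycled through a singly linked free list threaded by
 * free_req_next. The _nowait variant is for callers that cannot yield (the
 * synchronous admin path), while the coroutine_fn variant sleeps on
 * free_req_queue until nvme_put_free_req_and_wake() or completion processing
 * signals that a slot has been returned.
 */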

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
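
/*
 * In the completion entry's status field, bit 0 is the phase tag and the
 * status code proper starts at bit 1, hence the (status >> 1) & 0xFF above.
 * The non-zero cases presumably correspond to the generic NVMe status codes
 * "Invalid Command Opcode" (1) and "Invalid Field in Command" (2); any other
 * failure is collapsed to -EIO.
 */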

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_NUM_REQS) {
            warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32
                        ", should be within: 1..%u inclusively", cid,
                        NVME_NUM_REQS);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}
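
/*
 * The phase tag is what makes the loop above work without a produced-count
 * register: the controller posts completions with the phase bit inverted
 * relative to q->cq_phase, so a mismatch marks a new entry while a match
 * means the slot still holds a stale entry from the previous pass. Each time
 * cq.head wraps to 0 the driver flips cq_phase, matching the controller's
 * behaviour on its next pass over the ring.
 */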

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because an nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_deferred_fn(void *opaque)
{
    NVMeQueuePair *q = opaque;

    QEMU_LOCK_GUARD(&q->lock);
    nvme_kick(q);
    nvme_process_completion(q);
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    qemu_mutex_unlock(&q->lock);

    defer_call(nvme_deferred_fn, q);
}
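
/*
 * Submission is split in two steps: the SQE is copied and the tail advanced
 * under q->lock, but the actual doorbell write happens later in
 * nvme_deferred_fn() via defer_call(). When callers batch I/O inside a
 * defer_call_begin()/defer_call_end() section this should coalesce several
 * submissions into a single nvme_kick(), i.e. one MMIO write.
 */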

static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}
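
/*
 * nvme_admin_cmd_sync() is a blocking helper: it spins the AioContext with
 * AIO_WAIT_WHILE() until nvme_admin_cmd_sync_cb() has stored the result.
 * It is meant for setup-time paths such as nvme_identify() and
 * nvme_add_io_queue() below, not for the I/O fast path.
 */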

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    ERRP_GUARD();
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                    NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}
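
/*
 * nvme_identify() issues the Identify admin command twice: first with
 * cdw10 = 1 (which should select the controller data structure) to pick up
 * VWC, MDTS and ONCS, then with cdw10 = 0 and the target nsid to read the
 * namespace data structure and derive nsze, the block size and the
 * deallocated-block read behaviour.
 */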

static void nvme_poll_queue(NVMeQueuePair *q)
{
    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
    }
    qemu_mutex_unlock(&q->lock);
}

static void nvme_poll_queues(BDRVNVMeState *s)
{
    int i;

    for (i = 0; i < s->queue_count; i++) {
        nvme_poll_queue(s->queues[i]);
    }
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}
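
/*
 * For both Create I/O Completion Queue and Create I/O Submission Queue above,
 * cdw10 packs the zero-based queue size in the upper 16 bits and the queue
 * identifier in the lower 16 bits. cdw11 requests physically contiguous
 * queues; for the CQ it also enables interrupts, and for the SQ the upper
 * half presumably names the completion queue (n) the new SQ reports to.
 */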
|
|
|
|
|
|
|
|
static bool nvme_poll_cb(void *opaque)
|
|
|
|
{
|
|
|
|
EventNotifier *e = opaque;
|
2020-08-21 22:53:59 +03:00
|
|
|
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
|
|
|
|
irq_notifier[MSIX_SHARED_IRQ_IDX]);
|
aio-posix: split poll check from ready handler
Adaptive polling measures the execution time of the polling check plus
handlers called when a polled event becomes ready. Handlers can take a
significant amount of time, making it look like polling was running for
a long time when in fact the event handler was running for a long time.
For example, on Linux the io_submit(2) syscall invoked when a virtio-blk
device's virtqueue becomes ready can take 10s of microseconds. This
can exceed the default polling interval (32 microseconds) and cause
adaptive polling to stop polling.
By excluding the handler's execution time from the polling check we make
the adaptive polling calculation more accurate. As a result, the event
loop now stays in polling mode where previously it would have fallen
back to file descriptor monitoring.
The following data was collected with virtio-blk num-queues=2
event_idx=off using an IOThread. Before:
168k IOPS, IOThread syscalls:
9837.115 ( 0.020 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 16, iocbpp: 0x7fcb9f937db0) = 16
9837.158 ( 0.002 ms): IO iothread1/620155 write(fd: 103, buf: 0x556a2ef71b88, count: 8) = 8
9837.161 ( 0.001 ms): IO iothread1/620155 write(fd: 104, buf: 0x556a2ef71b88, count: 8) = 8
9837.163 ( 0.001 ms): IO iothread1/620155 ppoll(ufds: 0x7fcb90002800, nfds: 4, tsp: 0x7fcb9f1342d0, sigsetsize: 8) = 3
9837.164 ( 0.001 ms): IO iothread1/620155 read(fd: 107, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.174 ( 0.001 ms): IO iothread1/620155 read(fd: 105, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.176 ( 0.001 ms): IO iothread1/620155 read(fd: 106, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.209 ( 0.035 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 32, iocbpp: 0x7fca7d0cebe0) = 32
174k IOPS (+3.6%), IOThread syscalls:
9809.566 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0cdd62be0) = 32
9809.625 ( 0.001 ms): IO iothread1/623061 write(fd: 103, buf: 0x5647cfba5f58, count: 8) = 8
9809.627 ( 0.002 ms): IO iothread1/623061 write(fd: 104, buf: 0x5647cfba5f58, count: 8) = 8
9809.663 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0d0388b50) = 32
Notice that ppoll(2) and eventfd read(2) syscalls are eliminated because
the IOThread stays in polling mode instead of falling back to file
descriptor monitoring.
As usual, polling is not implemented on Windows so this patch ignores
the new io_poll_read() callback in aio-win32.c.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-2-stefanha@redhat.com
[Fixed up aio_set_event_notifier() calls in
tests/unit/test-fdmon-epoll.c added after this series was queued.
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2021-12-07 16:23:31 +03:00
|
|
|
int i;
|
2018-01-16 09:08:55 +03:00
|
|
|
|
2021-12-07 16:23:31 +03:00
|
|
|
for (i = 0; i < s->queue_count; i++) {
|
|
|
|
NVMeQueuePair *q = s->queues[i];
|
|
|
|
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
|
|
|
|
NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* q->lock isn't needed because nvme_process_completion() only runs in
|
|
|
|
* the event loop thread and cannot race with itself.
|
|
|
|
*/
|
|
|
|
if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
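/*
 * Descriptive note (added for clarity): the function above is the cheap
 * polling check.  It only peeks at the completion queue entry at each
 * queue's head and compares its phase bit with q->cq_phase; a mismatch
 * means the controller has posted a new completion, so it returns true
 * without consuming anything.  The actual processing is left to
 * nvme_poll_ready() below, which calls nvme_poll_queues().
 */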
|
|
|
|
|
|
|
|
static void nvme_poll_ready(EventNotifier *e)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
|
|
|
|
irq_notifier[MSIX_SHARED_IRQ_IDX]);
|
|
|
|
|
|
|
|
nvme_poll_queues(s);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2020-10-29 12:32:56 +03:00
|
|
|
NVMeQueuePair *q;
|
2020-08-21 22:53:57 +03:00
|
|
|
AioContext *aio_context = bdrv_get_aio_context(bs);
|
2018-01-16 09:08:55 +03:00
|
|
|
int ret;
|
|
|
|
uint64_t cap;
|
2021-01-28 00:21:37 +03:00
|
|
|
uint32_t ver;
|
2018-01-16 09:08:55 +03:00
|
|
|
uint64_t timeout_ms;
|
|
|
|
uint64_t deadline, now;
|
2020-09-22 11:38:19 +03:00
|
|
|
volatile NvmeBar *regs = NULL;
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
qemu_co_mutex_init(&s->dma_map_lock);
|
|
|
|
qemu_co_queue_init(&s->dma_flush_queue);
|
2019-02-01 22:29:30 +03:00
|
|
|
s->device = g_strdup(device);
|
2018-01-16 09:08:55 +03:00
|
|
|
s->nsid = namespace;
|
|
|
|
s->aio_context = bdrv_get_aio_context(bs);
|
2020-08-21 22:53:59 +03:00
|
|
|
ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
|
2018-01-16 09:08:55 +03:00
|
|
|
if (ret) {
|
|
|
|
error_setg(errp, "Failed to init event notifier");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->vfio = qemu_vfio_open_pci(device, errp);
|
|
|
|
if (!s->vfio) {
|
|
|
|
ret = -EINVAL;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
2020-09-22 11:38:18 +03:00
|
|
|
regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
|
|
|
|
PROT_READ | PROT_WRITE, errp);
|
|
|
|
if (!regs) {
|
2018-01-16 09:08:55 +03:00
|
|
|
ret = -EINVAL;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
/* Perform the initialization sequence as described in NVMe spec "7.6.1
|
|
|
|
* Initialization". */
|
|
|
|
|
2020-09-22 11:38:19 +03:00
|
|
|
cap = le64_to_cpu(regs->cap);
|
2020-10-29 12:32:45 +03:00
|
|
|
trace_nvme_controller_capability_raw(cap);
|
|
|
|
trace_nvme_controller_capability("Maximum Queue Entries Supported",
|
|
|
|
1 + NVME_CAP_MQES(cap));
|
|
|
|
trace_nvme_controller_capability("Contiguous Queues Required",
|
|
|
|
NVME_CAP_CQR(cap));
|
|
|
|
trace_nvme_controller_capability("Doorbell Stride",
|
2021-01-28 00:21:36 +03:00
|
|
|
1 << (2 + NVME_CAP_DSTRD(cap)));
|
2020-10-29 12:32:45 +03:00
|
|
|
trace_nvme_controller_capability("Subsystem Reset Supported",
|
|
|
|
NVME_CAP_NSSRS(cap));
|
|
|
|
trace_nvme_controller_capability("Memory Page Size Minimum",
|
|
|
|
1 << (12 + NVME_CAP_MPSMIN(cap)));
|
|
|
|
trace_nvme_controller_capability("Memory Page Size Maximum",
|
|
|
|
1 << (12 + NVME_CAP_MPSMAX(cap)));
|
2020-09-22 11:38:20 +03:00
|
|
|
if (!NVME_CAP_CSS(cap)) {
|
2018-01-16 09:08:55 +03:00
|
|
|
error_setg(errp, "Device doesn't support NVMe command set");
|
|
|
|
ret = -EINVAL;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
2020-10-29 12:32:59 +03:00
|
|
|
s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
|
2020-09-22 11:38:20 +03:00
|
|
|
s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
|
2018-01-16 09:08:55 +03:00
|
|
|
bs->bl.opt_mem_alignment = s->page_size;
|
2020-10-29 12:32:58 +03:00
|
|
|
bs->bl.request_alignment = s->page_size;
|
2020-09-22 11:38:20 +03:00
|
|
|
timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
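/*
 * Worked example with hypothetical controller capabilities
 * (CAP.MPSMIN = 0, CAP.DSTRD = 0, CAP.TO = 30, the latter in 500 ms units):
 *   s->page_size      = 1u << (12 + 0)       = 4096 bytes
 *   s->doorbell_scale = (4 << 0) / 4         = 1
 *   timeout_ms        = MIN(500 * 30, 30000) = 15000 ms
 */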
|
2018-01-16 09:08:55 +03:00
|
|
|
|
2021-01-28 00:21:37 +03:00
|
|
|
ver = le32_to_cpu(regs->vs);
|
|
|
|
trace_nvme_controller_spec_version(extract32(ver, 16, 16),
|
|
|
|
extract32(ver, 8, 8),
|
|
|
|
extract32(ver, 0, 8));
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
/* Reset device to get a clean state. */
|
2020-09-22 11:38:19 +03:00
|
|
|
regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
|
2018-01-16 09:08:55 +03:00
|
|
|
/* Wait for CSTS.RDY = 0. */
|
2020-08-21 22:53:45 +03:00
|
|
|
deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
|
2020-09-22 11:38:20 +03:00
|
|
|
while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
|
2018-01-16 09:08:55 +03:00
|
|
|
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
|
|
|
|
error_setg(errp, "Timeout while waiting for device to reset (%"
|
|
|
|
PRId64 " ms)",
|
|
|
|
timeout_ms);
|
|
|
|
ret = -ETIMEDOUT;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-29 12:33:04 +03:00
|
|
|
s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
|
|
|
|
sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
|
|
|
|
PROT_WRITE, errp);
|
|
|
|
s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
|
2020-09-22 11:38:17 +03:00
|
|
|
if (!s->doorbells) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
/* Set up admin queue. */
|
|
|
|
s->queues = g_new(NVMeQueuePair *, 1);
|
2020-10-29 12:32:56 +03:00
|
|
|
q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
|
|
|
|
if (!q) {
|
2018-01-16 09:08:55 +03:00
|
|
|
ret = -EINVAL;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
2020-10-29 12:32:56 +03:00
|
|
|
s->queues[INDEX_ADMIN] = q;
|
2020-10-29 12:32:50 +03:00
|
|
|
s->queue_count = 1;
|
2020-10-29 12:32:55 +03:00
|
|
|
QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
|
|
|
|
regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
|
|
|
|
((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
|
2020-10-29 12:32:56 +03:00
|
|
|
regs->asq = cpu_to_le64(q->sq.iova);
|
|
|
|
regs->acq = cpu_to_le64(q->cq.iova);
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
/* After setting up all control registers we can enable device now. */
|
2020-09-22 11:38:20 +03:00
|
|
|
regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
|
|
|
|
(ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
|
|
|
|
CC_EN_MASK);
|
2018-01-16 09:08:55 +03:00
|
|
|
/* Wait for CSTS.RDY = 1. */
|
|
|
|
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
2020-09-22 11:38:21 +03:00
|
|
|
deadline = now + timeout_ms * SCALE_MS;
|
2020-09-22 11:38:20 +03:00
|
|
|
while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
|
2018-01-16 09:08:55 +03:00
|
|
|
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
|
|
|
|
error_setg(errp, "Timeout while waiting for device to start (%"
|
|
|
|
PRId64 " ms)",
|
|
|
|
timeout_ms);
|
|
|
|
ret = -ETIMEDOUT;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-21 22:53:59 +03:00
|
|
|
ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
|
2018-01-16 09:08:55 +03:00
|
|
|
VFIO_PCI_MSIX_IRQ_INDEX, errp);
|
|
|
|
if (ret) {
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
2020-08-21 22:53:59 +03:00
|
|
|
aio_set_event_notifier(bdrv_get_aio_context(bs),
|
|
|
|
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
aio: remove aio_disable_external() API
All callers now pass is_external=false to aio_set_fd_handler() and
aio_set_event_notifier(). The aio_disable_external() API that
temporarily disables fd handlers that were registered is_external=true
is therefore dead code.
Remove aio_disable_external(), aio_enable_external(), and the
is_external arguments to aio_set_fd_handler() and
aio_set_event_notifier().
The entire test-fdmon-epoll test is removed because its sole purpose was
testing aio_disable_external().
Parts of this patch were generated using the following coccinelle
(https://coccinelle.lip6.fr/) semantic patch:
@@
expression ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque;
@@
- aio_set_fd_handler(ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque)
+ aio_set_fd_handler(ctx, fd, io_read, io_write, io_poll, io_poll_ready, opaque)
@@
expression ctx, notifier, is_external, io_read, io_poll, io_poll_ready;
@@
- aio_set_event_notifier(ctx, notifier, is_external, io_read, io_poll, io_poll_ready)
+ aio_set_event_notifier(ctx, notifier, io_read, io_poll, io_poll_ready)
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230516190238.8401-21-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2023-05-16 22:02:38 +03:00
|
|
|
nvme_handle_event, nvme_poll_cb,
|
2021-12-07 16:23:31 +03:00
|
|
|
nvme_poll_ready);
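/*
 * Illustration of what the semantic patch quoted above does to this call
 * site (a sketch only; the pre-change form is reconstructed from that
 * patch and from the statement that all callers passed is_external=false):
 *
 *   - aio_set_event_notifier(ctx, notifier, false,
 *                            nvme_handle_event, nvme_poll_cb, nvme_poll_ready);
 *   + aio_set_event_notifier(ctx, notifier,
 *                            nvme_handle_event, nvme_poll_cb, nvme_poll_ready);
 *
 * The is_external argument is simply dropped; the call above already uses
 * the new form.
 */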
|
2018-01-16 09:08:55 +03:00
|
|
|
|
2020-10-29 12:32:51 +03:00
|
|
|
if (!nvme_identify(bs, namespace, errp)) {
|
2018-01-16 09:08:55 +03:00
|
|
|
ret = -EIO;
|
2018-07-12 05:54:20 +03:00
|
|
|
goto out;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up command queues. */
|
|
|
|
if (!nvme_add_io_queue(bs, errp)) {
|
|
|
|
ret = -EIO;
|
|
|
|
}
|
2018-07-12 05:54:20 +03:00
|
|
|
out:
|
2020-09-22 11:38:18 +03:00
|
|
|
if (regs) {
|
|
|
|
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
|
|
|
|
}
|
|
|
|
|
2018-07-12 05:54:20 +03:00
|
|
|
/* Cleaning up is done in nvme_file_open() upon error. */
|
2018-01-16 09:08:55 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
|
|
|
|
*
|
|
|
|
* nvme://0000:44:00.0/1
|
|
|
|
*
|
|
|
|
* where the "nvme://" is a fixed form of the protocol prefix, the middle part
|
|
|
|
* is the PCI address, and the last part is the namespace number starting from
|
|
|
|
* 1 according to the NVMe spec. */
|
|
|
|
static void nvme_parse_filename(const char *filename, QDict *options,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int pref = strlen("nvme://");
|
|
|
|
|
|
|
|
if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
|
|
|
|
const char *tmp = filename + pref;
|
|
|
|
char *device;
|
|
|
|
const char *namespace;
|
|
|
|
unsigned long ns;
|
|
|
|
const char *slash = strchr(tmp, '/');
|
|
|
|
if (!slash) {
|
2018-03-23 17:32:01 +03:00
|
|
|
qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
|
2018-01-16 09:08:55 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
device = g_strndup(tmp, slash - tmp);
|
2018-03-23 17:32:01 +03:00
|
|
|
qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
|
2018-01-16 09:08:55 +03:00
|
|
|
g_free(device);
|
|
|
|
namespace = slash + 1;
|
|
|
|
if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
|
|
|
|
error_setg(errp, "Invalid namespace '%s', positive number expected",
|
|
|
|
namespace);
|
|
|
|
return;
|
|
|
|
}
|
2018-03-23 17:32:01 +03:00
|
|
|
qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
|
|
|
|
*namespace ? namespace : "1");
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
}
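/*
 * Parsing examples (a sketch of the resulting option dictionary; the
 * NVME_BLOCK_OPT_* macros are the option keys used above):
 *
 *   "nvme://0000:44:00.0/2"  ->  device    = "0000:44:00.0"
 *                                namespace = "2"
 *   "nvme://0000:44:00.0/"   ->  namespace defaults to "1" (empty after '/')
 *   "nvme://0000:44:00.0"    ->  only the device is set here; the namespace
 *                                falls back to 1 in nvme_file_open()
 */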
|
|
|
|
|
|
|
|
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
NvmeCmd cmd = {
|
|
|
|
.opcode = NVME_ADM_CMD_SET_FEATURES,
|
|
|
|
.nsid = cpu_to_le32(s->nsid),
|
|
|
|
.cdw10 = cpu_to_le32(0x06),
|
|
|
|
.cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
|
|
|
|
};
|
|
|
|
|
2020-10-29 12:32:57 +03:00
|
|
|
ret = nvme_admin_cmd_sync(bs, &cmd);
|
2018-01-16 09:08:55 +03:00
|
|
|
if (ret) {
|
|
|
|
error_setg(errp, "Failed to configure NVMe write cache");
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_close(BlockDriverState *bs)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
2020-10-29 12:32:50 +03:00
|
|
|
for (unsigned i = 0; i < s->queue_count; ++i) {
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_free_queue_pair(s->queues[i]);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
2018-07-12 05:54:20 +03:00
|
|
|
g_free(s->queues);
|
2020-08-21 22:53:59 +03:00
|
|
|
aio_set_event_notifier(bdrv_get_aio_context(bs),
|
|
|
|
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
2023-05-16 22:02:38 +03:00
|
|
|
NULL, NULL, NULL);
|
2020-08-21 22:53:59 +03:00
|
|
|
event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
|
2020-10-29 12:33:04 +03:00
|
|
|
qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
|
|
|
|
0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
|
2018-01-16 09:08:55 +03:00
|
|
|
qemu_vfio_close(s->vfio);
|
2019-02-01 22:29:30 +03:00
|
|
|
|
|
|
|
g_free(s->device);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
const char *device;
|
|
|
|
QemuOpts *opts;
|
|
|
|
int namespace;
|
|
|
|
int ret;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
2019-09-13 16:36:26 +03:00
|
|
|
bs->supported_write_flags = BDRV_REQ_FUA;
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
|
|
|
|
qemu_opts_absorb_qdict(opts, options, &error_abort);
|
|
|
|
device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
|
|
|
|
if (!device) {
|
|
|
|
error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
|
|
|
|
qemu_opts_del(opts);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
|
|
|
|
ret = nvme_init(bs, device, namespace, errp);
|
|
|
|
qemu_opts_del(opts);
|
|
|
|
if (ret) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (flags & BDRV_O_NOCACHE) {
|
|
|
|
if (!s->write_cache_supported) {
|
|
|
|
error_setg(errp,
|
|
|
|
"NVMe controller doesn't support write cache configuration");
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
|
|
|
ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
|
|
|
|
errp);
|
|
|
|
}
|
|
|
|
if (ret) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
|
|
nvme_close(bs);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-01-13 23:42:04 +03:00
|
|
|
static int64_t coroutine_fn nvme_co_getlength(BlockDriverState *bs)
|
2018-01-16 09:08:55 +03:00
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2019-07-16 19:30:19 +03:00
|
|
|
return s->nsze << s->blkshift;
|
|
|
|
}
|
2018-01-16 09:08:55 +03:00
|
|
|
|
2019-07-30 14:48:12 +03:00
|
|
|
static uint32_t nvme_get_blocksize(BlockDriverState *bs)
|
2019-07-16 19:30:19 +03:00
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2019-07-30 14:48:12 +03:00
|
|
|
assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
|
|
|
|
return UINT32_C(1) << s->blkshift;
|
2019-07-16 19:30:19 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
|
|
|
{
|
2019-07-30 14:48:12 +03:00
|
|
|
uint32_t blocksize = nvme_get_blocksize(bs);
|
2019-07-16 19:30:19 +03:00
|
|
|
bsz->phys = blocksize;
|
|
|
|
bsz->log = blocksize;
|
|
|
|
return 0;
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Called with s->dma_map_lock */
|
|
|
|
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
|
|
|
|
QEMUIOVector *qiov)
|
|
|
|
{
|
|
|
|
int r = 0;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
s->dma_map_count -= qiov->size;
|
|
|
|
if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
|
|
|
|
r = qemu_vfio_dma_reset_temporary(s->vfio);
|
|
|
|
if (!r) {
|
|
|
|
qemu_co_queue_restart_all(&s->dma_flush_queue);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
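/*
 * Note on the handshake around s->dma_map_lock (added for clarity):
 * nvme_cmd_map_qiov() below increases s->dma_map_count by the number of
 * bytes it maps and, when the VFIO container runs out of IOVA space, waits
 * on s->dma_flush_queue.  The helper above decreases the count again and,
 * once it hits zero while waiters are queued, resets the temporary
 * mappings and restarts the waiting coroutines so they can retry.
 */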
|
|
|
|
|
|
|
|
/* Called with s->dma_map_lock */
|
|
|
|
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
|
|
|
|
NVMeRequest *req, QEMUIOVector *qiov)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
uint64_t *pagelist = req->prp_list_page;
|
|
|
|
int i, j, r;
|
|
|
|
int entries = 0;
|
2021-09-02 10:00:25 +03:00
|
|
|
Error *local_err = NULL, **errp = NULL;
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
assert(qiov->size);
|
|
|
|
assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
|
|
|
|
assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
|
|
|
|
for (i = 0; i < qiov->niov; ++i) {
|
|
|
|
bool retry = true;
|
|
|
|
uint64_t iova;
|
2020-10-29 12:33:03 +03:00
|
|
|
size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
|
2022-03-23 18:57:22 +03:00
|
|
|
qemu_real_host_page_size());
|
2018-01-16 09:08:55 +03:00
|
|
|
try_map:
|
|
|
|
r = qemu_vfio_dma_map(s->vfio,
|
|
|
|
qiov->iov[i].iov_base,
|
2021-09-02 10:00:25 +03:00
|
|
|
len, true, &iova, errp);
|
2021-07-23 22:58:43 +03:00
|
|
|
if (r == -ENOSPC) {
|
|
|
|
/*
|
|
|
|
* In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
|
|
|
|
* ioctl returns -ENOSPC to signal the user exhausted the DMA
|
|
|
|
* mappings available for a container since Linux kernel commit
|
|
|
|
* 492855939bdb ("vfio/type1: Limit DMA mappings per container",
|
|
|
|
* April 2019, see CVE-2019-3882).
|
|
|
|
*
|
|
|
|
* This block driver already handles this error path by checking
|
|
|
|
* for the -ENOMEM error, so we directly replace -ENOSPC by
|
|
|
|
* -ENOMEM. Beside, -ENOSPC has a specific meaning for blockdev
|
|
|
|
* coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
|
|
|
|
* BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
|
|
|
|
* to add more storage to the blockdev. Not something we can do
|
|
|
|
* easily with an IOMMU :)
|
|
|
|
*/
|
|
|
|
r = -ENOMEM;
|
|
|
|
}
|
2018-01-16 09:08:55 +03:00
|
|
|
if (r == -ENOMEM && retry) {
|
2021-07-23 22:58:43 +03:00
|
|
|
/*
|
|
|
|
* We exhausted the DMA mappings available for our container:
|
|
|
|
* recycle the volatile IOVA mappings.
|
|
|
|
*/
|
2018-01-16 09:08:55 +03:00
|
|
|
retry = false;
|
|
|
|
trace_nvme_dma_flush_queue_wait(s);
|
|
|
|
if (s->dma_map_count) {
|
|
|
|
trace_nvme_dma_map_flush(s);
|
|
|
|
qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
|
|
|
|
} else {
|
|
|
|
r = qemu_vfio_dma_reset_temporary(s->vfio);
|
|
|
|
if (r) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
2021-09-02 10:00:25 +03:00
|
|
|
errp = &local_err;
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
goto try_map;
|
|
|
|
}
|
|
|
|
if (r) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
|
2018-11-01 13:38:07 +03:00
|
|
|
pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
|
|
|
|
qiov->iov[i].iov_len / s->page_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
s->dma_map_count += qiov->size;
|
|
|
|
|
|
|
|
assert(entries <= s->page_size / sizeof(uint64_t));
|
|
|
|
switch (entries) {
|
|
|
|
case 0:
|
|
|
|
abort();
|
|
|
|
case 1:
|
2020-07-06 09:12:46 +03:00
|
|
|
cmd->dptr.prp1 = pagelist[0];
|
|
|
|
cmd->dptr.prp2 = 0;
|
2018-01-16 09:08:55 +03:00
|
|
|
break;
|
|
|
|
case 2:
|
2020-07-06 09:12:46 +03:00
|
|
|
cmd->dptr.prp1 = pagelist[0];
|
|
|
|
cmd->dptr.prp2 = pagelist[1];
|
2018-01-16 09:08:55 +03:00
|
|
|
break;
|
|
|
|
default:
|
2020-07-06 09:12:46 +03:00
|
|
|
cmd->dptr.prp1 = pagelist[0];
|
|
|
|
cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
|
2018-01-16 09:08:55 +03:00
|
|
|
break;
|
|
|
|
}
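/*
 * Recap of the PRP encoding chosen above (worked example assuming 4 KiB
 * pages): for a three-page transfer the pagelist holds three little-endian
 * IOVAs.  prp1 takes pagelist[0] and prp2 points into the DMA-mapped copy
 * of the list one entry in (req->prp_list_iova + 8), so the controller
 * fetches the remaining addresses starting at pagelist[1].  Two pages need
 * no list (prp2 is the second address directly) and a single page leaves
 * prp2 as 0.
 */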
|
|
|
|
trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
|
|
|
|
for (i = 0; i < entries; ++i) {
|
|
|
|
trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
|
|
/* No need to unmap [0 - i) iovs even if we've failed, since we don't
|
|
|
|
* increment s->dma_map_count. This is okay for fixed mapping memory areas
|
|
|
|
* because they are already mapped before calling this function; for
|
|
|
|
* temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
|
|
|
|
* calling qemu_vfio_dma_reset_temporary when necessary. */
|
2021-09-02 10:00:25 +03:00
|
|
|
if (local_err) {
|
|
|
|
error_reportf_err(local_err, "Cannot map buffer for DMA: ");
|
|
|
|
}
|
2018-01-16 09:08:55 +03:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
Coroutine *co;
|
|
|
|
int ret;
|
|
|
|
AioContext *ctx;
|
|
|
|
} NVMeCoData;
|
|
|
|
|
|
|
|
static void nvme_rw_cb_bh(void *opaque)
|
|
|
|
{
|
|
|
|
NVMeCoData *data = opaque;
|
|
|
|
qemu_coroutine_enter(data->co);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_rw_cb(void *opaque, int ret)
|
|
|
|
{
|
|
|
|
NVMeCoData *data = opaque;
|
|
|
|
data->ret = ret;
|
|
|
|
if (!data->co) {
|
|
|
|
/* The rw coroutine hasn't yielded, don't try to enter. */
|
|
|
|
return;
|
|
|
|
}
|
2019-09-17 14:58:19 +03:00
|
|
|
replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
|
|
|
|
uint64_t offset, uint64_t bytes,
|
|
|
|
QEMUIOVector *qiov,
|
|
|
|
bool is_write,
|
|
|
|
int flags)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2020-08-21 22:53:48 +03:00
|
|
|
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
|
2018-01-16 09:08:55 +03:00
|
|
|
NVMeRequest *req;
|
2019-07-16 19:30:19 +03:00
|
|
|
|
|
|
|
uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
|
2018-01-16 09:08:55 +03:00
|
|
|
(flags & BDRV_REQ_FUA ? 1 << 30 : 0);
|
|
|
|
NvmeCmd cmd = {
|
|
|
|
.opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
|
|
|
|
.nsid = cpu_to_le32(s->nsid),
|
2019-07-16 19:30:19 +03:00
|
|
|
.cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
|
|
|
|
.cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
|
2018-01-16 09:08:55 +03:00
|
|
|
.cdw12 = cpu_to_le32(cdw12),
|
|
|
|
};
|
|
|
|
NVMeCoData data = {
|
|
|
|
.ctx = bdrv_get_aio_context(bs),
|
|
|
|
.ret = -EINPROGRESS,
|
|
|
|
};
|
|
|
|
|
|
|
|
trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
|
2020-10-29 12:32:50 +03:00
|
|
|
assert(s->queue_count > 1);
|
2018-01-16 09:08:55 +03:00
|
|
|
req = nvme_get_free_req(ioq);
|
|
|
|
assert(req);
|
|
|
|
|
|
|
|
qemu_co_mutex_lock(&s->dma_map_lock);
|
|
|
|
r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
|
|
|
|
qemu_co_mutex_unlock(&s->dma_map_lock);
|
|
|
|
if (r) {
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_put_free_req_and_wake(ioq, req);
|
2018-01-16 09:08:55 +03:00
|
|
|
return r;
|
|
|
|
}
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
data.co = qemu_coroutine_self();
|
|
|
|
while (data.ret == -EINPROGRESS) {
|
|
|
|
qemu_coroutine_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_co_mutex_lock(&s->dma_map_lock);
|
|
|
|
r = nvme_cmd_unmap_qiov(bs, qiov);
|
|
|
|
qemu_co_mutex_unlock(&s->dma_map_lock);
|
|
|
|
if (r) {
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
|
|
|
|
return data.ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
|
|
|
|
const QEMUIOVector *qiov)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
for (i = 0; i < qiov->niov; ++i) {
|
2020-10-29 12:33:03 +03:00
|
|
|
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
|
2022-03-23 18:57:22 +03:00
|
|
|
qemu_real_host_page_size()) ||
|
|
|
|
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
|
2018-01-16 09:08:55 +03:00
|
|
|
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
|
|
|
|
qiov->iov[i].iov_len, s->page_size);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-09-22 11:49:10 +03:00
|
|
|
static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
|
|
|
|
uint64_t offset, uint64_t bytes,
|
|
|
|
QEMUIOVector *qiov, bool is_write,
|
|
|
|
int flags)
|
2018-01-16 09:08:55 +03:00
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
int r;
|
2021-10-06 19:49:27 +03:00
|
|
|
QEMU_AUTO_VFREE uint8_t *buf = NULL;
|
2018-01-16 09:08:55 +03:00
|
|
|
QEMUIOVector local_qiov;
|
2022-03-23 18:57:22 +03:00
|
|
|
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
|
2018-01-16 09:08:55 +03:00
|
|
|
assert(QEMU_IS_ALIGNED(offset, s->page_size));
|
|
|
|
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
|
|
|
|
assert(bytes <= s->max_transfer);
|
|
|
|
if (nvme_qiov_aligned(bs, qiov)) {
|
block/nvme: Add driver statistics for access alignment and hw errors
Keep statistics of some hardware errors, and number of
aligned/unaligned I/O accesses.
QMP example booting a full RHEL 8.3 aarch64 guest:
{ "execute": "query-blockstats" }
{
"return": [
{
"device": "",
"node-name": "drive0",
"stats": {
"flush_total_time_ns": 6026948,
"wr_highest_offset": 3383991230464,
"wr_total_time_ns": 807450995,
"failed_wr_operations": 0,
"failed_rd_operations": 0,
"wr_merged": 3,
"wr_bytes": 50133504,
"failed_unmap_operations": 0,
"failed_flush_operations": 0,
"account_invalid": false,
"rd_total_time_ns": 1846979900,
"flush_operations": 130,
"wr_operations": 659,
"rd_merged": 1192,
"rd_bytes": 218244096,
"account_failed": false,
"idle_time_ns": 2678641497,
"rd_operations": 7406,
},
"driver-specific": {
"driver": "nvme",
"completion-errors": 0,
"unaligned-accesses": 2959,
"aligned-accesses": 4477
},
"qdev": "/machine/peripheral-anon/device[0]/virtio-backend"
}
]
}
Suggested-by: Stefan Hajnoczi <stefanha@gmail.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20201001162939.1567915-1-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2020-10-01 19:29:39 +03:00
|
|
|
s->stats.aligned_accesses++;
|
2018-01-16 09:08:55 +03:00
|
|
|
return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
|
|
|
|
}
|
2020-10-01 19:29:39 +03:00
|
|
|
s->stats.unaligned_accesses++;
|
2018-01-16 09:08:55 +03:00
|
|
|
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
|
2022-03-23 18:57:22 +03:00
|
|
|
buf = qemu_try_memalign(qemu_real_host_page_size(), len);
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
if (!buf) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
qemu_iovec_init(&local_qiov, 1);
|
|
|
|
if (is_write) {
|
|
|
|
qemu_iovec_to_buf(qiov, 0, buf, bytes);
|
|
|
|
}
|
|
|
|
qemu_iovec_add(&local_qiov, buf, bytes);
|
|
|
|
r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
|
|
|
|
qemu_iovec_destroy(&local_qiov);
|
|
|
|
if (!r && !is_write) {
|
|
|
|
qemu_iovec_from_buf(qiov, 0, buf, bytes);
|
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
|
block: use int64_t instead of uint64_t in driver read handlers
We are generally moving to int64_t for both offset and bytes parameters
on all io paths.
Main motivation is realization of 64-bit write_zeroes operation for
fast zeroing large disk chunks, up to the whole disk.
We chose signed type, to be consistent with off_t (which is signed) and
with possibility for signed return type (where negative value means
error).
So, convert driver read handlers parameters which are already 64bit to
signed type.
While being here, convert also flags parameter to be BdrvRequestFlags.
Now let's consider all callers. Simple
git grep '\->bdrv_\(aio\|co\)_preadv\(_part\)\?'
shows that there are three callers of the driver function:
bdrv_driver_preadv() in block/io.c, passes int64_t, checked by
bdrv_check_qiov_request() to be non-negative.
qcow2_load_vmstate() does bdrv_check_qiov_request().
do_perform_cow_read() has uint64_t argument. And a lot of things in
qcow2 driver are uint64_t, so converting it is big job. But we must
not work with requests that don't satisfy bdrv_check_qiov_request(),
so let's just assert it here.
Still, the functions may be called directly, not only by drv->...
Let's check:
git grep '\.bdrv_\(aio\|co\)_preadv\(_part\)\?\s*=' | \
awk '{print $4}' | sed 's/,//' | sed 's/&//' | sort | uniq | \
while read func; do git grep "$func(" | \
grep -v "$func(BlockDriverState"; done
The only one such caller:
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
...
ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
in tests/unit/test-bdrv-drain.c, and it's OK obviously.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-4-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: fix typos]
Signed-off-by: Eric Blake <eblake@redhat.com>
2021-09-03 13:27:59 +03:00
|
|
|
int64_t offset, int64_t bytes,
|
|
|
|
QEMUIOVector *qiov,
|
|
|
|
BdrvRequestFlags flags)
|
2018-01-16 09:08:55 +03:00
|
|
|
{
|
|
|
|
return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
|
block: use int64_t instead of uint64_t in driver write handlers
We are generally moving to int64_t for both offset and bytes parameters
on all io paths.
Main motivation is realization of 64-bit write_zeroes operation for
fast zeroing large disk chunks, up to the whole disk.
We chose signed type, to be consistent with off_t (which is signed) and
with possibility for signed return type (where negative value means
error).
So, convert driver write handlers parameters which are already 64bit to
signed type.
While being here, convert also flags parameter to be BdrvRequestFlags.
Now let's consider all callers. Simple
git grep '\->bdrv_\(aio\|co\)_pwritev\(_part\)\?'
shows that there are three callers of the driver function:
bdrv_driver_pwritev() and bdrv_driver_pwritev_compressed() in
block/io.c, both pass int64_t, checked by bdrv_check_qiov_request() to
be non-negative.
qcow2_save_vmstate() does bdrv_check_qiov_request().
Still, the functions may be called directly, not only by drv->...
Let's check:
git grep '\.bdrv_\(aio\|co\)_pwritev\(_part\)\?\s*=' | \
awk '{print $4}' | sed 's/,//' | sed 's/&//' | sort | uniq | \
while read func; do git grep "$func(" | \
grep -v "$func(BlockDriverState"; done
shows several callers:
qcow2:
qcow2_co_truncate() write at most up to @offset, which is checked in
generic qcow2_co_truncate() by bdrv_check_request().
qcow2_co_pwritev_compressed_task() pass the request (or part of the
request) that already went through normal write path, so it should
be OK
qcow:
qcow_co_pwritev_compressed() pass int64_t, it's updated by this patch
quorum:
quorum_co_pwrite_zeroes() pass int64_t and int - OK
throttle:
throttle_co_pwritev_compressed() pass int64_t, it's updated by this
patch
vmdk:
vmdk_co_pwritev_compressed() pass int64_t, it's updated by this
patch
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-5-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2021-09-03 13:28:00 +03:00
|
|
|
int64_t offset, int64_t bytes,
|
|
|
|
QEMUIOVector *qiov,
|
|
|
|
BdrvRequestFlags flags)
|
2018-01-16 09:08:55 +03:00
|
|
|
{
|
|
|
|
return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2020-08-21 22:53:48 +03:00
|
|
|
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
|
2018-01-16 09:08:55 +03:00
|
|
|
NVMeRequest *req;
|
|
|
|
NvmeCmd cmd = {
|
|
|
|
.opcode = NVME_CMD_FLUSH,
|
|
|
|
.nsid = cpu_to_le32(s->nsid),
|
|
|
|
};
|
|
|
|
NVMeCoData data = {
|
|
|
|
.ctx = bdrv_get_aio_context(bs),
|
|
|
|
.ret = -EINPROGRESS,
|
|
|
|
};
|
|
|
|
|
2020-10-29 12:32:50 +03:00
|
|
|
assert(s->queue_count > 1);
|
2018-01-16 09:08:55 +03:00
|
|
|
req = nvme_get_free_req(ioq);
|
|
|
|
assert(req);
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
data.co = qemu_coroutine_self();
|
|
|
|
if (data.ret == -EINPROGRESS) {
|
|
|
|
qemu_coroutine_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
return data.ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-13 16:36:26 +03:00
|
|
|
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
|
|
|
|
int64_t offset,
|
block: use int64_t instead of int in driver write_zeroes handlers
We are generally moving to int64_t for both offset and bytes parameters
on all io paths.
Main motivation is realization of 64-bit write_zeroes operation for
fast zeroing large disk chunks, up to the whole disk.
We chose signed type, to be consistent with off_t (which is signed) and
with possibility for signed return type (where negative value means
error).
So, convert driver write_zeroes handlers bytes parameter to int64_t.
The only caller of all the updated functions is bdrv_co_do_pwrite_zeroes().
bdrv_co_do_pwrite_zeroes() itself is of course OK with widening of
callee parameter type. Also, bdrv_co_do_pwrite_zeroes()'s
max_write_zeroes is limited to INT_MAX. So, updated functions all are
safe, they will not get "bytes" larger than before.
Still, let's look through all updated functions, and add assertions to
the ones which are actually unprepared to values larger than INT_MAX.
For these drivers also set explicit max_pwrite_zeroes limit.
Let's go:
blkdebug: calculations can't overflow, thanks to
bdrv_check_qiov_request() in generic layer. rule_check() and
bdrv_co_pwrite_zeroes() both have 64bit argument.
blklogwrites: pass to blk_log_writes_co_log() with 64bit argument.
blkreplay, copy-on-read, filter-compress: pass to
bdrv_co_pwrite_zeroes() which is OK
copy-before-write: Calls cbw_do_copy_before_write() and
bdrv_co_pwrite_zeroes, both have 64bit argument.
file-posix: both handler calls raw_do_pwrite_zeroes, which is updated.
In raw_do_pwrite_zeroes() calculations are OK due to
bdrv_check_qiov_request(), bytes go to RawPosixAIOData::aio_nbytes
which is uint64_t.
Check also where that uint64_t gets handed:
handle_aiocb_write_zeroes_block() passes a uint64_t[2] to
ioctl(BLKZEROOUT), handle_aiocb_write_zeroes() calls do_fallocate()
which takes off_t (and we compile to always have 64-bit off_t), as
does handle_aiocb_write_zeroes_unmap. All look safe.
gluster: bytes go to GlusterAIOCB::size which is int64_t and to
glfs_zerofill_async works with off_t.
iscsi: Aha, here we deal with iscsi_writesame16_task() that has
uint32_t num_blocks argument and iscsi_writesame16_task() has
uint16_t argument. Make comments, add assertions and clarify
max_pwrite_zeroes calculation.
iscsi_allocmap_() functions already have an int64_t argument
is_byte_request_lun_aligned is simple to update, do it.
mirror_top: pass to bdrv_mirror_top_do_write which has uint64_t
argument
nbd: Aha, here we have protocol limitation, and NBDRequest::len is
uint32_t. max_pwrite_zeroes is cleanly set to 32bit value, so we are
OK for now.
nvme: Again, protocol limitation. And no inherent limit for
write-zeroes at all. But from code that calculates cdw12 it's obvious
that we do have limit and alignment. Let's clarify it. Also,
obviously the code is not prepared to handle bytes=0. Let's handle
this case too.
trace events already 64bit
preallocate: pass to handle_write() and bdrv_co_pwrite_zeroes(), both
64bit.
rbd: pass to qemu_rbd_start_co() which is 64bit.
qcow2: offset + bytes and alignment still work fine (thanks to
bdrv_check_qiov_request()), so tail calculation is OK
qcow2_subcluster_zeroize() has 64bit argument, should be OK
trace events updated
qed: qed_co_request wants int nb_sectors. Also in code we have size_t
used for request length which may be 32bit. So, let's just keep
INT_MAX as a limit (aligning it down to pwrite_zeroes_alignment) and
don't care.
raw-format: Is OK. raw_adjust_offset and bdrv_co_pwrite_zeroes are both
64bit.
throttle: Both throttle_group_co_io_limits_intercept() and
bdrv_co_pwrite_zeroes() are 64bit.
vmdk: pass to vmdk_pwritev which is 64bit
quorum: pass to quorum_co_pwritev() which is 64bit
Hooray!
At this point all block drivers are prepared to support 64bit
write-zero requests, or have explicitly set max_pwrite_zeroes.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-8-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: use <= rather than < in assertions relying on max_pwrite_zeroes]
Signed-off-by: Eric Blake <eblake@redhat.com>
2021-09-03 13:28:03 +03:00
|
|
|
int64_t bytes,
|
2019-09-13 16:36:26 +03:00
|
|
|
BdrvRequestFlags flags)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2020-08-21 22:53:48 +03:00
|
|
|
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
|
2019-09-13 16:36:26 +03:00
|
|
|
NVMeRequest *req;
|
2021-09-03 13:28:03 +03:00
|
|
|
uint32_t cdw12;
|
2019-09-13 16:36:26 +03:00
|
|
|
|
|
|
|
if (!s->supports_write_zeroes) {
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2021-09-03 13:28:03 +03:00
|
|
|
if (bytes == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
|
|
|
|
/*
|
|
|
|
* We should not lose information. pwrite_zeroes_alignment and
|
|
|
|
* max_pwrite_zeroes guarantees it.
|
|
|
|
*/
|
|
|
|
assert(((cdw12 + 1) << s->blkshift) == bytes);
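/*
 * Worked example: with 512-byte logical blocks (s->blkshift == 9) a 4 KiB
 * request gives bytes >> s->blkshift == 8, hence cdw12 == 7 (the NLB field
 * is a zero-based block count), and the assertion verifies that the 16-bit
 * mask did not truncate anything.
 */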
|
|
|
|
|
2019-09-13 16:36:26 +03:00
|
|
|
NvmeCmd cmd = {
|
2020-03-31 00:10:13 +03:00
|
|
|
.opcode = NVME_CMD_WRITE_ZEROES,
|
2019-09-13 16:36:26 +03:00
|
|
|
.nsid = cpu_to_le32(s->nsid),
|
|
|
|
.cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
|
|
|
|
.cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
|
|
|
|
};
|
|
|
|
|
|
|
|
NVMeCoData data = {
|
|
|
|
.ctx = bdrv_get_aio_context(bs),
|
|
|
|
.ret = -EINPROGRESS,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (flags & BDRV_REQ_MAY_UNMAP) {
|
|
|
|
cdw12 |= (1 << 25);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & BDRV_REQ_FUA) {
|
|
|
|
cdw12 |= (1 << 30);
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd.cdw12 = cpu_to_le32(cdw12);
|
|
|
|
|
|
|
|
trace_nvme_write_zeroes(s, offset, bytes, flags);
|
2020-10-29 12:32:50 +03:00
|
|
|
assert(s->queue_count > 1);
|
2019-09-13 16:36:26 +03:00
|
|
|
req = nvme_get_free_req(ioq);
|
|
|
|
assert(req);
|
|
|
|
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
2019-09-13 16:36:26 +03:00
|
|
|
|
|
|
|
data.co = qemu_coroutine_self();
|
|
|
|
while (data.ret == -EINPROGRESS) {
|
|
|
|
qemu_coroutine_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_nvme_rw_done(s, true, offset, bytes, data.ret);
|
|
|
|
return data.ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-13 16:36:27 +03:00
|
|
|
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
|
|
|
|
int64_t offset,
|
block: use int64_t instead of int in driver discard handlers
We are generally moving to int64_t for both offset and bytes parameters
on all io paths.
Main motivation is realization of 64-bit write_zeroes operation for
fast zeroing large disk chunks, up to the whole disk.
We chose signed type, to be consistent with off_t (which is signed) and
with possibility for signed return type (where negative value means
error).
So, convert driver discard handlers bytes parameter to int64_t.
The only caller of all the updated functions is bdrv_co_pdiscard in
block/io.c. It is already prepared to work with 64bit requests, but
pass at most max(bs->bl.max_pdiscard, INT_MAX) to the driver.
Let's look at all updated functions:
blkdebug: all calculations are still OK, thanks to
bdrv_check_qiov_request().
both rule_check and bdrv_co_pdiscard are 64bit
blklogwrites: pass to blk_log_writes_co_log() which is 64bit
blkreplay, copy-on-read, filter-compress: pass to bdrv_co_pdiscard, OK
copy-before-write: pass to bdrv_co_pdiscard which is 64bit and to
cbw_do_copy_before_write which is 64bit
file-posix: one handler calls raw_account_discard() is 64bit and both
handlers calls raw_do_pdiscard(). Update raw_do_pdiscard, which pass
to RawPosixAIOData::aio_nbytes, which is 64bit (and calls
raw_account_discard())
gluster: somehow, third argument of glfs_discard_async is size_t.
Let's set max_pdiscard accordingly.
iscsi: iscsi_allocmap_set_invalid is 64bit,
!is_byte_request_lun_aligned is 64bit.
list.num is uint32_t. Let's clarify max_pdiscard and
pdiscard_alignment.
mirror_top: pass to bdrv_mirror_top_do_write() which is
64bit
nbd: protocol limitation. max_pdiscard is already set strictly enough,
keep it as is for now.
nvme: buf.nlb is uint32_t and we do shift. So, add corresponding limits
to nvme_refresh_limits().
preallocate: pass to bdrv_co_pdiscard() which is 64bit.
rbd: pass to qemu_rbd_start_co() which is 64bit.
qcow2: calculations are still OK, thanks to bdrv_check_qiov_request(),
qcow2_cluster_discard() is 64bit.
raw-format: raw_adjust_offset() is 64bit, bdrv_co_pdiscard too.
throttle: pass to bdrv_co_pdiscard() which is 64bit and to
throttle_group_co_io_limits_intercept() which is 64bit as well.
test-block-iothread: bytes argument is unused
Great! Now all drivers are prepared to handle 64bit discard requests,
or else have explicit max_pdiscard limits.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-11-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2021-09-03 13:28:06 +03:00
|
|
|
int64_t bytes)
|
2019-09-13 16:36:27 +03:00
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2020-08-21 22:53:48 +03:00
|
|
|
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
|
2019-09-13 16:36:27 +03:00
|
|
|
NVMeRequest *req;
|
2021-10-06 19:49:27 +03:00
|
|
|
QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
|
2019-09-13 16:36:27 +03:00
|
|
|
QEMUIOVector local_qiov;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
NvmeCmd cmd = {
|
|
|
|
.opcode = NVME_CMD_DSM,
|
|
|
|
.nsid = cpu_to_le32(s->nsid),
|
|
|
|
.cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
|
|
|
|
.cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
|
|
|
|
};
|
|
|
|
|
|
|
|
NVMeCoData data = {
|
|
|
|
.ctx = bdrv_get_aio_context(bs),
|
|
|
|
.ret = -EINPROGRESS,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!s->supports_discard) {
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2020-10-29 12:32:50 +03:00
|
|
|
assert(s->queue_count > 1);
|
2019-09-13 16:36:27 +03:00
|
|
|
|
2021-09-03 13:28:06 +03:00
|
|
|
/*
|
|
|
|
* Filling the @buf requires @offset and @bytes to satisfy restrictions
|
|
|
|
* defined in nvme_refresh_limits().
|
|
|
|
*/
|
|
|
|
assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
|
|
|
|
assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
|
|
|
|
assert((bytes >> s->blkshift) <= UINT32_MAX);
|
|
|
|
|
2020-08-21 22:53:54 +03:00
|
|
|
buf = qemu_try_memalign(s->page_size, s->page_size);
|
2019-09-13 16:36:27 +03:00
|
|
|
if (!buf) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2020-08-21 22:53:53 +03:00
|
|
|
memset(buf, 0, s->page_size);
|
2019-09-13 16:36:27 +03:00
|
|
|
buf->nlb = cpu_to_le32(bytes >> s->blkshift);
|
|
|
|
buf->slba = cpu_to_le64(offset >> s->blkshift);
|
|
|
|
buf->cattr = 0;
|
|
|
|
|
|
|
|
qemu_iovec_init(&local_qiov, 1);
|
|
|
|
qemu_iovec_add(&local_qiov, buf, 4096);
|
|
|
|
|
|
|
|
req = nvme_get_free_req(ioq);
|
|
|
|
assert(req);
|
|
|
|
|
|
|
|
qemu_co_mutex_lock(&s->dma_map_lock);
|
|
|
|
ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
|
|
|
|
qemu_co_mutex_unlock(&s->dma_map_lock);
|
|
|
|
|
|
|
|
if (ret) {
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_put_free_req_and_wake(ioq, req);
|
2019-09-13 16:36:27 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_nvme_dsm(s, offset, bytes);
|
|
|
|
|
2020-06-17 16:22:00 +03:00
|
|
|
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
2019-09-13 16:36:27 +03:00
|
|
|
|
|
|
|
data.co = qemu_coroutine_self();
|
|
|
|
while (data.ret == -EINPROGRESS) {
|
|
|
|
qemu_coroutine_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_co_mutex_lock(&s->dma_map_lock);
|
|
|
|
ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
|
|
|
|
qemu_co_mutex_unlock(&s->dma_map_lock);
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = data.ret;
|
|
|
|
trace_nvme_dsm_done(s, offset, bytes, ret);
|
|
|
|
out:
|
|
|
|
qemu_iovec_destroy(&local_qiov);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
}
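To make the conversion performed above concrete, here is a minimal, self-contained sketch (hypothetical names and a simplified range struct; not the driver's code) of turning a byte range into the slba/nlb pair that nvme_co_pdiscard stores in buf->slba and buf->nlb, under the same alignment and 32-bit block-count preconditions the driver asserts:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a DSM range entry: context attributes,
 * a 32-bit block count (nlb) and a 64-bit starting LBA (slba). */
typedef struct {
    uint32_t cattr;   /* left 0, as the driver does */
    uint32_t nlb;
    uint64_t slba;
} DsmRangeSketch;

static DsmRangeSketch byte_range_to_dsm(int64_t offset, int64_t bytes,
                                        unsigned blkshift)
{
    DsmRangeSketch r = {0};

    /* Same preconditions as the assertions above: block alignment and a
     * block count that fits the 32-bit nlb field. */
    assert(offset % (1LL << blkshift) == 0);
    assert(bytes % (1LL << blkshift) == 0);
    assert((uint64_t)(bytes >> blkshift) <= UINT32_MAX);

    r.nlb = (uint32_t)(bytes >> blkshift);
    r.slba = (uint64_t)offset >> blkshift;
    return r;
}

int main(void)
{
    /* Example: discard 1 MiB at offset 4 MiB on a 4 KiB-block namespace. */
    DsmRangeSketch r = byte_range_to_dsm(4 << 20, 1 << 20, 12);
    printf("slba=%" PRIu64 " nlb=%" PRIu32 "\n", r.slba, r.nlb);
    return 0;
}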
|
|
|
|
|
2020-12-10 15:52:02 +03:00
|
|
|
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
|
|
|
|
bool exact, PreallocMode prealloc,
|
|
|
|
BdrvRequestFlags flags, Error **errp)
|
|
|
|
{
|
|
|
|
int64_t cur_length;
|
|
|
|
|
|
|
|
if (prealloc != PREALLOC_MODE_OFF) {
|
|
|
|
error_setg(errp, "Unsupported preallocation mode '%s'",
|
|
|
|
PreallocMode_str(prealloc));
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2023-01-13 23:42:04 +03:00
|
|
|
cur_length = nvme_co_getlength(bs);
|
2020-12-10 15:52:02 +03:00
|
|
|
if (offset != cur_length && exact) {
|
|
|
|
error_setg(errp, "Cannot resize NVMe devices");
|
|
|
|
return -ENOTSUP;
|
|
|
|
} else if (offset > cur_length) {
|
|
|
|
error_setg(errp, "Cannot grow NVMe devices");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2019-09-13 16:36:27 +03:00
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
|
|
|
|
BlockReopenQueue *queue, Error **errp)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-01 22:29:28 +03:00
|
|
|
static void nvme_refresh_filename(BlockDriverState *bs)
|
2018-01-16 09:08:55 +03:00
|
|
|
{
|
2019-02-01 22:29:30 +03:00
|
|
|
BDRVNVMeState *s = bs->opaque;
|
2018-01-16 09:08:55 +03:00
|
|
|
|
2019-02-01 22:29:30 +03:00
|
|
|
snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
|
|
|
|
s->device, s->nsid);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
bs->bl.opt_mem_alignment = s->page_size;
|
|
|
|
bs->bl.request_alignment = s->page_size;
|
|
|
|
bs->bl.max_transfer = s->max_transfer;
|
block: use int64_t instead of int in driver write_zeroes handlers
We are generally moving to int64_t for both offset and bytes parameters
on all io paths.
Main motivation is realization of 64-bit write_zeroes operation for
fast zeroing large disk chunks, up to the whole disk.
We chose signed type, to be consistent with off_t (which is signed) and
with possibility for signed return type (where negative value means
error).
So, convert driver write_zeroes handlers bytes parameter to int64_t.
The only caller of all the updated functions is bdrv_co_do_pwrite_zeroes().
bdrv_co_do_pwrite_zeroes() itself is of course OK with widening of
callee parameter type. Also, bdrv_co_do_pwrite_zeroes()'s
max_write_zeroes is limited to INT_MAX. So, updated functions all are
safe, they will not get "bytes" larger than before.
Still, let's look through all updated functions, and add assertions to
the ones which are actually unprepared to values larger than INT_MAX.
For these drivers also set explicit max_pwrite_zeroes limit.
Let's go:
blkdebug: calculations can't overflow, thanks to
bdrv_check_qiov_request() in generic layer. rule_check() and
bdrv_co_pwrite_zeroes() both have 64bit argument.
blklogwrites: pass to blk_log_writes_co_log() with 64bit argument.
blkreplay, copy-on-read, filter-compress: pass to
bdrv_co_pwrite_zeroes() which is OK
copy-before-write: Calls cbw_do_copy_before_write() and
bdrv_co_pwrite_zeroes, both have 64bit argument.
file-posix: both handlers call raw_do_pwrite_zeroes(), which is updated.
In raw_do_pwrite_zeroes() calculations are OK due to
bdrv_check_qiov_request(), bytes go to RawPosixAIOData::aio_nbytes
which is uint64_t.
Check also where that uint64_t gets handed:
handle_aiocb_write_zeroes_block() passes a uint64_t[2] to
ioctl(BLKZEROOUT), handle_aiocb_write_zeroes() calls do_fallocate()
which takes off_t (and we compile to always have 64-bit off_t), as
does handle_aiocb_write_zeroes_unmap. All look safe.
gluster: bytes go to GlusterAIOCB::size, which is int64_t, and to
glfs_zerofill_async(), which works with off_t.
iscsi: Aha, here we deal with iscsi_writesame16_task() that has
uint32_t num_blocks argument and iscsi_writesame16_task() has
uint16_t argument. Make comments, add assertions and clarify
max_pwrite_zeroes calculation.
iscsi_allocmap_() functions already have an int64_t argument
is_byte_request_lun_aligned is simple to update, do it.
mirror_top: pass to bdrv_mirror_top_do_write which has uint64_t
argument
nbd: Aha, here we have protocol limitation, and NBDRequest::len is
uint32_t. max_pwrite_zeroes is cleanly set to 32bit value, so we are
OK for now.
nvme: Again, protocol limitation. And no inherent limit for
write-zeroes at all. But from code that calculates cdw12 it's obvious
that we do have limit and alignment. Let's clarify it. Also,
obviously the code is not prepared to handle bytes=0. Let's handle
this case too.
trace events already 64bit
preallocate: pass to handle_write() and bdrv_co_pwrite_zeroes(), both
64bit.
rbd: pass to qemu_rbd_start_co() which is 64bit.
qcow2: offset + bytes and alignment still work fine (thanks to
bdrv_check_qiov_request()), so tail calculation is OK
qcow2_subcluster_zeroize() has 64bit argument, should be OK
trace events updated
qed: qed_co_request wants int nb_sectors. Also in code we have size_t
used for request length which may be 32bit. So, let's just keep
INT_MAX as a limit (aligning it down to pwrite_zeroes_alignment) and
don't care.
raw-format: Is OK. raw_adjust_offset and bdrv_co_pwrite_zeroes are both
64bit.
throttle: Both throttle_group_co_io_limits_intercept() and
bdrv_co_pwrite_zeroes() are 64bit.
vmdk: pass to vmdk_pwritev which is 64bit
quorum: pass to quorum_co_pwritev() which is 64bit
Hooray!
At this point all block drivers are prepared to support 64bit
write-zero requests, or have explicitly set max_pwrite_zeroes.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-8-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: use <= rather than < in assertions relying on max_pwrite_zeroes]
Signed-off-by: Eric Blake <eblake@redhat.com>
2021-09-03 13:28:03 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
|
|
|
|
* at most 0xFFFF
|
|
|
|
*/
|
|
|
|
bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
|
|
|
|
bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
|
|
|
|
1UL << s->blkshift);
|
2021-09-03 13:28:06 +03:00
|
|
|
|
|
|
|
bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
|
|
|
|
bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
|
|
|
|
1UL << s->blkshift);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
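As a rough illustration of the limits computed above, the following self-contained sketch (an assumption for illustration only: a namespace with 4 KiB logical blocks, i.e. blkshift = 12; not part of the driver) evaluates the same formulas:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned blkshift = 12;  /* assumed: 4 KiB logical blocks */

    /* Write zeroes: the 16-bit, 0-based NLB field allows up to 0x10000
     * blocks per command, i.e. 1 << (blkshift + 16) bytes. */
    uint64_t max_pwrite_zeroes = 1ULL << (blkshift + 16);

    /* Discard: the 32-bit nlb field of a DSM range allows up to
     * UINT32_MAX blocks per range. */
    uint64_t max_pdiscard = (uint64_t)UINT32_MAX << blkshift;

    printf("max_pwrite_zeroes = %" PRIu64 " bytes (%" PRIu64 " MiB)\n",
           max_pwrite_zeroes, max_pwrite_zeroes >> 20);
    printf("max_pdiscard      = %" PRIu64 " bytes (~%" PRIu64 " GiB)\n",
           max_pdiscard, max_pdiscard >> 30);
    return 0;
}

With 512-byte blocks (blkshift = 9) the same formulas give 32 MiB and just under 2 TiB respectively.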
|
|
|
|
|
|
|
|
static void nvme_detach_aio_context(BlockDriverState *bs)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
2020-10-29 12:32:50 +03:00
|
|
|
for (unsigned i = 0; i < s->queue_count; i++) {
|
2020-06-17 16:22:01 +03:00
|
|
|
NVMeQueuePair *q = s->queues[i];
|
|
|
|
|
|
|
|
qemu_bh_delete(q->completion_bh);
|
|
|
|
q->completion_bh = NULL;
|
|
|
|
}
|
|
|
|
|
2020-08-21 22:53:59 +03:00
|
|
|
aio_set_event_notifier(bdrv_get_aio_context(bs),
|
|
|
|
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
aio: remove aio_disable_external() API
All callers now pass is_external=false to aio_set_fd_handler() and
aio_set_event_notifier(). The aio_disable_external() API that
temporarily disables fd handlers that were registered is_external=true
is therefore dead code.
Remove aio_disable_external(), aio_enable_external(), and the
is_external arguments to aio_set_fd_handler() and
aio_set_event_notifier().
The entire test-fdmon-epoll test is removed because its sole purpose was
testing aio_disable_external().
Parts of this patch were generated using the following coccinelle
(https://coccinelle.lip6.fr/) semantic patch:
@@
expression ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque;
@@
- aio_set_fd_handler(ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque)
+ aio_set_fd_handler(ctx, fd, io_read, io_write, io_poll, io_poll_ready, opaque)
@@
expression ctx, notifier, is_external, io_read, io_poll, io_poll_ready;
@@
- aio_set_event_notifier(ctx, notifier, is_external, io_read, io_poll, io_poll_ready)
+ aio_set_event_notifier(ctx, notifier, io_read, io_poll, io_poll_ready)
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230516190238.8401-21-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2023-05-16 22:02:38 +03:00
|
|
|
NULL, NULL, NULL);
|
2018-01-16 09:08:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_attach_aio_context(BlockDriverState *bs,
|
|
|
|
AioContext *new_context)
|
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
s->aio_context = new_context;
|
2020-08-21 22:53:59 +03:00
|
|
|
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
2023-05-16 22:02:38 +03:00
|
|
|
nvme_handle_event, nvme_poll_cb,
|
aio-posix: split poll check from ready handler
Adaptive polling measures the execution time of the polling check plus
handlers called when a polled event becomes ready. Handlers can take a
significant amount of time, making it look like polling was running for
a long time when in fact the event handler was running for a long time.
For example, on Linux the io_submit(2) syscall invoked when a virtio-blk
device's virtqueue becomes ready can take 10s of microseconds. This
can exceed the default polling interval (32 microseconds) and cause
adaptive polling to stop polling.
By excluding the handler's execution time from the polling check we make
the adaptive polling calculation more accurate. As a result, the event
loop now stays in polling mode where previously it would have fallen
back to file descriptor monitoring.
The following data was collected with virtio-blk num-queues=2
event_idx=off using an IOThread. Before:
168k IOPS, IOThread syscalls:
9837.115 ( 0.020 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 16, iocbpp: 0x7fcb9f937db0) = 16
9837.158 ( 0.002 ms): IO iothread1/620155 write(fd: 103, buf: 0x556a2ef71b88, count: 8) = 8
9837.161 ( 0.001 ms): IO iothread1/620155 write(fd: 104, buf: 0x556a2ef71b88, count: 8) = 8
9837.163 ( 0.001 ms): IO iothread1/620155 ppoll(ufds: 0x7fcb90002800, nfds: 4, tsp: 0x7fcb9f1342d0, sigsetsize: 8) = 3
9837.164 ( 0.001 ms): IO iothread1/620155 read(fd: 107, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.174 ( 0.001 ms): IO iothread1/620155 read(fd: 105, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.176 ( 0.001 ms): IO iothread1/620155 read(fd: 106, buf: 0x7fcb9f939cc0, count: 512) = 8
9837.209 ( 0.035 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 32, iocbpp: 0x7fca7d0cebe0) = 32
174k IOPS (+3.6%), IOThread syscalls:
9809.566 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0cdd62be0) = 32
9809.625 ( 0.001 ms): IO iothread1/623061 write(fd: 103, buf: 0x5647cfba5f58, count: 8) = 8
9809.627 ( 0.002 ms): IO iothread1/623061 write(fd: 104, buf: 0x5647cfba5f58, count: 8) = 8
9809.663 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0d0388b50) = 32
Notice that ppoll(2) and eventfd read(2) syscalls are eliminated because
the IOThread stays in polling mode instead of falling back to file
descriptor monitoring.
As usual, polling is not implemented on Windows so this patch ignores
the new io_poll_read() callback in aio-win32.c.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-2-stefanha@redhat.com
[Fixed up aio_set_event_notifier() calls in
tests/unit/test-fdmon-epoll.c added after this series was queued.
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2021-12-07 16:23:31 +03:00
|
|
|
nvme_poll_ready);
|
2020-06-17 16:22:01 +03:00
|
|
|
|
2020-10-29 12:32:50 +03:00
|
|
|
for (unsigned i = 0; i < s->queue_count; i++) {
|
2020-06-17 16:22:01 +03:00
|
|
|
NVMeQueuePair *q = s->queues[i];
|
|
|
|
|
|
|
|
q->completion_bh =
|
|
|
|
aio_bh_new(new_context, nvme_process_completion_bh, q);
|
|
|
|
}
|
2018-01-16 09:08:55 +03:00
|
|
|
}
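The nvme_poll_cb/nvme_poll_ready pair registered above follows the split described in the polling commit message: the poll callback is a cheap, side-effect-free check, while the ready callback does the actual completion processing. A minimal, self-contained sketch of that pattern with hypothetical names (not the driver's types or the AioContext API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical completion queue used only for this illustration. */
typedef struct {
    int head, tail;
    int processed;
} SketchQueue;

/* io_poll-style callback: report whether work is pending and do nothing
 * else, so only this cheap check is charged to adaptive polling. */
static bool sketch_poll_cb(void *opaque)
{
    SketchQueue *q = opaque;
    return q->head != q->tail;
}

/* io_poll_ready-style callback: the potentially expensive part, run only
 * after the poll callback has reported pending work. */
static void sketch_poll_ready(void *opaque)
{
    SketchQueue *q = opaque;
    while (q->head != q->tail) {
        q->head++;
        q->processed++;
    }
}

int main(void)
{
    SketchQueue q = { .head = 0, .tail = 3 };
    if (sketch_poll_cb(&q)) {
        sketch_poll_ready(&q);
    }
    printf("processed %d completions\n", q.processed);
    return 0;
}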
|
|
|
|
|
2022-10-13 21:59:02 +03:00
|
|
|
static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
|
|
|
|
Error **errp)
|
2018-01-16 09:08:57 +03:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
2022-10-13 21:59:02 +03:00
|
|
|
/*
|
|
|
|
* FIXME: we may run out of IOVA addresses after repeated
|
|
|
|
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
|
|
|
|
* doesn't reclaim addresses for fixed mappings.
|
|
|
|
*/
|
|
|
|
ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp);
|
|
|
|
return ret == 0;
|
2018-01-16 09:08:57 +03:00
|
|
|
}
|
|
|
|
|
2022-10-13 21:58:59 +03:00
|
|
|
static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size)
|
2018-01-16 09:08:57 +03:00
|
|
|
{
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
qemu_vfio_dma_unmap(s->vfio, host);
|
|
|
|
}
|
|
|
|
|
block/nvme: Add driver statistics for access alignment and hw errors
Keep statistics of some hardware errors, and number of
aligned/unaligned I/O accesses.
QMP example booting a full RHEL 8.3 aarch64 guest:
{ "execute": "query-blockstats" }
{
"return": [
{
"device": "",
"node-name": "drive0",
"stats": {
"flush_total_time_ns": 6026948,
"wr_highest_offset": 3383991230464,
"wr_total_time_ns": 807450995,
"failed_wr_operations": 0,
"failed_rd_operations": 0,
"wr_merged": 3,
"wr_bytes": 50133504,
"failed_unmap_operations": 0,
"failed_flush_operations": 0,
"account_invalid": false,
"rd_total_time_ns": 1846979900,
"flush_operations": 130,
"wr_operations": 659,
"rd_merged": 1192,
"rd_bytes": 218244096,
"account_failed": false,
"idle_time_ns": 2678641497,
"rd_operations": 7406,
},
"driver-specific": {
"driver": "nvme",
"completion-errors": 0,
"unaligned-accesses": 2959,
"aligned-accesses": 4477
},
"qdev": "/machine/peripheral-anon/device[0]/virtio-backend"
}
]
}
Suggested-by: Stefan Hajnoczi <stefanha@gmail.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20201001162939.1567915-1-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2020-10-01 19:29:39 +03:00
|
|
|
static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
|
|
|
|
{
|
|
|
|
BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
|
|
|
|
BDRVNVMeState *s = bs->opaque;
|
|
|
|
|
|
|
|
stats->driver = BLOCKDEV_DRIVER_NVME;
|
|
|
|
stats->u.nvme = (BlockStatsSpecificNvme) {
|
|
|
|
.completion_errors = s->stats.completion_errors,
|
|
|
|
.aligned_accesses = s->stats.aligned_accesses,
|
|
|
|
.unaligned_accesses = s->stats.unaligned_accesses,
|
|
|
|
};
|
|
|
|
|
|
|
|
return stats;
|
|
|
|
}
|
|
|
|
|
2019-02-01 22:29:25 +03:00
|
|
|
static const char *const nvme_strong_runtime_opts[] = {
|
|
|
|
NVME_BLOCK_OPT_DEVICE,
|
|
|
|
NVME_BLOCK_OPT_NAMESPACE,
|
|
|
|
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
static BlockDriver bdrv_nvme = {
|
|
|
|
.format_name = "nvme",
|
|
|
|
.protocol_name = "nvme",
|
|
|
|
.instance_size = sizeof(BDRVNVMeState),
|
|
|
|
|
2020-03-26 04:12:18 +03:00
|
|
|
.bdrv_co_create_opts = bdrv_co_create_opts_simple,
|
|
|
|
.create_opts = &bdrv_create_opts_simple,
|
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
.bdrv_parse_filename = nvme_parse_filename,
|
|
|
|
.bdrv_file_open = nvme_file_open,
|
|
|
|
.bdrv_close = nvme_close,
|
2023-01-13 23:42:04 +03:00
|
|
|
.bdrv_co_getlength = nvme_co_getlength,
|
2019-07-16 19:30:19 +03:00
|
|
|
.bdrv_probe_blocksizes = nvme_probe_blocksizes,
|
2020-12-10 15:52:02 +03:00
|
|
|
.bdrv_co_truncate = nvme_co_truncate,
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
.bdrv_co_preadv = nvme_co_preadv,
|
|
|
|
.bdrv_co_pwritev = nvme_co_pwritev,
|
2019-09-13 16:36:26 +03:00
|
|
|
|
|
|
|
.bdrv_co_pwrite_zeroes = nvme_co_pwrite_zeroes,
|
2019-09-13 16:36:27 +03:00
|
|
|
.bdrv_co_pdiscard = nvme_co_pdiscard,
|
2019-09-13 16:36:26 +03:00
|
|
|
|
2018-01-16 09:08:55 +03:00
|
|
|
.bdrv_co_flush_to_disk = nvme_co_flush,
|
|
|
|
.bdrv_reopen_prepare = nvme_reopen_prepare,
|
|
|
|
|
|
|
|
.bdrv_refresh_filename = nvme_refresh_filename,
|
|
|
|
.bdrv_refresh_limits = nvme_refresh_limits,
|
2019-02-01 22:29:25 +03:00
|
|
|
.strong_runtime_opts = nvme_strong_runtime_opts,
|
2020-10-01 19:29:39 +03:00
|
|
|
.bdrv_get_specific_stats = nvme_get_specific_stats,
|
2018-01-16 09:08:55 +03:00
|
|
|
|
|
|
|
.bdrv_detach_aio_context = nvme_detach_aio_context,
|
|
|
|
.bdrv_attach_aio_context = nvme_attach_aio_context,
|
|
|
|
|
2018-01-16 09:08:57 +03:00
|
|
|
.bdrv_register_buf = nvme_register_buf,
|
|
|
|
.bdrv_unregister_buf = nvme_unregister_buf,
|
2018-01-16 09:08:55 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
static void bdrv_nvme_init(void)
|
|
|
|
{
|
|
|
|
bdrv_register(&bdrv_nvme);
|
|
|
|
}
|
|
|
|
|
|
|
|
block_init(bdrv_nvme_init);
|