/*
 * QEMU Block driver for native access to files on NFS shares
 *
 * Copyright (c) 2014-2017 Peter Lieven <pl@kamp.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#if !defined(_WIN32)
#include <poll.h>
#endif
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "block/qdict.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "sysemu/replay.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"

#include <nfsc/libnfs.h>

#define QEMU_NFS_MAX_READAHEAD_SIZE 1048576
#define QEMU_NFS_MAX_PAGECACHE_SIZE (8388608 / NFS_BLKSIZE)
#define QEMU_NFS_MAX_DEBUG_LEVEL 2

typedef struct NFSClient {
    struct nfs_context *context;
    struct nfsfh *fh;
    int events;
    bool has_zero_init;
    AioContext *aio_context;
    QemuMutex mutex;
    uint64_t st_blocks;
    bool cache_used;
    NFSServer *server;
    char *path;
    int64_t uid, gid, tcp_syncnt, readahead, pagecache, debug;
} NFSClient;

typedef struct NFSRPC {
    BlockDriverState *bs;
    int ret;
    int complete;
    QEMUIOVector *iov;
    struct stat *st;
    Coroutine *co;
    NFSClient *client;
} NFSRPC;

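/*
 * Parse an nfs://<host>/<export>/<file>[?param=value...] URL and fill the
 * corresponding driver options into @options. The numeric query parameters
 * (uid, gid, tcp-syncnt, readahead, pagecache, debug) are translated to
 * their blockdev option names.
 */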
static int nfs_parse_uri(const char *filename, QDict *options, Error **errp)
{
    g_autoptr(GUri) uri = g_uri_parse(filename, G_URI_FLAGS_NONE, NULL);
    GUriParamsIter qp;
    const char *uri_server, *uri_path, *uri_query;
    char *qp_name, *qp_value;
    GError *gerror = NULL;

    if (!uri) {
        error_setg(errp, "Invalid URI specified");
        return -EINVAL;
    }
    if (!g_str_equal(g_uri_get_scheme(uri), "nfs")) {
        error_setg(errp, "URI scheme must be 'nfs'");
        return -EINVAL;
    }

    uri_server = g_uri_get_host(uri);
    if (!uri_server || !uri_server[0]) {
        error_setg(errp, "missing hostname in URI");
        return -EINVAL;
    }

    uri_path = g_uri_get_path(uri);
    if (!uri_path || !uri_path[0]) {
        error_setg(errp, "missing file path in URI");
        return -EINVAL;
    }

    qdict_put_str(options, "server.host", uri_server);
    qdict_put_str(options, "server.type", "inet");
    qdict_put_str(options, "path", uri_path);

    uri_query = g_uri_get_query(uri);
    if (uri_query) {
        g_uri_params_iter_init(&qp, uri_query, -1, "&", G_URI_PARAMS_NONE);
        while (g_uri_params_iter_next(&qp, &qp_name, &qp_value, &gerror)) {
            uint64_t val;
            if (!qp_name || gerror) {
                error_setg(errp, "Failed to parse NFS parameter");
                return -EINVAL;
            }
            if (!qp_value) {
                error_setg(errp, "Value for NFS parameter expected: %s",
                           qp_name);
                return -EINVAL;
            }
            if (parse_uint_full(qp_value, 0, &val)) {
                error_setg(errp, "Invalid value for NFS parameter: %s",
                           qp_name);
                return -EINVAL;
            }
            if (g_str_equal(qp_name, "uid")) {
                qdict_put_str(options, "user", qp_value);
            } else if (g_str_equal(qp_name, "gid")) {
                qdict_put_str(options, "group", qp_value);
            } else if (g_str_equal(qp_name, "tcp-syncnt")) {
                qdict_put_str(options, "tcp-syn-count", qp_value);
            } else if (g_str_equal(qp_name, "readahead")) {
                qdict_put_str(options, "readahead-size", qp_value);
            } else if (g_str_equal(qp_name, "pagecache")) {
                qdict_put_str(options, "page-cache-size", qp_value);
            } else if (g_str_equal(qp_name, "debug")) {
                qdict_put_str(options, "debug", qp_value);
            } else {
                error_setg(errp, "Unknown NFS parameter name: %s", qp_name);
                return -EINVAL;
            }
        }
    }

    return 0;
}

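/*
 * Filename parsing and explicit options are mutually exclusive: refuse any
 * option key that nfs_parse_uri() would also set from the filename.
 */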
static bool nfs_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *qe;

    for (qe = qdict_first(options); qe; qe = qdict_next(options, qe)) {
        if (!strcmp(qe->key, "host") ||
            !strcmp(qe->key, "path") ||
            !strcmp(qe->key, "user") ||
            !strcmp(qe->key, "group") ||
            !strcmp(qe->key, "tcp-syn-count") ||
            !strcmp(qe->key, "readahead-size") ||
            !strcmp(qe->key, "page-cache-size") ||
            !strcmp(qe->key, "debug") ||
            strstart(qe->key, "server.", NULL))
        {
            error_setg(errp, "Option %s cannot be used with a filename",
                       qe->key);
            return true;
        }
    }

    return false;
}

static void nfs_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    if (nfs_has_filename_options_conflict(options, errp)) {
        return;
    }

    nfs_parse_uri(filename, options, errp);
}

static void nfs_process_read(void *arg);
static void nfs_process_write(void *arg);

/* Called with QemuMutex held. */
static void nfs_set_events(NFSClient *client)
{
    int ev = nfs_which_events(client->context);
    if (ev != client->events) {
        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                           (ev & POLLIN) ? nfs_process_read : NULL,
                           (ev & POLLOUT) ? nfs_process_write : NULL,
                           NULL, NULL, client);
    }
    client->events = ev;
}

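/*
 * AioContext fd handlers: let libnfs service the socket when it becomes
 * readable or writable, then re-arm the handlers for the events it now
 * wants to be polled for.
 */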
static void nfs_process_read(void *arg)
{
    NFSClient *client = arg;

    qemu_mutex_lock(&client->mutex);
    nfs_service(client->context, POLLIN);
    nfs_set_events(client);
    qemu_mutex_unlock(&client->mutex);
}

static void nfs_process_write(void *arg)
{
    NFSClient *client = arg;

    qemu_mutex_lock(&client->mutex);
    nfs_service(client->context, POLLOUT);
    nfs_set_events(client);
    qemu_mutex_unlock(&client->mutex);
}

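/* Set up the per-request NFSRPC state for the calling coroutine. */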
static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
{
    *task = (NFSRPC) {
        .co             = qemu_coroutine_self(),
        .bs             = bs,
        .client         = bs->opaque,
    };
}

static void nfs_co_generic_bh_cb(void *opaque)
{
    NFSRPC *task = opaque;

    task->complete = 1;
    aio_co_wake(task->co);
}

/* Called (via nfs_service) with QemuMutex held. */
static void
nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                  void *private_data)
{
    NFSRPC *task = private_data;
    task->ret = ret;
    assert(!task->st);
    if (task->ret > 0 && task->iov) {
        if (task->ret <= task->iov->size) {
            qemu_iovec_from_buf(task->iov, 0, data, task->ret);
        } else {
            task->ret = -EIO;
        }
    }
    if (task->ret < 0) {
        error_report("NFS Error: %s", nfs_get_error(nfs));
    }
    replay_bh_schedule_oneshot_event(task->client->aio_context,
                                     nfs_co_generic_bh_cb, task);
}

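/*
 * Read @bytes bytes from @offset into @iov; reads that come back short are
 * zero-padded up to the requested length.
 */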
static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, QEMUIOVector *iov,
                                      BdrvRequestFlags flags)
{
    NFSClient *client = bs->opaque;
    NFSRPC task;

    nfs_co_init_task(bs, &task);
    task.iov = iov;

    WITH_QEMU_LOCK_GUARD(&client->mutex) {
        if (nfs_pread_async(client->context, client->fh,
                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
            return -ENOMEM;
        }

        nfs_set_events(client);
    }
    while (!task.complete) {
        qemu_coroutine_yield();
    }

    if (task.ret < 0) {
        return task.ret;
    }

    /* zero pad short reads */
    if (task.ret < iov->size) {
        qemu_iovec_memset(iov, task.ret, 0, iov->size - task.ret);
    }

    return 0;
}

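/*
 * Write @bytes bytes from @iov at @offset. libnfs takes a single contiguous
 * buffer, so scattered I/O vectors are linearised into a bounce buffer first.
 */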
static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, QEMUIOVector *iov,
                                       BdrvRequestFlags flags)
{
    NFSClient *client = bs->opaque;
    NFSRPC task;
    char *buf = NULL;
    bool my_buffer = false;

    nfs_co_init_task(bs, &task);

    if (iov->niov != 1) {
        buf = g_try_malloc(bytes);
        if (bytes && buf == NULL) {
            return -ENOMEM;
        }
        qemu_iovec_to_buf(iov, 0, buf, bytes);
        my_buffer = true;
    } else {
        buf = iov->iov[0].iov_base;
    }

    WITH_QEMU_LOCK_GUARD(&client->mutex) {
        if (nfs_pwrite_async(client->context, client->fh,
                             offset, bytes, buf,
                             nfs_co_generic_cb, &task) != 0) {
            if (my_buffer) {
                g_free(buf);
            }
            return -ENOMEM;
        }

        nfs_set_events(client);
    }
    while (!task.complete) {
        qemu_coroutine_yield();
    }

    if (my_buffer) {
        g_free(buf);
    }

    if (task.ret != bytes) {
        return task.ret < 0 ? task.ret : -EIO;
    }

    return 0;
}

static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;
    NFSRPC task;

    nfs_co_init_task(bs, &task);

    WITH_QEMU_LOCK_GUARD(&client->mutex) {
        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                            &task) != 0) {
            return -ENOMEM;
        }

        nfs_set_events(client);
    }
    while (!task.complete) {
        qemu_coroutine_yield();
    }

    return task.ret;
}

static void nfs_detach_aio_context(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;

    aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                       NULL, NULL, NULL, NULL, NULL);
    client->events = 0;
}

static void nfs_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NFSClient *client = bs->opaque;

    client->aio_context = new_context;
    nfs_set_events(client);
}

static void nfs_client_close(NFSClient *client)
{
    if (client->context) {
        qemu_mutex_lock(&client->mutex);
        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                           NULL, NULL, NULL, NULL, NULL);
        qemu_mutex_unlock(&client->mutex);
        if (client->fh) {
            nfs_close(client->context, client->fh);
            client->fh = NULL;
        }
#ifdef LIBNFS_FEATURE_UMOUNT
        nfs_umount(client->context);
#endif
        nfs_destroy_context(client->context);
        client->context = NULL;
    }
    g_free(client->path);
    qemu_mutex_destroy(&client->mutex);
    qapi_free_NFSServer(client->server);
    client->server = NULL;
}

static void nfs_file_close(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;
    nfs_client_close(client);
}

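/*
 * Mount the export and open (or create) the file. On success the return
 * value is the file size in 512-byte sectors, rounded up; on failure a
 * negative errno is returned.
 */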
static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts,
                               int flags, int open_flags, Error **errp)
{
    int64_t ret = -EINVAL;
#ifdef _WIN32
    struct __stat64 st;
#else
    struct stat st;
#endif
    char *file = NULL, *strp = NULL;

    qemu_mutex_init(&client->mutex);

    client->path = g_strdup(opts->path);

    strp = strrchr(client->path, '/');
    if (strp == NULL) {
        error_setg(errp, "Invalid URL specified");
        goto fail;
    }
    file = g_strdup(strp);
    *strp = 0;

    /* Steal the NFSServer object from opts; set the original pointer to NULL
     * to avoid use after free and double free. */
    client->server = opts->server;
    opts->server = NULL;

    client->context = nfs_init_context();
    if (client->context == NULL) {
        error_setg(errp, "Failed to init NFS context");
        goto fail;
    }

    if (opts->has_user) {
        client->uid = opts->user;
        nfs_set_uid(client->context, client->uid);
    }

    if (opts->has_group) {
        client->gid = opts->group;
        nfs_set_gid(client->context, client->gid);
    }

    if (opts->has_tcp_syn_count) {
        client->tcp_syncnt = opts->tcp_syn_count;
        nfs_set_tcp_syncnt(client->context, client->tcp_syncnt);
    }

#ifdef LIBNFS_FEATURE_READAHEAD
    if (opts->has_readahead_size) {
        if (open_flags & BDRV_O_NOCACHE) {
            error_setg(errp, "Cannot enable NFS readahead "
                             "if cache.direct = on");
            goto fail;
        }
        client->readahead = opts->readahead_size;
        if (client->readahead > QEMU_NFS_MAX_READAHEAD_SIZE) {
            warn_report("Truncating NFS readahead size to %d",
                        QEMU_NFS_MAX_READAHEAD_SIZE);
            client->readahead = QEMU_NFS_MAX_READAHEAD_SIZE;
        }
        nfs_set_readahead(client->context, client->readahead);
#ifdef LIBNFS_FEATURE_PAGECACHE
        nfs_set_pagecache_ttl(client->context, 0);
#endif
        client->cache_used = true;
    }
#endif

#ifdef LIBNFS_FEATURE_PAGECACHE
    if (opts->has_page_cache_size) {
        if (open_flags & BDRV_O_NOCACHE) {
            error_setg(errp, "Cannot enable NFS pagecache "
                             "if cache.direct = on");
            goto fail;
        }
        client->pagecache = opts->page_cache_size;
        if (client->pagecache > QEMU_NFS_MAX_PAGECACHE_SIZE) {
            warn_report("Truncating NFS pagecache size to %d pages",
                        QEMU_NFS_MAX_PAGECACHE_SIZE);
            client->pagecache = QEMU_NFS_MAX_PAGECACHE_SIZE;
        }
        nfs_set_pagecache(client->context, client->pagecache);
        nfs_set_pagecache_ttl(client->context, 0);
        client->cache_used = true;
    }
#endif

#ifdef LIBNFS_FEATURE_DEBUG
    if (opts->has_debug) {
        client->debug = opts->debug;
        /* limit the maximum debug level to avoid potential flooding
         * of our log files. */
        if (client->debug > QEMU_NFS_MAX_DEBUG_LEVEL) {
            warn_report("Limiting NFS debug level to %d",
                        QEMU_NFS_MAX_DEBUG_LEVEL);
            client->debug = QEMU_NFS_MAX_DEBUG_LEVEL;
        }
        nfs_set_debug(client->context, client->debug);
    }
#endif

    ret = nfs_mount(client->context, client->server->host, client->path);
    if (ret < 0) {
        error_setg(errp, "Failed to mount nfs share: %s",
                   nfs_get_error(client->context));
        goto fail;
    }

    if (flags & O_CREAT) {
        ret = nfs_creat(client->context, file, 0600, &client->fh);
        if (ret < 0) {
            error_setg(errp, "Failed to create file: %s",
                       nfs_get_error(client->context));
            goto fail;
        }
    } else {
        ret = nfs_open(client->context, file, flags, &client->fh);
        if (ret < 0) {
            error_setg(errp, "Failed to open file: %s",
                       nfs_get_error(client->context));
            goto fail;
        }
    }

    ret = nfs_fstat(client->context, client->fh, &st);
    if (ret < 0) {
        error_setg(errp, "Failed to fstat file: %s",
                   nfs_get_error(client->context));
        goto fail;
    }

    ret = DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE);
#if !defined(_WIN32)
    client->st_blocks = st.st_blocks;
#endif
    client->has_zero_init = S_ISREG(st.st_mode);
    *strp = '/';
    goto out;

fail:
    nfs_client_close(client);
out:
    g_free(file);
    return ret;
}

static BlockdevOptionsNfs *nfs_options_qdict_to_qapi(QDict *options,
                                                     Error **errp)
{
    BlockdevOptionsNfs *opts = NULL;
    Visitor *v;
    const QDictEntry *e;

    v = qobject_input_visitor_new_flat_confused(options, errp);
    if (!v) {
        return NULL;
    }

    visit_type_BlockdevOptionsNfs(v, NULL, &opts, errp);
    visit_free(v);
    if (!opts) {
        return NULL;
    }

    /* Remove the processed options from the QDict (the visitor processes
     * _all_ options in the QDict) */
    while ((e = qdict_first(options))) {
        qdict_del(options, e->key);
    }

    return opts;
}

static int64_t nfs_client_open_qdict(NFSClient *client, QDict *options,
                                     int flags, int open_flags, Error **errp)
{
    BlockdevOptionsNfs *opts;
    int64_t ret;

    opts = nfs_options_qdict_to_qapi(options, errp);
    if (opts == NULL) {
        ret = -EINVAL;
        goto fail;
    }

    ret = nfs_client_open(client, opts, flags, open_flags, errp);
fail:
    qapi_free_BlockdevOptionsNfs(opts);
    return ret;
}

static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp) {
    NFSClient *client = bs->opaque;
    int64_t ret;

    client->aio_context = bdrv_get_aio_context(bs);

    ret = nfs_client_open_qdict(client, options,
                                (flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY,
                                bs->open_flags, errp);
    if (ret < 0) {
        return ret;
    }

    bs->total_sectors = ret;
    if (client->has_zero_init) {
        bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
    }
    return 0;
}

static QemuOptsList nfs_create_opts = {
    .name = "nfs-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(nfs_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        { /* end of list */ }
    }
};

static int nfs_file_co_create(BlockdevCreateOptions *options, Error **errp)
{
    BlockdevCreateOptionsNfs *opts = &options->u.nfs;
    NFSClient *client = g_new0(NFSClient, 1);
    int ret;

    assert(options->driver == BLOCKDEV_DRIVER_NFS);

    client->aio_context = qemu_get_aio_context();

    ret = nfs_client_open(client, opts->location, O_CREAT, 0, errp);
    if (ret < 0) {
        goto out;
    }
    ret = nfs_ftruncate(client->context, client->fh, opts->size);
    nfs_client_close(client);

out:
    g_free(client);
    return ret;
}

static int coroutine_fn nfs_file_co_create_opts(BlockDriver *drv,
                                                const char *url,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options;
    BlockdevCreateOptionsNfs *nfs_opts;
    QDict *options;
    int ret;

    create_options = g_new0(BlockdevCreateOptions, 1);
    create_options->driver = BLOCKDEV_DRIVER_NFS;
    nfs_opts = &create_options->u.nfs;

    /* Read out options */
    nfs_opts->size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                              BDRV_SECTOR_SIZE);

    options = qdict_new();
    ret = nfs_parse_uri(url, options, errp);
    if (ret < 0) {
        goto out;
    }

    nfs_opts->location = nfs_options_qdict_to_qapi(options, errp);
    if (nfs_opts->location == NULL) {
        ret = -EINVAL;
        goto out;
    }

    ret = nfs_file_co_create(create_options, errp);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qobject_unref(options);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}

static int nfs_has_zero_init(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;
    return client->has_zero_init;
}

#if !defined(_WIN32)
/* Called (via nfs_service) with QemuMutex held. */
static void
nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                               void *private_data)
{
    NFSRPC *task = private_data;
    task->ret = ret;
    if (task->ret == 0) {
        memcpy(task->st, data, sizeof(struct stat));
    }
    if (task->ret < 0) {
        error_report("NFS Error: %s", nfs_get_error(nfs));
    }
    replay_bh_schedule_oneshot_event(task->client->aio_context,
                                     nfs_co_generic_bh_cb, task);
}

static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;
    NFSRPC task = {0};
    struct stat st;

    if (bdrv_is_read_only(bs) &&
        !(bs->open_flags & BDRV_O_NOCACHE)) {
        return client->st_blocks * 512;
    }

    nfs_co_init_task(bs, &task);
    task.st = &st;
    WITH_QEMU_LOCK_GUARD(&client->mutex) {
        if (nfs_fstat_async(client->context, client->fh,
                            nfs_get_allocated_file_size_cb, &task) != 0) {
            return -ENOMEM;
        }

        nfs_set_events(client);
    }
    while (!task.complete) {
        qemu_coroutine_yield();
    }

    return (task.ret < 0 ? task.ret : st.st_blocks * 512);
}
#endif

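/* Resize the file; only PREALLOC_MODE_OFF is supported. */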
static int coroutine_fn
nfs_file_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                     PreallocMode prealloc, BdrvRequestFlags flags,
                     Error **errp)
{
    NFSClient *client = bs->opaque;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    ret = nfs_ftruncate(client->context, client->fh, offset);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to truncate file");
        return ret;
    }

    return 0;
}

/* Note that this will not re-establish a connection with the NFS server
 * - it is effectively a NOP. */
static int nfs_reopen_prepare(BDRVReopenState *state,
                              BlockReopenQueue *queue, Error **errp)
{
    NFSClient *client = state->bs->opaque;
#ifdef _WIN32
    struct __stat64 st;
#else
    struct stat st;
#endif
    int ret = 0;

    if (state->flags & BDRV_O_RDWR && bdrv_is_read_only(state->bs)) {
        error_setg(errp, "Cannot open a read-only mount as read-write");
        return -EACCES;
    }

    if ((state->flags & BDRV_O_NOCACHE) && client->cache_used) {
        error_setg(errp, "Cannot disable cache if libnfs readahead or"
                         " pagecache is enabled");
        return -EINVAL;
    }

    /* Update cache for read-only reopens */
    if (!(state->flags & BDRV_O_RDWR)) {
        ret = nfs_fstat(client->context, client->fh, &st);
        if (ret < 0) {
            error_setg(errp, "Failed to fstat file: %s",
                       nfs_get_error(client->context));
            return ret;
        }
#if !defined(_WIN32)
        client->st_blocks = st.st_blocks;
#endif
    }

    return 0;
}

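/*
 * Regenerate bs->exact_filename as an nfs:// URL, appending uid/gid query
 * parameters when they were set explicitly.
 */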
static void nfs_refresh_filename(BlockDriverState *bs)
{
    NFSClient *client = bs->opaque;

    if (client->uid && !client->gid) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nfs://%s%s?uid=%" PRId64, client->server->host, client->path,
                 client->uid);
    } else if (!client->uid && client->gid) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nfs://%s%s?gid=%" PRId64, client->server->host, client->path,
                 client->gid);
    } else if (client->uid && client->gid) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nfs://%s%s?uid=%" PRId64 "&gid=%" PRId64,
                 client->server->host, client->path, client->uid, client->gid);
    } else {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nfs://%s%s", client->server->host, client->path);
    }
}

static char * GRAPH_RDLOCK nfs_dirname(BlockDriverState *bs, Error **errp)
{
    NFSClient *client = bs->opaque;

    if (client->uid || client->gid) {
        bdrv_refresh_filename(bs);
        error_setg(errp, "Cannot generate a base directory for NFS node '%s'",
                   bs->filename);
        return NULL;
    }

    return g_strdup_printf("nfs://%s%s/", client->server->host, client->path);
}

#ifdef LIBNFS_FEATURE_PAGECACHE
static void coroutine_fn nfs_co_invalidate_cache(BlockDriverState *bs,
                                                 Error **errp)
{
    NFSClient *client = bs->opaque;
    nfs_pagecache_invalidate(client->context, client->fh);
}
#endif

static const char *nfs_strong_runtime_opts[] = {
    "path",
    "user",
    "group",
    "server.",

    NULL
};

static BlockDriver bdrv_nfs = {
    .format_name                    = "nfs",
    .protocol_name                  = "nfs",

    .instance_size                  = sizeof(NFSClient),
    .bdrv_parse_filename            = nfs_parse_filename,
    .create_opts                    = &nfs_create_opts,

    .bdrv_has_zero_init             = nfs_has_zero_init,
/* libnfs does not provide the allocated filesize of a file on win32. */
#if !defined(_WIN32)
    .bdrv_co_get_allocated_file_size = nfs_co_get_allocated_file_size,
#endif
    .bdrv_co_truncate               = nfs_file_co_truncate,

    .bdrv_open                      = nfs_file_open,
    .bdrv_close                     = nfs_file_close,
    .bdrv_co_create                 = nfs_file_co_create,
    .bdrv_co_create_opts            = nfs_file_co_create_opts,
    .bdrv_reopen_prepare            = nfs_reopen_prepare,

    .bdrv_co_preadv                 = nfs_co_preadv,
    .bdrv_co_pwritev                = nfs_co_pwritev,
    .bdrv_co_flush_to_disk          = nfs_co_flush,

    .bdrv_detach_aio_context        = nfs_detach_aio_context,
    .bdrv_attach_aio_context        = nfs_attach_aio_context,
    .bdrv_refresh_filename          = nfs_refresh_filename,
    .bdrv_dirname                   = nfs_dirname,

    .strong_runtime_opts            = nfs_strong_runtime_opts,

#ifdef LIBNFS_FEATURE_PAGECACHE
    .bdrv_co_invalidate_cache       = nfs_co_invalidate_cache,
#endif
};

static void nfs_block_init(void)
{
    bdrv_register(&bdrv_nfs);
}

block_init(nfs_block_init);