/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    /*
     * readdir() signals both end-of-directory and errors by returning NULL;
     * resetting errno beforehand is what allows telling the two apart.
     */
    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}

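/*
 * Illustrative sketch (not part of the original code): how a coroutine
 * caller is expected to interpret the result of v9fs_co_readdir(). The
 * variables pdu and fidp are assumed to come from the caller's context.
 * A negative return value is an error from the fs driver; a zero return
 * value with *dent == NULL means the end of the directory was reached:
 *
 *     struct dirent *dent;
 *     int err = v9fs_co_readdir(pdu, fidp, &dent);
 *     if (err < 0) {
 *         // fs driver reported an error
 *     } else if (!dent) {
 *         // end of directory
 *     } else {
 *         // process dent->d_name, then call v9fs_co_readdir() again
 *     }
 */
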
/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                           struct V9fsDirEnt **entries, off_t offset,
                           int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: There should be a warn_report_once() here if the lock is
     * contended.
     *
     * A well-behaved 9p client should never get us into concurrency here,
     * because it would not use the same fid for concurrent requests. We
     * take the lock anyway for safety; however, such a client would then
     * suffer performance issues, so it would be better to log that issue
     * here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * Stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would have to be discarded by
         * the 9p controller (main thread / top half) anyway.
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_malloc0(sizeof(V9fsDirEnt));
        } else {
            e = e->next = g_malloc0(sizeof(V9fsDirEnt));
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_malloc0(sizeof(struct stat));
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = qemu_dirent_off(dent);
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}

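/*
 * Illustrative note (an assumption, as V9fsDirEnt is declared in 9p.h, not
 * in this file): the result chain built by do_readdir_many() is a singly
 * linked list whose nodes look roughly like
 *
 *     struct V9fsDirEnt {
 *         struct dirent *dent;      // duplicated directory entry
 *         struct stat *st;          // only allocated when dostat was set
 *         struct V9fsDirEnt *next;
 *     };
 */
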
/**
 * v9fs_co_readdir_many() - Reads multiple directory entries in one rush.
 *
 * @pdu: the causing 9p (T_readdir) client request
 * @fidp: already opened directory on which readdir shall be performed
 * @entries: output for directory entries (must not be NULL)
 * @offset: initial position inside the directory the function shall
 *          seek to before retrieving the directory entries
 * @maxsize: maximum result message body size (in bytes)
 * @dostat: whether a stat() should be performed and returned for
 *          each directory entry
 * Return: resulting response message body size (in bytes) on success,
 *         negative error code otherwise
 *
 * Retrieves the requested (max. amount of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * Acquiring multiple directory entries in one rush from the fs driver,
 * instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency; in practice this could add up to overall latencies
 * of several hundred ms for reading all entries of just a single directory
 * if every directory entry were requested individually from the fs driver.
 *
 * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both on success and on error, to avoid memory
 * leaks once @entries is no longer needed.
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}

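/*
 * Illustrative sketch (not taken from an actual caller): the expected
 * top-half usage pattern of v9fs_co_readdir_many(). pdu, fidp, offset and
 * max_count are assumed to come from the caller's context; the returned
 * chain is walked via the next pointers and must always be released with
 * v9fs_free_dirents(), as required by the NOTE above:
 *
 *     struct V9fsDirEnt *entries = NULL;
 *     struct V9fsDirEnt *e;
 *     int size = v9fs_co_readdir_many(pdu, fidp, &entries, offset,
 *                                     max_count, true);
 *     if (size >= 0) {
 *         for (e = entries; e != NULL; e = e->next) {
 *             // e->dent holds the entry, e->st the optional stat() result
 *         }
 *     }
 *     v9fs_free_dirents(entries);
 */
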
off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            /* too many open file descriptors, ask the core to reclaim some */
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}