haiku/headers/private/kernel/vfs.h

/*
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_VFS_H
#define _KERNEL_VFS_H

#include <kernel.h>
#include <lock.h>
#include <util/list.h>

#include <fs_interface.h>

#include <dirent.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/select.h>

#define DEFAULT_FD_TABLE_SIZE	256
#define MAX_FD_TABLE_SIZE	8192
#define DEFAULT_NODE_MONITORS	4096
#define MAX_NODE_MONITORS	65536

#define B_UNMOUNT_BUSY_PARTITION	0x80000000
struct kernel_args;
struct vm_cache;
struct file_descriptor;
struct selectsync;
struct select_info;
struct pollfd;
struct vnode;
/** The I/O context of a process/team; holds the fd array, among other things */
typedef struct io_context {
struct vnode *root;
struct vnode *cwd;
mutex io_mutex;
uint32 table_size;
uint32 num_used_fds;
struct file_descriptor **fds;
struct select_info **select_infos;
uint8 *fds_close_on_exec;
struct list node_monitors;
uint32 num_monitors;
uint32 max_monitors;
} io_context;
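/* A minimal sketch (hypothetical helper, not part of this header) of how a
 * descriptor would be looked up in an io_context; the real code lives in
 * fd.c and additionally acquires a reference on the descriptor:
 *
 *	static struct file_descriptor *
 *	lookup_fd(io_context *context, int fd)
 *	{
 *		struct file_descriptor *descriptor = NULL;
 *
 *		mutex_lock(&context->io_mutex);
 *		if (fd >= 0 && (uint32)fd < context->table_size)
 *			descriptor = context->fds[fd];
 *		mutex_unlock(&context->io_mutex);
 *
 *		return descriptor;
 *	}
 */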
struct fd_info {
int number;
int32 open_mode;
dev_t device;
ino_t node;
};
/* macro to allocate an iovec array on the stack */
#define IOVECS(name, size) \
uint8 _##name[sizeof(iovecs) + (size)*sizeof(iovec)]; \
iovecs *name = (iovecs *)_##name
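/* Usage sketch: reserves stack space for an iovecs header plus four vector
 * entries. This assumes the iovecs type (declared elsewhere, e.g. in
 * kernel.h) consists of a count field followed by a trailing iovec array;
 * the field names below are illustrative only:
 *
 *	IOVECS(vecs, 4);
 *	vecs->num = 1;
 *	vecs->vec[0].iov_base = buffer;
 *	vecs->vec[0].iov_len = bufferSize;
 */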
#ifdef __cplusplus
extern "C" {
#endif
status_t vfs_init(struct kernel_args *args);
status_t vfs_bootstrap_file_systems(void);
void vfs_mount_boot_file_system(struct kernel_args *args);
void vfs_exec_io_context(void *context);
void *vfs_new_io_context(void *parentContext);
status_t vfs_free_io_context(void *context);
struct rlimit;
int vfs_getrlimit(int resource, struct rlimit * rlp);
int vfs_setrlimit(int resource, const struct rlimit * rlp);
/* calls needed by the VM for paging and by the file cache */
int vfs_get_vnode_from_fd(int fd, bool kernel, struct vnode **_vnode);
status_t vfs_get_vnode_from_path(const char *path, bool kernel,
struct vnode **_vnode);
status_t vfs_get_vnode(dev_t mountID, ino_t vnodeID, bool canWait,
struct vnode **_vnode);
status_t vfs_entry_ref_to_vnode(dev_t mountID, ino_t directoryID,
const char *name, struct vnode **_vnode);
void vfs_vnode_to_node_ref(struct vnode *vnode, dev_t *_mountID,
ino_t *_vnodeID);
status_t vfs_lookup_vnode(dev_t mountID, ino_t vnodeID, struct vnode **_vnode);
void vfs_put_vnode(struct vnode *vnode);
void vfs_acquire_vnode(struct vnode *vnode);
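/* A sketch of the reference pattern these calls suggest: every successful
 * vfs_get_vnode() (or vfs_acquire_vnode()) should be balanced by a
 * vfs_put_vnode() once the caller is done with the node:
 *
 *	struct vnode *vnode;
 *	if (vfs_get_vnode(mountID, vnodeID, true, &vnode) == B_OK) {
 *		// ... use the vnode, e.g. vfs_stat_vnode(vnode, &stat) ...
 *		vfs_put_vnode(vnode);
 *	}
 */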
status_t vfs_get_cookie_from_fd(int fd, void **_cookie);
bool vfs_can_page(struct vnode *vnode, void *cookie);
status_t vfs_read_pages(struct vnode *vnode, void *cookie, off_t pos,
	const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
status_t vfs_write_pages(struct vnode *vnode, void *cookie, off_t pos,
	const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
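/* A hedged usage sketch for vfs_read_pages(): reading one page into a
 * single vector. "buffer" is an assumed page-sized kernel buffer; vnode
 * and cookie would come from vfs_get_vnode_from_fd() and
 * vfs_get_cookie_from_fd():
 *
 *	iovec vec = { buffer, B_PAGE_SIZE };
 *	size_t numBytes = B_PAGE_SIZE;
 *	status_t status = vfs_read_pages(vnode, cookie, 0, &vec, 1,
 *		&numBytes, false);
 */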
status_t vfs_get_vnode_cache(struct vnode *vnode, struct vm_cache **_cache,
bool allocate);
status_t vfs_get_file_map(struct vnode *vnode, off_t offset, size_t size,
struct file_io_vec *vecs, size_t *_count);
status_t vfs_get_fs_node_from_path(dev_t mountID, const char *path,
bool kernel, void **_node);
status_t vfs_stat_vnode(struct vnode *vnode, struct stat *stat);
status_t vfs_get_vnode_name(struct vnode *vnode, char *name, size_t nameSize);
status_t vfs_entry_ref_to_path(dev_t device, ino_t inode, const char *leaf,
char *path, size_t pathLength);
status_t vfs_get_cwd(dev_t *_mountID, ino_t *_vnodeID);
void vfs_unlock_vnode_if_locked(struct file_descriptor *descriptor);
status_t vfs_unmount(dev_t mountID, uint32 flags);
status_t vfs_disconnect_vnode(dev_t mountID, ino_t vnodeID);
void vfs_free_unused_vnodes(int32 level);
status_t vfs_read_stat(int fd, const char *path, bool traverseLeafLink,
struct stat *stat, bool kernel);
/* special module convenience call */
status_t vfs_get_module_path(const char *basePath, const char *moduleName,
char *pathBuffer, size_t bufferSize);
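/* Usage sketch (argument values are illustrative only): builds the full
 * path of the file providing the named module below the given base path:
 *
 *	char path[B_PATH_NAME_LENGTH];
 *	status_t status = vfs_get_module_path("/boot/home/config/add-ons/kernel",
 *		"file_systems/bfs/v1", path, sizeof(path));
 */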
/* service call for whoever needs a normalized path */
status_t vfs_normalize_path(const char *path, char *buffer, size_t bufferSize,
bool kernel);
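/* Usage sketch, assuming normalization resolves "." and ".." components
 * as well as symlinks into an absolute path:
 *
 *	char buffer[B_PATH_NAME_LENGTH];
 *	status_t status = vfs_normalize_path("/boot/home/../home/Desktop",
 *		buffer, sizeof(buffer), true);
 */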
/* service call for the node monitor */
status_t resolve_mount_point_to_volume_root(dev_t mountID, ino_t nodeID,
dev_t *resolvedMountID, ino_t *resolvedNodeID);
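/* A sketch of the assumed semantics: if (mountID, nodeID) denotes a
 * covered mount point, the call yields the node ref of the root of the
 * volume mounted on top of it:
 *
 *	dev_t volumeID;
 *	ino_t rootID;
 *	if (resolve_mount_point_to_volume_root(mountID, nodeID, &volumeID,
 *			&rootID) == B_OK) {
 *		// the node monitor can report events against the volume root
 *	}
 */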
/* calls the syscall dispatcher should use for user file I/O */
dev_t _user_mount(const char *path, const char *device, const char *fs_name,
uint32 flags, const char *args, size_t argsLength);
status_t _user_unmount(const char *path, uint32 flags);
status_t _user_read_fs_info(dev_t device, struct fs_info *info);
status_t _user_write_fs_info(dev_t device, const struct fs_info *info, int mask);
dev_t _user_next_device(int32 *_cookie);
status_t _user_sync(void);
status_t _user_get_next_fd_info(team_id team, uint32 *cookie, struct fd_info *info,
size_t infoSize);
status_t _user_entry_ref_to_path(dev_t device, ino_t inode, const char *leaf,
char *userPath, size_t pathLength);
status_t _user_normalize_path(const char* userPath, bool traverseLink,
char* buffer);
int _user_open_entry_ref(dev_t device, ino_t inode, const char *name, int openMode, int perms);
int _user_open(int fd, const char *path, int openMode, int perms);
int _user_open_dir_node_ref(dev_t device, ino_t inode);
int _user_open_dir_entry_ref(dev_t device, ino_t inode, const char *uname);
int _user_open_dir(int fd, const char *path);
int _user_open_parent_dir(int fd, char *name, size_t nameLength);
status_t _user_fcntl(int fd, int op, uint32 argument);
status_t _user_fsync(int fd);
status_t _user_flock(int fd, int op);
status_t _user_read_stat(int fd, const char *path, bool traverseLink,
struct stat *stat, size_t statSize);
status_t _user_write_stat(int fd, const char *path, bool traverseLink,
const struct stat *stat, size_t statSize, int statMask);
off_t _user_seek(int fd, off_t pos, int seekType);
status_t _user_create_dir_entry_ref(dev_t device, ino_t inode, const char *name, int perms);
status_t _user_create_dir(int fd, const char *path, int perms);
status_t _user_remove_dir(int fd, const char *path);
status_t _user_read_link(int fd, const char *path, char *buffer, size_t *_bufferSize);
status_t _user_write_link(const char *path, const char *toPath);
status_t _user_create_symlink(int fd, const char *path, const char *toPath,
int mode);
status_t _user_create_link(const char *path, const char *toPath);
status_t _user_unlink(int fd, const char *path);
status_t _user_rename(int oldFD, const char *oldpath, int newFD,
const char *newpath);
status_t _user_access(const char *path, int mode);
ssize_t _user_select(int numfds, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
bigtime_t timeout, const sigset_t *sigMask);
ssize_t _user_poll(struct pollfd *fds, int numfds, bigtime_t timeout);
int _user_open_attr_dir(int fd, const char *path);
int _user_create_attr(int fd, const char *name, uint32 type, int openMode);
int _user_open_attr(int fd, const char *name, int openMode);
status_t _user_remove_attr(int fd, const char *name);
status_t _user_rename_attr(int fromFile, const char *fromName, int toFile, const char *toName);
int _user_open_index_dir(dev_t device);
status_t _user_create_index(dev_t device, const char *name, uint32 type, uint32 flags);
status_t _user_read_index_stat(dev_t device, const char *name, struct stat *stat);
status_t _user_remove_index(dev_t device, const char *name);
status_t _user_getcwd(char *buffer, size_t size);
status_t _user_setcwd(int fd, const char *path);
status_t _user_change_root(const char *path);
int _user_open_query(dev_t device, const char *query, size_t queryLength, uint32 flags,
port_id port, int32 token);
/* fd user prototypes (implementation located in fd.c) */
extern ssize_t _user_read(int fd, off_t pos, void *buffer, size_t bufferSize);
extern ssize_t _user_readv(int fd, off_t pos, const iovec *vecs, size_t count);
extern ssize_t _user_write(int fd, off_t pos, const void *buffer, size_t bufferSize);
extern ssize_t _user_writev(int fd, off_t pos, const iovec *vecs, size_t count);
extern status_t _user_ioctl(int fd, ulong cmd, void *data, size_t length);
extern ssize_t _user_read_dir(int fd, struct dirent *buffer, size_t bufferSize, uint32 maxCount);
extern status_t _user_rewind_dir(int fd);
extern status_t _user_close(int fd);
extern int _user_dup(int fd);
extern int _user_dup2(int ofd, int nfd);
extern status_t _user_lock_node(int fd);
extern status_t _user_unlock_node(int fd);
/* vfs entry points... */
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_VFS_H */