2002-08-10 00:48:30 +04:00
|
|
|
/* Operations on file descriptors
|
2004-11-25 05:56:35 +03:00
|
|
|
*
|
2007-01-25 00:04:49 +03:00
|
|
|
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
|
2004-11-25 05:56:35 +03:00
|
|
|
* Distributed under the terms of the MIT License.
|
|
|
|
*/
|
2002-08-10 00:48:30 +04:00
|
|
|
|
2007-10-01 05:37:28 +04:00
|
|
|
#include <fd.h>
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
2002-09-30 07:31:42 +04:00
|
|
|
#include <OS.h>
|
|
|
|
|
2004-07-02 06:47:43 +04:00
|
|
|
#include <syscalls.h>
|
2007-10-01 05:37:28 +04:00
|
|
|
#include <util/AutoLock.h>
|
|
|
|
#include <vfs.h>
|
2007-10-02 23:47:31 +04:00
|
|
|
#include <wait_for_objects.h>
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
|
2004-06-15 19:28:33 +04:00
|
|
|
//#define TRACE_FD
|
|
|
|
#ifdef TRACE_FD
|
2002-07-17 11:55:51 +04:00
|
|
|
# define TRACE(x) dprintf x
|
|
|
|
#else
|
|
|
|
# define TRACE(x)
|
|
|
|
#endif
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
|
2007-10-01 22:48:52 +04:00
|
|
|
static struct file_descriptor* get_fd_locked(struct io_context* context,
|
|
|
|
int fd);
|
2007-10-01 05:37:28 +04:00
|
|
|
static void deselect_select_infos(file_descriptor* descriptor,
|
|
|
|
select_info* infos);
|
|
|
|
|
|
|
|
|
2007-10-01 22:48:52 +04:00
|
|
|
/** Locking policy for AutoLocker when used with file descriptors.
 *	"Locking" is a no-op (the reference was already acquired when the
 *	descriptor was fetched via get_fd()/get_fd_locked()); "unlocking"
 *	releases that reference via put_fd().
 */
struct FDGetterLocking {
	inline bool Lock(file_descriptor* /*lockable*/)
	{
		// never called with a fresh lock request; the reference is
		// acquired externally, so report "not locked by us"
		return false;
	}

	inline void Unlock(file_descriptor* lockable)
	{
		put_fd(lockable);
	}
};
|
|
|
|
|
|
|
|
/** RAII helper that fetches a file descriptor (adding a reference) and
 *	automatically puts the reference when it goes out of scope.
 *	Pass contextLocked = true when the caller already holds the
 *	context's io_mutex, so the non-locking lookup variant is used.
 */
class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
public:
	inline FDGetter()
		: AutoLocker<file_descriptor, FDGetterLocking>()
	{
	}

	inline FDGetter(io_context* context, int fd, bool contextLocked = false)
		: AutoLocker<file_descriptor, FDGetterLocking>(
			contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
	{
	}

	// Fetches the descriptor for \a fd and takes over the reference;
	// returns NULL if the fd is invalid or disconnected.
	inline file_descriptor* SetTo(io_context* context, int fd,
		bool contextLocked = false)
	{
		file_descriptor* descriptor
			= contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
		AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
		return descriptor;
	}

	// Returns the managed descriptor (NULL if none).
	inline file_descriptor* FD() const
	{
		return fLockable;
	}
};
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/*** General fd routines ***/
|
|
|
|
|
|
|
|
|
2002-07-17 11:55:51 +04:00
|
|
|
#ifdef DEBUG
|
|
|
|
void dump_fd(int fd, struct file_descriptor *descriptor);
|
|
|
|
|
|
|
|
void
|
|
|
|
dump_fd(int fd,struct file_descriptor *descriptor)
|
|
|
|
{
|
2002-11-28 17:41:06 +03:00
|
|
|
dprintf("fd[%d] = %p: type = %ld, ref_count = %ld, ops = %p, u.vnode = %p, u.mount = %p, cookie = %p, open_mode = %lx, pos = %Ld\n",
|
2002-09-30 07:31:42 +04:00
|
|
|
fd, descriptor, descriptor->type, descriptor->ref_count, descriptor->ops,
|
2002-11-28 17:41:06 +03:00
|
|
|
descriptor->u.vnode, descriptor->u.mount, descriptor->cookie, descriptor->open_mode, descriptor->pos);
|
2002-07-17 11:55:51 +04:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-03-31 21:22:33 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/** Allocates and initializes a new file_descriptor.
 *	The returned descriptor starts with one reference and an open count
 *	of zero. Note that \c type and \c ops are NOT initialized here —
 *	presumably the caller is expected to fill them in before the
 *	descriptor is published (TODO: confirm against callers).
 *	Returns NULL on memory shortage.
 */
struct file_descriptor *
alloc_fd(void)
{
	file_descriptor *descriptor
		= (file_descriptor*)malloc(sizeof(struct file_descriptor));
	if (descriptor == NULL)
		return NULL;

	descriptor->u.vnode = NULL;
	descriptor->cookie = NULL;
	descriptor->ref_count = 1;
	descriptor->open_count = 0;
	descriptor->open_mode = 0;
	descriptor->pos = 0;

	return descriptor;
}
|
|
|
|
|
|
|
|
|
2005-10-06 13:02:59 +04:00
|
|
|
bool
|
|
|
|
fd_close_on_exec(struct io_context *context, int fd)
|
|
|
|
{
|
|
|
|
return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
fd_set_close_on_exec(struct io_context *context, int fd, bool closeFD)
|
|
|
|
{
|
|
|
|
if (closeFD)
|
|
|
|
context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
|
|
|
|
else
|
|
|
|
context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-03-19 06:22:34 +03:00
|
|
|
/** Searches a free slot in the FD table of the provided I/O context, and inserts
 *	the specified descriptor into it. The search starts at \a firstIndex,
 *	which allows reserving low descriptor numbers (e.g. for F_DUPFD).
 *	On success the descriptor's open count is incremented and the slot
 *	index is returned; returns B_NO_MORE_FDS when the table is full.
 */
int
new_fd_etc(struct io_context *context, struct file_descriptor *descriptor, int firstIndex)
{
	int fd = -1;
	uint32 i;

	mutex_lock(&context->io_mutex);

	// linear scan for the first free slot at or above firstIndex
	for (i = firstIndex; i < context->table_size; i++) {
		if (!context->fds[i]) {
			fd = i;
			break;
		}
	}
	if (fd < 0) {
		fd = B_NO_MORE_FDS;
		goto err;
	}

	context->fds[fd] = descriptor;
	context->num_used_fds++;
	// the slot holds an open reference on the descriptor
	atomic_add(&descriptor->open_count, 1);

err:
	mutex_unlock(&context->io_mutex);

	return fd;
}
|
|
|
|
|
|
|
|
|
2004-11-25 05:56:35 +03:00
|
|
|
/** Inserts the descriptor into the first free slot of the context's
 *	FD table (i.e. new_fd_etc() starting at index 0).
 */
int
new_fd(struct io_context *context, struct file_descriptor *descriptor)
{
	return new_fd_etc(context, descriptor, 0);
}
|
|
|
|
|
|
|
|
|
2002-09-30 07:31:42 +04:00
|
|
|
/** Reduces the descriptor's reference counter, and frees all resources
 *	when it's no longer used.
 *	If the descriptor was disconnected (O_DISCONNECTED) and only the
 *	slot references remain, its close/free hooks are invoked early and
 *	the structure is neutralized, but the memory is kept until the fd
 *	slot itself is closed.
 */
void
put_fd(struct file_descriptor *descriptor)
{
	// atomic_add() returns the value before the decrement
	int32 previous = atomic_add(&descriptor->ref_count, -1);

	TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
		descriptor, descriptor->ref_count, descriptor->cookie));

	// free the descriptor if we don't need it anymore
	if (previous == 1) {
		// free the underlying object
		if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
			descriptor->ops->fd_free(descriptor);

		free(descriptor);
	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count
		&& descriptor->ops != NULL) {
		// the descriptor has been disconnected - it cannot
		// be accessed anymore, let's close it (no one is
		// currently accessing this descriptor)

		if (descriptor->ops->fd_close)
			descriptor->ops->fd_close(descriptor);
		if (descriptor->ops->fd_free)
			descriptor->ops->fd_free(descriptor);

		// prevent this descriptor from being closed/freed again
		descriptor->open_count = -1;
		descriptor->ref_count = -1;
		descriptor->ops = NULL;
		descriptor->u.vnode = NULL;

		// the file descriptor is kept intact, so that it's not
		// reused until someone explicitly closes it
	}
}
|
|
|
|
|
|
|
|
|
2005-03-19 06:22:34 +03:00
|
|
|
/** Decrements the open counter of the file descriptor and invokes
 *	its close hook when appropriate (i.e. when the last open reference
 *	goes away).
 */
void
close_fd(struct file_descriptor *descriptor)
{
	// atomic_add() returns the value before the decrement
	if (atomic_add(&descriptor->open_count, -1) == 1) {
		vfs_unlock_vnode_if_locked(descriptor);

		if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
			descriptor->ops->fd_close(descriptor);
	}
}
|
|
|
|
|
|
|
|
|
2006-01-15 20:11:48 +03:00
|
|
|
/** This descriptor's underlying object will be closed and freed
 *	as soon as possible (in one of the next calls to put_fd() -
 *	get_fd() will no longer succeed on this descriptor).
 *	This is useful if the underlying object is gone, for instance
 *	when a (mounted) volume got removed unexpectedly.
 */
void
disconnect_fd(struct file_descriptor *descriptor)
{
	// get_fd_locked() refuses descriptors with this flag set
	descriptor->open_mode |= O_DISCONNECTED;
}
|
|
|
|
|
|
|
|
|
2006-01-15 22:26:42 +03:00
|
|
|
/** Acquires an additional reference to the descriptor; release it
 *	with put_fd().
 */
void
inc_fd_ref_count(struct file_descriptor *descriptor)
{
	atomic_add(&descriptor->ref_count, 1);
}
|
|
|
|
|
|
|
|
|
2007-10-01 05:37:28 +04:00
|
|
|
/** Looks up the descriptor for \a fd in the context's table and
 *	acquires a reference to it. The caller must hold the context's
 *	io_mutex. Returns NULL for invalid or disconnected descriptors.
 */
static struct file_descriptor *
get_fd_locked(struct io_context *context, int fd)
{
	if (fd < 0 || (uint32)fd >= context->table_size)
		return NULL;

	struct file_descriptor *descriptor = context->fds[fd];

	if (descriptor != NULL) {
		// Disconnected descriptors cannot be accessed anymore
		if (descriptor->open_mode & O_DISCONNECTED)
			descriptor = NULL;
		else
			inc_fd_ref_count(descriptor);
	}

	return descriptor;
}
|
|
|
|
|
|
|
|
|
2007-10-01 05:37:28 +04:00
|
|
|
struct file_descriptor *
|
|
|
|
get_fd(struct io_context *context, int fd)
|
|
|
|
{
|
|
|
|
MutexLocker(context->io_mutex);
|
|
|
|
|
|
|
|
return get_fd_locked(context, fd);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-03-19 06:22:34 +03:00
|
|
|
/** Removes the file descriptor from the specified slot.
 *	Clears the close-on-exec bit, detaches any pending select infos
 *	(deselecting them outside the lock), and returns the descriptor
 *	with the slot's reference transferred to the caller. Returns NULL
 *	if the slot was empty or the descriptor was disconnected.
 */
static struct file_descriptor *
remove_fd(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32)fd < context->table_size)
		descriptor = context->fds[fd];

	select_info* selectInfos = NULL;
	bool disconnected = false;

	if (descriptor) {
		// fd is valid
		context->fds[fd] = NULL;
		fd_set_close_on_exec(context, fd, false);
		context->num_used_fds--;

		// take over the select infos so they can be deselected
		// after the mutex has been released
		selectInfos = context->select_infos[fd];
		context->select_infos[fd] = NULL;

		disconnected = (descriptor->open_mode & O_DISCONNECTED);
	}

	mutex_unlock(&context->io_mutex);

	if (selectInfos != NULL)
		deselect_select_infos(descriptor, selectInfos);

	return disconnected ? NULL : descriptor;
}
|
|
|
|
|
|
|
|
|
|
|
|
/** Duplicates \a fd in the current I/O context (cf. dup()).
 *	The new descriptor shares the file_descriptor structure with the
 *	original (new_fd() bumps its open count) and has its close-on-exec
 *	flag cleared. Returns the new fd, or B_FILE_ERROR/B_NO_MORE_FDS.
 */
static int
dup_fd(int fd, bool kernel)
{
	struct io_context *context = get_current_io_context(kernel);
	struct file_descriptor *descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
			// drop the reference get_fd() acquired; insertion failed
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}
|
|
|
|
|
|
|
|
|
2005-10-06 12:55:19 +04:00
|
|
|
/** POSIX says this should be the same as:
 *		close(newfd);
 *		fcntl(oldfd, F_DUPFD, newfd);
 *
 *	We do dup2() directly to be thread-safe.
 */
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor *evicted = NULL;
	struct io_context *context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32)oldfd >= context->table_size
		|| (uint32)newfd >= context->table_size
		|| context->fds[oldfd] == NULL) {
		mutex_unlock(&context->io_mutex);
		return B_FILE_ERROR;
	}

	// Check for identity, note that it cannot be made above
	// because we always want to return an error on invalid
	// handles
	select_info* selectInfos = NULL;
	if (oldfd != newfd) {
		// Now do the work: evict whatever occupied newfd, and detach
		// its select infos so they can be deselected after unlocking
		evicted = context->fds[newfd];
		selectInfos = context->select_infos[newfd];
		context->select_infos[newfd] = NULL;
		atomic_add(&context->fds[oldfd]->ref_count, 1);
		atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted) {
		deselect_select_infos(evicted, selectInfos);
		close_fd(evicted);
		put_fd(evicted);
			// the reference that belonged to the newfd slot
	}

	return newfd;
}
|
|
|
|
|
|
|
|
|
2007-05-24 01:48:31 +04:00
|
|
|
/** Dispatches an ioctl() to the descriptor's fd_ioctl hook.
 *	Returns B_FILE_ERROR for an invalid fd, EOPNOTSUPP if the
 *	descriptor has no ioctl hook, otherwise the hook's result.
 */
static status_t
fd_ioctl(bool kernelFD, int fd, ulong op, void *buffer, size_t length)
{
	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	int status = EOPNOTSUPP;
	if (descriptor->ops->fd_ioctl != NULL)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);

	put_fd(descriptor);
	return status;
}
|
|
|
|
|
|
|
|
|
2007-10-01 05:37:28 +04:00
|
|
|
/** Walks the given select_info list, deselects every still-selected
 *	event on the descriptor, notifies each waiter with B_EVENT_INVALID,
 *	and drops the sync object reference the list held.
 */
static void
deselect_select_infos(file_descriptor* descriptor, select_info* infos)
{
	TRACE(("deselect_select_infos(%p, %p)\n", descriptor, infos));

	select_info* info = infos;
	while (info != NULL) {
		// grab the sync pointer before notifying; the waiter may
		// invalidate the info afterwards
		select_sync* sync = info->sync;

		// deselect the selected events
		if (descriptor->ops->fd_deselect && info->selected_events) {
			for (uint16 event = 1; event < 16; event++) {
				if (info->selected_events & SELECT_FLAG(event)) {
					descriptor->ops->fd_deselect(descriptor, event,
						(selectsync*)info);
				}
			}
		}

		notify_select_events(info, B_EVENT_INVALID);
		info = info->next;
		put_select_sync(sync);
	}
}
|
|
|
|
|
|
|
|
|
2002-10-29 06:54:07 +03:00
|
|
|
/** Registers the given select_info on fd: queues it in the context's
 *	select info list, acquires a sync reference, and invokes the
 *	descriptor's fd_select hook for every requested event.
 *	If the descriptor doesn't support select(), the caller is notified
 *	immediately; if no event could be selected, the info is deselected
 *	again right away.
 */
status_t
select_fd(int32 fd, struct select_info* info, bool kernel)
{
	// Fixed: the TRACE statement read `info.selected_events` although
	// `info` is a pointer — it failed to build with TRACE_FD defined.
	TRACE(("select_fd(fd = %d, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (info->selected_events == 0)
		return B_OK;

	if (!descriptor->ops->fd_select) {
		// if the I/O subsystem doesn't support select(), we will
		// immediately notify the select call
		return notify_select_events(info, info->selected_events);
	}

	// add the info to the IO context
	info->next = context->select_infos[fd];
	context->select_infos[fd] = info;

	// as long as the info is in the list, we keep a reference to the sync
	// object
	atomic_add(&info->sync->ref_count, 1);

	locker.Unlock();

	// select any events asked for
	uint32 selectedEvents = 0;

	for (uint16 event = 1; event < 16; event++) {
		if (info->selected_events & SELECT_FLAG(event)
			&& descriptor->ops->fd_select(descriptor, event,
				(selectsync*)info) == B_OK) {
			selectedEvents |= SELECT_FLAG(event);
		}
	}
	info->selected_events = selectedEvents;

	// if nothing has been selected, we deselect immediately
	if (selectedEvents == 0)
		deselect_fd(fd, info, kernel);

	return B_OK;
}
|
|
|
|
|
|
|
|
|
|
|
|
/** Undoes select_fd(): removes the info from the context's list (if it
 *	is still there), deselects all selected events on the descriptor,
 *	and releases the sync object reference held by the list.
 */
status_t
deselect_fd(int32 fd, struct select_info* info, bool kernel)
{
	// Fixed: the TRACE statement read `info.selected_events` although
	// `info` is a pointer — it failed to build with TRACE_FD defined.
	TRACE(("deselect_fd(fd = %d, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	if (info->selected_events == 0)
		return B_OK;

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// remove the info from the IO context

	select_info** infoLocation = &context->select_infos[fd];
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	// If not found, someone else beat us to it.
	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	locker.Unlock();

	// deselect the selected events
	if (descriptor->ops->fd_deselect && info->selected_events) {
		for (uint16 event = 1; event < 16; event++) {
			if (info->selected_events & SELECT_FLAG(event)) {
				descriptor->ops->fd_deselect(descriptor, event,
					(selectsync*)info);
			}
		}
	}

	put_select_sync(info->sync);

	return B_OK;
}
|
|
|
|
|
|
|
|
|
|
|
|
/** This function checks if the specified fd is valid in the current
|
|
|
|
* context. It can be used for a quick check; the fd is not locked
|
|
|
|
* so it could become invalid immediately after this check.
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool
|
|
|
|
fd_is_valid(int fd, bool kernel)
|
|
|
|
{
|
|
|
|
struct file_descriptor *descriptor = get_fd(get_current_io_context(kernel), fd);
|
|
|
|
if (descriptor == NULL)
|
|
|
|
return false;
|
2004-11-25 05:56:35 +03:00
|
|
|
|
2002-10-29 06:54:07 +03:00
|
|
|
put_fd(descriptor);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-12-16 19:11:36 +03:00
|
|
|
struct vnode *
|
|
|
|
fd_vnode(struct file_descriptor *descriptor)
|
|
|
|
{
|
|
|
|
switch (descriptor->type) {
|
|
|
|
case FDTYPE_FILE:
|
|
|
|
case FDTYPE_DIR:
|
|
|
|
case FDTYPE_ATTR_DIR:
|
|
|
|
case FDTYPE_ATTR:
|
|
|
|
return descriptor->u.vnode;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-03-17 23:25:07 +03:00
|
|
|
/** Closes \a fd in the given context: removes it from the table,
 *	invokes the close hook when the last open reference goes away, and
 *	drops the reference the table slot held.
 */
static status_t
common_close(int fd, bool kernel)
{
	struct io_context *io = get_current_io_context(kernel);
	struct file_descriptor *descriptor = remove_fd(io, fd);

	if (descriptor == NULL)
		return B_FILE_ERROR;

#ifdef TRACE_FD
	if (!kernel)
		TRACE(("_user_close(descriptor = %p)\n", descriptor));
#endif

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return B_OK;
}
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
// #pragma mark -
|
2005-03-19 06:22:34 +03:00
|
|
|
// User syscalls
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
|
|
|
|
/** read()/pread() syscall backend.
 *	\a pos == -1 means "use (and advance) the descriptor's current
 *	position". Returns the number of bytes read or a negative error.
 */
ssize_t
_user_read(int fd, off_t pos, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	// reading requires the descriptor not to be write-only
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		// the hook updates `length` to the number of bytes transferred
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesRead = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
}
|
|
|
|
|
|
|
|
|
2004-11-03 17:54:10 +03:00
|
|
|
/** readv()/preadv() syscall backend: scatter read into \a count user
 *	iovecs. \a pos == -1 means "use (and advance) the descriptor's
 *	current position". Returns total bytes read or a negative error.
 */
ssize_t
_user_readv(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	// copy the vector list into kernel space before using it
	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesRead = status;
				break;
			}

			// clamp the running total so it stays representable
			if ((uint64)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = B_BAD_VALUE;

	status = bytesRead;
	descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
}
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/** write()/pwrite() syscall backend.
 *	\a pos == -1 means "use (and advance) the descriptor's current
 *	position". Returns the number of bytes written or a negative error.
 */
ssize_t
_user_write(int fd, off_t pos, const void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;

	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	// writing requires the descriptor not to be read-only
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		// the hook updates `length` to the number of bytes transferred
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
		if (bytesWritten >= B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}
|
|
|
|
|
|
|
|
|
2004-11-03 17:54:10 +03:00
|
|
|
/** writev()/pwritev() syscall backend: gather write from \a count user
 *	iovecs. \a pos == -1 means "use (and advance) the descriptor's
 *	current position". Returns total bytes written or a negative error.
 */
ssize_t
_user_writev(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	// copy the vector list into kernel space before using it
	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesWritten = status;
				break;
			}

			// clamp the running total so it stays representable
			if ((uint64)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	status = bytesWritten;
	descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
}
|
|
|
|
|
|
|
|
|
2002-07-17 11:55:51 +04:00
|
|
|
/** lseek() syscall backend: delegates to the descriptor's fd_seek
 *	hook; returns the new position, or ESPIPE if the descriptor is
 *	not seekable, or B_FILE_ERROR for an invalid fd.
 */
off_t
_user_seek(int fd, off_t pos, int seekType)
{
	// tell the syscall dispatcher this returns a 64-bit value
	syscall_64_bit_return_value();

	struct file_descriptor *descriptor;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;

	TRACE(("user_seek(descriptor = %p)\n", descriptor));

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}
|
|
|
|
|
|
|
|
|
2004-06-15 19:28:33 +04:00
|
|
|
/*!	Syscall entry for ioctl().
	Rejects kernel addresses coming from userland, then forwards to the
	common fd_ioctl() implementation with kernel == false so that the
	descriptor is looked up in the calling team's I/O context.
	\return whatever fd_ioctl() returns, or B_BAD_ADDRESS for a kernel
		\a buffer address.
*/
status_t
_user_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	// the buffer must not point into kernel space when coming from userland
	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Syscall entry for reading directory entries.
	On success returns the number of entries placed into \a buffer
	(at most \a maxCount); B_FILE_ERROR for a bad descriptor, and
	EOPNOTSUPP when the descriptor has no fd_read_dir hook.
*/
ssize_t
_user_read_dir(int fd, struct dirent *buffer, size_t bufferSize, uint32 maxCount)
{
	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %lu)\n", fd, buffer, bufferSize, maxCount));

	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	ssize_t result;
	if (descriptor->ops->fd_read_dir == NULL)
		result = EOPNOTSUPP;
	else {
		// the hook rewrites "count" to the number of entries actually read
		uint32 count = maxCount;
		result = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize, &count);
		if (result >= 0)
			result = count;
	}

	put_fd(descriptor);
	return result;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Syscall entry for rewinding a directory descriptor back to its start.
	Returns EOPNOTSUPP when the descriptor type has no fd_rewind_dir hook.
*/
status_t
_user_rewind_dir(int fd)
{
	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	status_t result = descriptor->ops->fd_rewind_dir != NULL
		? descriptor->ops->fd_rewind_dir(descriptor)
		: EOPNOTSUPP;

	put_fd(descriptor);
	return result;
}
|
|
|
|
|
|
|
|
|
2004-06-15 19:28:33 +04:00
|
|
|
/*!	Syscall entry for close(): delegates to common_close() with
	kernel == false, i.e. the descriptor is removed from the calling
	team's (userland) I/O context.
*/
status_t
_user_close(int fd)
{
	return common_close(fd, false);
}
|
|
|
|
|
|
|
|
|
2002-07-20 04:16:12 +04:00
|
|
|
/*!	Syscall entry for dup(): duplicates \a fd within the calling team's
	userland I/O context via dup_fd().
	\return the new file descriptor, or a negative error code.
*/
int
_user_dup(int fd)
{
	return dup_fd(fd, false);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Syscall entry for dup2(): duplicates \a ofd onto \a nfd within the
	calling team's userland I/O context via dup2_fd().
	\return the new file descriptor (\a nfd), or a negative error code.
*/
int
_user_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, false);
}
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
// #pragma mark -
|
2005-03-19 06:22:34 +03:00
|
|
|
// Kernel calls
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
|
|
|
|
/*!	Kernel-internal read(): reads up to \a length bytes from \a fd into
	\a buffer. Unlike the userland path, \a buffer is trusted — no address
	checks are performed here.
	\param pos the offset to read at, or -1 to use the descriptor's
		current position.
	\return the number of bytes read (clamped to SSIZE_MAX), or a negative
		error code.
*/
ssize_t
_kern_read(int fd, off_t pos, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead;

	// only -1 ("current position") and non-negative offsets are valid
	if (pos < -1)
		return B_BAD_VALUE;

	// lookup in the kernel I/O context; get_fd() acquires a reference
	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	// reading from a write-only descriptor is refused
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		// the hook rewrites "length" to the number of bytes actually read
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (ssize_t)length;

			// NOTE(review): the descriptor position is advanced even when an
			// explicit (pos != -1) offset was passed — confirm this is
			// the intended pread-style semantics.
			descriptor->pos = pos + length;
		}
	} else
		bytesRead = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
}
|
|
|
|
|
|
|
|
|
2004-11-03 17:54:10 +03:00
|
|
|
/*!	Kernel-internal readv(): scatter-read into the \a count buffers
	described by \a vecs. The vectors are trusted kernel memory.
	\param pos the offset to start reading at, or -1 for the descriptor's
		current position.
	\return the total number of bytes read (clamped to SSIZE_MAX), or the
		first error encountered if no earlier vector succeeded.
*/
ssize_t
_kern_readv(int fd, off_t pos, const iovec *vecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead = 0;
	status_t status;
	uint32 i;

	// only -1 ("current position") and non-negative offsets are valid
	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	// reading from a write-only descriptor is refused
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			// the hook rewrites "length" to the bytes actually read
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				// an error aborts the loop; earlier partial progress is
				// discarded in favor of the error code
				bytesRead = status;
				break;
			}

			// clamp the running total so the ssize_t return cannot overflow
			if ((uint64)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (ssize_t)length;

			// NOTE(review): "pos" advances by the requested iov_len, not by
			// "length" (the bytes actually transferred) — confirm short
			// reads are handled as intended here.
			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = B_BAD_VALUE;

	// the descriptor position is updated unconditionally, even when an
	// explicit offset was passed in
	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesRead;
}
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/*!	Kernel-internal write(): writes up to \a length bytes from \a buffer
	to \a fd. \a buffer is trusted kernel memory.
	\param pos the offset to write at, or -1 to use the descriptor's
		current position.
	\return the number of bytes written (clamped to SSIZE_MAX), or a
		negative error code.
*/
ssize_t
_kern_write(int fd, off_t pos, const void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten;

	// only -1 ("current position") and non-negative offsets are valid
	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	// writing to a read-only descriptor is refused
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		// the hook rewrites "length" to the number of bytes actually written
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
		if (bytesWritten >= B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (ssize_t)length;

			// NOTE(review): the descriptor position is advanced even when an
			// explicit (pos != -1) offset was passed — confirm this is the
			// intended pwrite-style semantics.
			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}
|
|
|
|
|
|
|
|
|
2004-11-03 17:54:10 +03:00
|
|
|
/*!	Kernel-internal writev(): gather-write from the \a count buffers
	described by \a vecs. The vectors are trusted kernel memory.
	\param pos the offset to start writing at, or -1 for the descriptor's
		current position.
	\return the total number of bytes written (clamped to SSIZE_MAX), or
		the first error encountered if no earlier vector succeeded.
*/
ssize_t
_kern_writev(int fd, off_t pos, const iovec *vecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;
	status_t status;
	uint32 i;

	// only -1 ("current position") and non-negative offsets are valid
	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	// writing to a read-only descriptor is refused
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			// the hook rewrites "length" to the bytes actually written
			status = descriptor->ops->fd_write(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				// an error aborts the loop; earlier partial progress is
				// discarded in favor of the error code
				bytesWritten = status;
				break;
			}

			// clamp the running total so the ssize_t return cannot overflow
			if ((uint64)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (ssize_t)length;

			// NOTE(review): "pos" advances by the requested iov_len, not by
			// "length" (the bytes actually transferred) — confirm short
			// writes are handled as intended here.
			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	// the descriptor position is updated unconditionally, even when an
	// explicit offset was passed in
	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesWritten;
}
|
|
|
|
|
|
|
|
|
2002-07-17 11:55:51 +04:00
|
|
|
/*!	Kernel-internal lseek(): repositions the given descriptor in the
	kernel I/O context. Returns the new position, a negative error code,
	or ESPIPE for descriptor types without an fd_seek hook.
*/
off_t
_kern_seek(int fd, off_t pos, int seekType)
{
	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	off_t result;
	if (descriptor->ops->fd_seek == NULL)
		result = ESPIPE;
	else
		result = descriptor->ops->fd_seek(descriptor, pos, seekType);

	put_fd(descriptor);
	return result;
}
|
|
|
|
|
|
|
|
|
2007-05-23 23:56:40 +04:00
|
|
|
/*!	Kernel-internal ioctl(): forwards to fd_ioctl() with kernel == true,
	so the descriptor is looked up in the kernel I/O context and \a buffer
	is treated as trusted kernel memory.
*/
status_t
_kern_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	TRACE(("kern_ioctl: fd %d\n", fd));

	return fd_ioctl(true, fd, op, buffer, length);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Performs an ioctl() on a userland descriptor on behalf of kernel code:
	forwards to fd_ioctl() with kernel == false so that \a fd is resolved
	in the current (userland) team's I/O context.
*/
status_t
user_fd_kernel_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	TRACE(("user_fd_kernel_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/*!	Kernel-internal directory read: fills \a buffer with at most
	\a maxCount entries from the directory open on \a fd.
	On success returns the number of entries read; EOPNOTSUPP when the
	descriptor type has no fd_read_dir hook.
*/
ssize_t
_kern_read_dir(int fd, struct dirent *buffer, size_t bufferSize, uint32 maxCount)
{
	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %lu)\n",fd, buffer, bufferSize, maxCount));

	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	ssize_t result;
	if (descriptor->ops->fd_read_dir == NULL)
		result = EOPNOTSUPP;
	else {
		// the hook rewrites "count" to the number of entries actually read
		uint32 count = maxCount;
		result = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize, &count);
		if (result >= 0)
			result = count;
	}

	put_fd(descriptor);
	return result;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Kernel-internal directory rewind: resets the directory open on \a fd
	back to its first entry. Returns EOPNOTSUPP when the descriptor type
	has no fd_rewind_dir hook.
*/
status_t
_kern_rewind_dir(int fd)
{
	TRACE(("sys_rewind_dir(fd = %d)\n",fd));

	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	status_t result = descriptor->ops->fd_rewind_dir != NULL
		? descriptor->ops->fd_rewind_dir(descriptor)
		: EOPNOTSUPP;

	put_fd(descriptor);
	return result;
}
|
|
|
|
|
|
|
|
|
2004-06-15 19:28:33 +04:00
|
|
|
/*!	Kernel-internal close(): delegates to common_close() with
	kernel == true, i.e. the descriptor is removed from the kernel
	I/O context.
*/
status_t
_kern_close(int fd)
{
	return common_close(fd, true);
}
|
|
|
|
|
2002-07-20 04:16:12 +04:00
|
|
|
|
|
|
|
/*!	Kernel-internal dup(): duplicates \a fd within the kernel I/O context
	via dup_fd().
	\return the new file descriptor, or a negative error code.
*/
int
_kern_dup(int fd)
{
	return dup_fd(fd, true);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*!	Kernel-internal dup2(): duplicates \a ofd onto \a nfd within the
	kernel I/O context via dup2_fd().
	\return the new file descriptor (\a nfd), or a negative error code.
*/
int
_kern_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, true);
}
|
|
|
|
|