Moved the select/poll implementation into its own file (vfs_select.c).

Fixed some minor and larger issues (one memory corruption due to the use of FD_ZERO()).
notify_select_event() is no longer binary compatible (hasn't been used on R5).
Added syscalls for select/poll in the syscall dispatcher.
Fixed some debug output in fd.c, adapted for the new notify_select_event()
call.


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@1831 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2002-11-03 03:19:34 +00:00
parent 5ef7716d48
commit ed622838f5
6 changed files with 436 additions and 385 deletions

View File

@ -240,7 +240,7 @@ select_fd(int fd, uint8 event, uint32 ref, struct select_sync *sync, bool kernel
struct file_descriptor *descriptor;
status_t status;
PRINT(("select_fd(fd = %d, event = %u, ref = %lu, selectsync = %p)\n", fd, event, ref, sync));
TRACE(("select_fd(fd = %d, event = %u, ref = %lu, selectsync = %p)\n", fd, event, ref, sync));
descriptor = get_fd(get_current_io_context(kernel), fd);
if (descriptor == NULL)
@ -251,7 +251,7 @@ select_fd(int fd, uint8 event, uint32 ref, struct select_sync *sync, bool kernel
} else {
// if the I/O subsystem doesn't support select(), we will
// immediately notify the select call
status = notify_select_event((void *)sync, ref);
status = notify_select_event((void *)sync, ref, event);
}
put_fd(descriptor);
@ -265,7 +265,7 @@ deselect_fd(int fd, uint8 event, struct select_sync *sync, bool kernel)
struct file_descriptor *descriptor;
status_t status;
PRINT(("deselect_fd(fd = %d, event = %u, ref = %lu, selectsync = %p)\n", fd, event, ref, sync));
TRACE(("deselect_fd(fd = %d, event = %u, selectsync = %p)\n", fd, event, sync));
descriptor = get_fd(get_current_io_context(kernel), fd);
if (descriptor == NULL)
@ -405,7 +405,7 @@ user_read_dir(int fd, struct dirent *buffer, size_t bufferSize, uint32 maxCount)
CHECK_USER_ADDR(buffer)
PRINT(("user_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %d)\n", fd, buffer, bufferSize, maxCount));
PRINT(("user_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %lu)\n", fd, buffer, bufferSize, maxCount));
descriptor = get_fd(get_current_io_context(false), fd);
if (descriptor == NULL)
@ -635,7 +635,7 @@ sys_read_dir(int fd, struct dirent *buffer, size_t bufferSize, uint32 maxCount)
struct file_descriptor *descriptor;
ssize_t retval;
PRINT(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %u)\n",fd, buffer, bufferSize, maxCount));
PRINT(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %lu)\n",fd, buffer, bufferSize, maxCount));
descriptor = get_fd(get_current_io_context(true), fd);
if (descriptor == NULL)

View File

@ -7,6 +7,7 @@ KernelStaticLibrary libfs :
<$(SOURCE_GRIST)>devfs.c
<$(SOURCE_GRIST)>rootfs.c
<$(SOURCE_GRIST)>vfs.c
<$(SOURCE_GRIST)>vfs_select.c
:
-fno-pic -Wno-unused -D_KERNEL_
;

View File

@ -35,10 +35,6 @@
#include "rootfs.h"
#include "bootfs.h"
#include "vfs_select.h"
#include <sys/select.h>
#include <poll.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
@ -1508,27 +1504,6 @@ out:
}
status_t
notify_select_event(selectsync *_sync, uint32 ref)
{
select_sync *sync = (select_sync *)_sync;
// ToDo: check if we have to be compatible and have to export
// this function - it would be nice if we could have one
// where the callee can specify which event has occured
// (instead of this crypted "ref" thingie).
if (sync == NULL
|| sync->sem < B_OK
|| INDEX_FROM_REF(ref) > sync->count)
return B_BAD_VALUE;
sync->set[INDEX_FROM_REF(ref)].events |= SELECT_FLAG_FROM_REF(ref);
return release_sem(sync->sem);
}
int
vfs_getrlimit(int resource, struct rlimit * rlp)
{
@ -2560,252 +2535,6 @@ common_path_write_stat(char *path, bool traverseLeafLink, const struct stat *sta
}
static int
common_select(int numfds, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
bigtime_t timeout, sigset_t *sigMask, bool kernel)
{
struct select_sync sync;
status_t status = B_OK;
int count = 0;
int fd;
// check if fds are valid before doing anything
for (fd = 0; fd < numfds; fd++) {
if (((readSet && FD_ISSET(fd, readSet))
|| (writeSet && FD_ISSET(fd, writeSet))
|| (errorSet && FD_ISSET(fd, errorSet)))
&& !fd_is_valid(fd, kernel))
return B_FILE_ERROR;
}
// allocate resources
memset(&sync, 0, sizeof(select_sync));
sync.sem = create_sem(1, "select");
if (sync.sem < B_OK)
return sync.sem;
set_sem_owner(sync.sem, B_SYSTEM_TEAM);
sync.set = malloc(sizeof(select_info) * numfds);
if (sync.set == NULL) {
delete_sem(sync.sem);
return B_NO_MEMORY;
}
memset(sync.set, 0, sizeof(select_info) * numfds);
sync.count = numfds;
// start selecting file descriptors
for (fd = 0; fd < numfds; fd++) {
if (readSet && FD_ISSET(fd, readSet)
&& select_fd(fd, B_SELECT_READ, MAKE_SELECT_REF(fd, B_SELECT_READ), &sync, kernel) == B_OK)
count++;
if (writeSet && FD_ISSET(fd, writeSet)
&& select_fd(fd, B_SELECT_WRITE, MAKE_SELECT_REF(fd, B_SELECT_WRITE), &sync, kernel) == B_OK)
count++;
if (errorSet && FD_ISSET(fd, errorSet)
&& select_fd(fd, B_SELECT_ERROR, MAKE_SELECT_REF(fd, B_SELECT_ERROR), &sync, kernel) == B_OK)
count++;
}
if (count < 1) {
count = B_BAD_VALUE;
goto err;
}
status = acquire_sem_etc(sync.sem, 1,
B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);
// deselect file descriptors
for (fd = 0; fd < numfds; fd++) {
if (readSet && FD_ISSET(fd, readSet))
deselect_fd(fd, B_SELECT_READ, &sync, kernel);
if (writeSet && FD_ISSET(fd, writeSet))
deselect_fd(fd, B_SELECT_WRITE, &sync, kernel);
if (errorSet && FD_ISSET(fd, errorSet))
deselect_fd(fd, B_SELECT_ERROR, &sync, kernel);
}
// collect the events that happened in the meantime
switch (status) {
case B_OK:
// clear sets to store the received events
if (readSet)
FD_ZERO(readSet);
if (writeSet)
FD_ZERO(writeSet);
if (errorSet)
FD_ZERO(errorSet);
for (count = 0, fd = 0;fd < numfds; fd++) {
if (readSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_READ)) {
FD_SET(fd, readSet);
count++;
}
if (writeSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_WRITE)) {
FD_SET(fd, writeSet);
count++;
}
if (errorSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_ERROR)) {
FD_SET(fd, errorSet);
count++;
}
}
break;
case B_INTERRUPTED:
count = B_INTERRUPTED;
break;
default:
// B_TIMED_OUT, and B_WOULD_BLOCK
count = 0;
}
err:
delete_sem(sync.sem);
free(sync.set);
return count;
}
static int
common_poll(struct pollfd *fds, nfds_t numfds, bigtime_t timeout, bool kernel)
{
status_t status = B_OK;
int count = 0;
int i;
// allocate resources
select_sync sync;
memset(&sync, 0, sizeof(select_sync));
sync.sem = create_sem(1, "poll");
if (sync.sem < B_OK)
return sync.sem;
set_sem_owner(sync.sem, B_SYSTEM_TEAM);
sync.set = malloc(sizeof(select_info) * numfds);
if (sync.set == NULL) {
delete_sem(sync.sem);
return B_NO_MEMORY;
}
memset(sync.set, 0, sizeof(select_info) * numfds);
sync.count = numfds;
// start polling file descriptors (by selecting them)
for (i = 0; i < numfds; i++) {
int fd = fds[i].fd;
// check if fds are valid
if (!fd_is_valid(fd, kernel)) {
fds[i].revents = POLLNVAL;
continue;
}
// clear reported events mask
fds[i].revents = 0;
if ((fds[i].events & POLLIN)
&& select_fd(fd, B_SELECT_READ, MAKE_SELECT_REF(fd, B_SELECT_READ), &sync, kernel) == B_OK)
count++;
if ((fds[i].events & POLLOUT)
&& select_fd(fd, B_SELECT_WRITE, MAKE_SELECT_REF(fd, B_SELECT_WRITE), &sync, kernel) == B_OK)
count++;
if ((fds[i].events & POLLRDBAND)
&& select_fd(fd, B_SELECT_PRI_READ, MAKE_SELECT_REF(fd, B_SELECT_PRI_READ), &sync, kernel) == B_OK)
count++;
if ((fds[i].events & POLLWRBAND)
&& select_fd(fd, B_SELECT_PRI_WRITE, MAKE_SELECT_REF(fd, B_SELECT_PRI_WRITE), &sync, kernel) == B_OK)
count++;
if ((fds[i].events & POLLPRI)
&& select_fd(fd, B_SELECT_HIGH_PRI_READ, MAKE_SELECT_REF(fd, B_SELECT_HIGH_PRI_READ), &sync, kernel) == B_OK)
count++;
// Always select POLLERR and POLLHUP - would be nice if we'd have another
// notify_select_event() call which could directly trigger certain events
// without a specific select.
if (select_fd(fd, B_SELECT_ERROR, MAKE_SELECT_REF(fd, B_SELECT_ERROR),
&sync, kernel) == B_OK)
count++;
if (select_fd(fd, B_SELECT_DISCONNECTED, MAKE_SELECT_REF(fd, B_SELECT_DISCONNECTED),
&sync, kernel) == B_OK)
count++;
}
if (count < 1) {
count = B_BAD_VALUE;
goto err;
}
status = acquire_sem_etc(sync.sem, 1,
B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);
// deselect file descriptors
for (i = 0; i < numfds; i++) {
int fd = fds[i].fd;
if (fds[i].events & POLLIN)
deselect_fd(fd, B_SELECT_READ, &sync, kernel);
if (fds[i].events & POLLOUT)
deselect_fd(fd, B_SELECT_WRITE, &sync, kernel);
if (fds[i].events & POLLRDBAND)
deselect_fd(fd, B_SELECT_PRI_READ, &sync, kernel);
if (fds[i].events & POLLWRBAND)
deselect_fd(fd, B_SELECT_PRI_WRITE, &sync, kernel);
if (fds[i].events & POLLPRI)
deselect_fd(fd, B_SELECT_HIGH_PRI_READ, &sync, kernel);
deselect_fd(fd, B_SELECT_ERROR, &sync, kernel);
deselect_fd(fd, B_SELECT_DISCONNECTED, &sync, kernel);
}
// collect the events that happened in the meantime
switch (status) {
case B_OK:
for (count = 0, i = 0;i < numfds; i++) {
if (fds[i].revents == POLLNVAL)
continue;
// POLLxxx flags and B_SELECT_xxx flags are compatible
fds[i].revents = sync.set[i].events;
if (fds[i].revents != 0)
count++;
}
break;
case B_INTERRUPTED:
count = B_INTERRUPTED;
break;
default:
// B_TIMED_OUT, and B_WOULD_BLOCK
count = 0;
}
err:
delete_sem(sync.sem);
free(sync.set);
return count;
}
static status_t
attr_dir_open(int fd, char *path, bool kernel)
{
@ -3941,21 +3670,6 @@ sys_write_path_stat(const char *path, bool traverseLeafLink, const struct stat *
}
int
sys_select(int numfds, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
bigtime_t timeout, sigset_t *sigMask)
{
return common_select(numfds, readSet, writeSet, errorSet, timeout, sigMask, true);
}
int
sys_poll(struct pollfd *fds, int numfds, bigtime_t timeout)
{
return common_poll(fds, numfds, timeout, true);
}
int
sys_open_attr_dir(int fd, const char *path)
{
@ -4480,92 +4194,6 @@ user_write_path_stat(const char *userPath, bool traverseLeafLink, const struct s
}
int
user_select(int numfds, fd_set *userReadSet, fd_set *userWriteSet, fd_set *userErrorSet,
bigtime_t timeout, sigset_t *userSigMask)
{
fd_set *readSet = NULL, *writeSet = NULL, *errorSet = NULL;
uint32 bytes = _howmany(numfds, NFDBITS) * sizeof(fd_mask);
sigset_t sigMask;
int result;
if (numfds < 0)
return B_BAD_VALUE;
if ((userReadSet != NULL && !CHECK_USER_ADDRESS(userReadSet))
|| (userWriteSet != NULL && !CHECK_USER_ADDRESS(userWriteSet))
|| (userErrorSet != NULL && !CHECK_USER_ADDRESS(userErrorSet))
|| (userSigMask != NULL && !CHECK_USER_ADDRESS(userSigMask)))
return B_BAD_ADDRESS;
// copy parameters
if (userReadSet != NULL) {
readSet = malloc(bytes);
if (readSet == NULL) {
result = B_NO_MEMORY;
goto err;
}
if (user_memcpy(readSet, userReadSet, bytes) < B_OK) {
result = B_BAD_ADDRESS;
goto err;
}
}
if (userWriteSet != NULL) {
writeSet = malloc(bytes);
if (writeSet == NULL) {
result = B_NO_MEMORY;
goto err;
}
if (user_memcpy(writeSet, userWriteSet, bytes) < B_OK) {
result = B_BAD_ADDRESS;
goto err;
}
}
if (userErrorSet != NULL) {
errorSet = malloc(bytes);
if (errorSet == NULL) {
result = B_NO_MEMORY;
goto err;
}
if (user_memcpy(errorSet, userErrorSet, bytes) < B_OK) {
result = B_BAD_ADDRESS;
goto err;
}
}
if (userSigMask != NULL)
sigMask = *userSigMask;
result = common_select(numfds, readSet, writeSet, errorSet, timeout, userSigMask ? &sigMask : NULL, false);
// copy back results
if (result >= B_OK
&& ((readSet != NULL && user_memcpy(userReadSet, readSet, bytes) < B_OK)
|| (writeSet != NULL && user_memcpy(userWriteSet, writeSet, bytes) < B_OK)
|| (errorSet != NULL && user_memcpy(userErrorSet, errorSet, bytes) < B_OK)))
result = B_BAD_ADDRESS;
err:
free(readSet);
free(writeSet);
free(errorSet);
return result;
}
int
user_poll(struct pollfd *userfds, int numfds, bigtime_t timeout)
{
//return common_poll(userfds, numfds, timeout, false);
return B_ERROR;
}
int
user_open_attr_dir(int fd, const char *userPath)
{

View File

@ -0,0 +1,419 @@
/*
** Copyright 2002, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
#include <vfs.h>
#include <fd.h>
#include "vfs_select.h"
#include <sys/select.h>
#include <poll.h>
#include <malloc.h>
#include <string.h>
/** Selects every event set in \a selectedEvents on the file descriptor
 *	\a fd, registering \a ref with the given \a sync object.
 *	Returns how many selections succeeded.
 */

static int
select_events(struct select_sync *sync, int fd, int ref, uint16 selectedEvents, bool kernel)
{
	uint32 count = 0;
	uint16 event;

	// walk all possible event types and select those that were asked for
	for (event = 1; event < 16; event++) {
		if ((selectedEvents & SELECT_FLAG(event)) != 0
			&& select_fd(fd, event, ref, sync, kernel) == B_OK)
			count++;
	}

	return count;
}
/** Undoes the selections previously made by select_events(): deselects
 *	every event set in \a selectedEvents on the file descriptor \a fd.
 */

static void
deselect_events(struct select_sync *sync, int fd, uint16 selectedEvents, bool kernel)
{
	uint16 event;

	// deselect exactly the event types that were selected before
	for (event = 1; event < 16; event++) {
		if ((selectedEvents & SELECT_FLAG(event)) != 0)
			deselect_fd(fd, event, sync, kernel);
	}
}
/** Clears all bits in the fd_set - since we are using variable sized
 *	arrays in the kernel, we can't use the FD_ZERO() macro provided by
 *	sys/select.h for this task.
 *	All other FD_xxx() macros are safe to use, though.
 *	A NULL \a set is silently ignored.
 */

static inline void
fd_zero(fd_set *set, int numfds)
{
	if (set == NULL)
		return;

	memset(set, 0, _howmany(numfds, NFDBITS) * sizeof(fd_mask));
}
/** Common implementation of select() for kernel and userland callers.
 *	Selects the requested events on all descriptors, waits on a semaphore
 *	until an event arrives, the \a timeout expires (relative, -1 means
 *	infinite), or the call is interrupted, and rewrites the sets to
 *	contain only the descriptors that actually had events.
 *	Returns the number of ready descriptors, 0 on timeout (with all sets
 *	cleared), or a negative error code.
 */

static int
common_select(int numfds, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
	bigtime_t timeout, const sigset_t *sigMask, bool kernel)
{
	struct select_sync sync;
	status_t status = B_OK;
	int count = 0;
	int fd;

	// ToDo: set sigMask to make pselect() functionally different from select()

	// check if fds are valid before doing anything
	for (fd = 0; fd < numfds; fd++) {
		if (((readSet && FD_ISSET(fd, readSet))
			|| (writeSet && FD_ISSET(fd, writeSet))
			|| (errorSet && FD_ISSET(fd, errorSet)))
			&& !fd_is_valid(fd, kernel))
			return B_FILE_ERROR;
	}

	// allocate resources

	memset(&sync, 0, sizeof(select_sync));

	sync.sem = create_sem(1, "select");
	if (sync.sem < B_OK)
		return sync.sem;

	set_sem_owner(sync.sem, B_SYSTEM_TEAM);

	sync.set = malloc(sizeof(select_info) * numfds);
	if (sync.set == NULL) {
		delete_sem(sync.sem);
		return B_NO_MEMORY;
	}
	sync.count = numfds;

	// start selecting file descriptors

	for (fd = 0; fd < numfds; fd++) {
		sync.set[fd].selected_events = 0;
		sync.set[fd].events = 0;

		if (readSet && FD_ISSET(fd, readSet))
			sync.set[fd].selected_events |= SELECT_FLAG(B_SELECT_READ);
		if (writeSet && FD_ISSET(fd, writeSet))
			sync.set[fd].selected_events |= SELECT_FLAG(B_SELECT_WRITE);
		if (errorSet && FD_ISSET(fd, errorSet))
			sync.set[fd].selected_events |= SELECT_FLAG(B_SELECT_ERROR);

		count += select_events(&sync, fd, fd, sync.set[fd].selected_events, kernel);
			// array position is the same as the fd for select()
	}

	if (count < 1) {
		count = B_BAD_VALUE;
		goto err;
	}

	status = acquire_sem_etc(sync.sem, 1,
		B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);

	// deselect file descriptors

	for (fd = 0; fd < numfds; fd++)
		deselect_events(&sync, fd, sync.set[fd].selected_events, kernel);

	// collect the events that happened in the meantime

	switch (status) {
		case B_OK:
			// Clear sets to store the received events
			// (we don't use the FD_ZERO() macro, because we have variable
			// sized arrays; the other FD_xxx() macros are safe, though).
			fd_zero(readSet, numfds);
			fd_zero(writeSet, numfds);
			fd_zero(errorSet, numfds);

			for (count = 0, fd = 0; fd < numfds; fd++) {
				if (readSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_READ)) {
					FD_SET(fd, readSet);
					count++;
				}
				if (writeSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_WRITE)) {
					FD_SET(fd, writeSet);
					count++;
				}
				if (errorSet && sync.set[fd].events & SELECT_FLAG(B_SELECT_ERROR)) {
					FD_SET(fd, errorSet);
					count++;
				}
			}
			break;
		case B_INTERRUPTED:
			count = B_INTERRUPTED;
			break;
		default:
			// B_TIMED_OUT, and B_WOULD_BLOCK: no descriptor is ready.
			// Clear the sets as well - callers (e.g. user_select()) copy
			// them back whenever the result is >= B_OK, so leaving them
			// untouched would falsely report every descriptor as ready.
			fd_zero(readSet, numfds);
			fd_zero(writeSet, numfds);
			fd_zero(errorSet, numfds);
			count = 0;
	}

err:
	delete_sem(sync.sem);
	free(sync.set);

	return count;
}
/** Common implementation of poll() for kernel and userland callers.
 *	Selects the events requested in each pollfd entry, waits for an
 *	event, the \a timeout (relative, -1 means infinite), or an
 *	interruption, and stores the received events in the revents fields.
 *	Returns the number of entries with non-zero revents, 0 on timeout,
 *	or a negative error code.
 */

static int
common_poll(struct pollfd *fds, nfds_t numfds, bigtime_t timeout, bool kernel)
{
	status_t status = B_OK;
	int count = 0;
	int i;

	// allocate resources

	select_sync sync;
	memset(&sync, 0, sizeof(select_sync));

	sync.sem = create_sem(1, "poll");
	if (sync.sem < B_OK)
		return sync.sem;

	set_sem_owner(sync.sem, B_SYSTEM_TEAM);

	sync.set = malloc(numfds * sizeof(select_info));
	if (sync.set == NULL) {
		delete_sem(sync.sem);
		return B_NO_MEMORY;
	}
	// Zero the whole array up front: entries for invalid descriptors are
	// skipped by the loop below, but their selected_events fields are
	// still read in the deselect loop - without this memset those reads
	// would hit uninitialized memory.
	memset(sync.set, 0, numfds * sizeof(select_info));
	sync.count = numfds;

	// start polling file descriptors (by selecting them)

	for (i = 0; i < numfds; i++) {
		int fd = fds[i].fd;

		// check if fds are valid
		if (!fd_is_valid(fd, kernel)) {
			fds[i].revents = POLLNVAL;
			continue;
		}

		// initialize events masks (POLLNVAL is an output-only flag)
		fds[i].events &= ~POLLNVAL;
		fds[i].revents = 0;
		sync.set[i].selected_events = fds[i].events;
		sync.set[i].events = 0;

		count += select_events(&sync, fd, i, fds[i].events, kernel);
	}

	if (count < 1) {
		count = B_BAD_VALUE;
		goto err;
	}

	status = acquire_sem_etc(sync.sem, 1,
		B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);

	// deselect file descriptors

	for (i = 0; i < numfds; i++)
		deselect_events(&sync, fds[i].fd, sync.set[i].selected_events, kernel);

	// collect the events that happened in the meantime

	switch (status) {
		case B_OK:
			for (count = 0, i = 0; i < numfds; i++) {
				if (fds[i].revents == POLLNVAL)
					continue;

				// POLLxxx flags and B_SELECT_xxx flags are compatible
				fds[i].revents = sync.set[i].events;
				if (fds[i].revents != 0)
					count++;
			}
			break;
		case B_INTERRUPTED:
			count = B_INTERRUPTED;
			break;
		default:
			// B_TIMED_OUT, and B_WOULD_BLOCK
			count = 0;
	}

err:
	delete_sem(sync.sem);
	free(sync.set);

	return count;
}
// #pragma mark -
// public functions exported to the kernel
/** Called by I/O subsystems to report that \a event has occurred on the
 *	selection identified by \a ref (the index that was registered with
 *	select_fd()). The event is recorded, and the waiting select()/poll()
 *	call is woken up only if it actually selected that event.
 */

status_t
notify_select_event(struct selectsync *_sync, uint32 ref, uint8 event)
{
	select_sync *sync = (select_sync *)_sync;

	// "ref" is an index into the set array, so it must be strictly
	// smaller than the number of entries ("ref > count" would let
	// ref == count write one element past the end of the array)
	if (sync == NULL
		|| sync->sem < B_OK
		|| ref >= sync->count)
		return B_BAD_VALUE;

	sync->set[ref].events |= SELECT_FLAG(event);

	// only wake up the waiting select()/poll() call if the event
	// matches the ones selected
	if (sync->set[ref].selected_events & SELECT_FLAG(event))
		return release_sem(sync->sem);

	return B_OK;
}
// #pragma mark -
// Functions called from the POSIX layer
/** Kernel-space select(): the sets already live in kernel memory, so
 *	they are handed to the common implementation directly.
 */

int
sys_select(int numfds, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
	bigtime_t timeout, const sigset_t *sigMask)
{
	return common_select(numfds, readSet, writeSet, errorSet, timeout,
		sigMask, true);
}
/** Kernel-space poll(): the pollfd array already lives in kernel
 *	memory, so it is handed to the common implementation directly.
 */

int
sys_poll(struct pollfd *fds, int numfds, bigtime_t timeout)
{
	return common_poll(fds, numfds, timeout, true);
}
/** Userland entry point for select(): validates the user addresses,
 *	copies the sets and the signal mask into kernel buffers, runs the
 *	common implementation, and copies the resulting sets back to
 *	userland on success (result >= B_OK).
 */

int
user_select(int numfds, fd_set *userReadSet, fd_set *userWriteSet, fd_set *userErrorSet,
	bigtime_t timeout, const sigset_t *userSigMask)
{
	fd_set *readSet = NULL, *writeSet = NULL, *errorSet = NULL;
	uint32 bytes;
	sigset_t sigMask;
	int result;

	if (numfds < 0)
		return B_BAD_VALUE;

	bytes = _howmany(numfds, NFDBITS) * sizeof(fd_mask);

	if ((userReadSet != NULL && !CHECK_USER_ADDRESS(userReadSet))
		|| (userWriteSet != NULL && !CHECK_USER_ADDRESS(userWriteSet))
		|| (userErrorSet != NULL && !CHECK_USER_ADDRESS(userErrorSet))
		|| (userSigMask != NULL && !CHECK_USER_ADDRESS(userSigMask)))
		return B_BAD_ADDRESS;

	// copy parameters

	if (userReadSet != NULL) {
		readSet = malloc(bytes);
		if (readSet == NULL) {
			// use the common cleanup path like the other sets below
			result = B_NO_MEMORY;
			goto err;
		}
		if (user_memcpy(readSet, userReadSet, bytes) < B_OK) {
			result = B_BAD_ADDRESS;
			goto err;
		}
	}

	if (userWriteSet != NULL) {
		writeSet = malloc(bytes);
		if (writeSet == NULL) {
			result = B_NO_MEMORY;
			goto err;
		}
		if (user_memcpy(writeSet, userWriteSet, bytes) < B_OK) {
			result = B_BAD_ADDRESS;
			goto err;
		}
	}

	if (userErrorSet != NULL) {
		errorSet = malloc(bytes);
		if (errorSet == NULL) {
			result = B_NO_MEMORY;
			goto err;
		}
		if (user_memcpy(errorSet, userErrorSet, bytes) < B_OK) {
			result = B_BAD_ADDRESS;
			goto err;
		}
	}

	// copy the signal mask via user_memcpy() as well - dereferencing
	// the user pointer directly would bypass the fault handling the
	// other user buffers get
	if (userSigMask != NULL
		&& user_memcpy(&sigMask, userSigMask, sizeof(sigMask)) < B_OK) {
		result = B_BAD_ADDRESS;
		goto err;
	}

	result = common_select(numfds, readSet, writeSet, errorSet, timeout,
		userSigMask != NULL ? &sigMask : NULL, false);

	// copy back results

	if (result >= B_OK
		&& ((readSet != NULL && user_memcpy(userReadSet, readSet, bytes) < B_OK)
			|| (writeSet != NULL && user_memcpy(userWriteSet, writeSet, bytes) < B_OK)
			|| (errorSet != NULL && user_memcpy(userErrorSet, errorSet, bytes) < B_OK)))
		result = B_BAD_ADDRESS;

err:
	free(readSet);
	free(writeSet);
	free(errorSet);

	return result;
}
/** Userland entry point for poll(): validates the user address, copies
 *	the pollfd array into a kernel buffer, runs the common
 *	implementation, and copies the array (with its revents fields
 *	filled in) back to userland on success.
 */

int
user_poll(struct pollfd *userfds, int numfds, bigtime_t timeout)
{
	struct pollfd *fds;
	size_t bytes;
	int result;

	if (numfds < 0)
		return B_BAD_VALUE;

	if (userfds == NULL || !CHECK_USER_ADDRESS(userfds))
		return B_BAD_ADDRESS;

	// copy parameters

	bytes = numfds * sizeof(struct pollfd);
	fds = malloc(bytes);
	if (fds == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(fds, userfds, bytes) < B_OK) {
		result = B_BAD_ADDRESS;
		goto err;
	}

	result = common_poll(fds, numfds, timeout, false);

	// copy back results
	if (result >= B_OK && user_memcpy(userfds, fds, bytes) < B_OK)
		result = B_BAD_ADDRESS;

err:
	free(fds);

	return result;
}

View File

@ -6,6 +6,7 @@
#define VFS_SELECT_H
typedef struct select_info {
uint16 selected_events;
uint16 events;
} select_info;
@ -30,9 +31,5 @@ enum select_events {
};
#define SELECT_FLAG(type) (1L << (type - 1))
#define MAKE_SELECT_REF(index, type) ((SELECT_FLAG(type) << 16L) | ((index) & 0xffffUL))
#define INDEX_FROM_REF(ref) ((ref) & 0xffffUL)
#define SELECT_FLAG_FROM_REF(ref) ((ref) >> 16L)
#endif /* VFS_SELECT_H */

View File

@ -145,6 +145,12 @@ int syscall_dispatcher(unsigned long call_num, void *arg_buffer, uint64 *call_re
case SYSCALL_ACCESS:
*call_ret = user_access((const char *)arg0, (int)arg1);
break;
case SYSCALL_SELECT:
*call_ret = user_select((int)arg0, (fd_set *)arg1, (fd_set *)arg2, (fd_set *)arg3, (bigtime_t)INT32TOINT64(arg4, arg5), (sigset_t *)arg6);
break;
case SYSCALL_POLL:
*call_ret = user_poll((struct pollfd *)arg0, (int)arg1, (bigtime_t)INT32TOINT64(arg2, arg3));
break;
case SYSCALL_OPEN_ATTR_DIR:
*call_ret = user_open_attr_dir((int)arg0, (const char *)arg1);
break;