* Added a handy FDGetter AutoLocker-style class.

* In select_fd(): First get the file descriptor, then check whether any
  events have to be selected at all. This has the advantage that the
  caller can interpret an error return code as invalid FD. Consequently
  common_poll() no longer checks FD validity separately -- this was a
  race condition.
* common_poll() always selects POLLERR and POLLHUP now, which it has to
  do according to the specs.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22400 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2007-10-01 18:48:52 +00:00
parent 1221a4f200
commit 1c118ebc62
2 changed files with 69 additions and 28 deletions

View File

@@ -27,10 +27,53 @@
#endif
static struct file_descriptor* get_fd_locked(struct io_context* context,
int fd);
static void deselect_select_infos(file_descriptor* descriptor,
select_info* infos);
// Locking policy used with AutoLocker to manage file_descriptor references:
// "locking" never acquires anything (the reference is obtained separately via
// get_fd()/get_fd_locked()), while "unlocking" drops the reference with
// put_fd().
struct FDGetterLocking {
	// Always refuses to lock; the descriptor reference is acquired outside
	// of AutoLocker and only handed to it for release.
	inline bool Lock(file_descriptor* /*lockable*/)
	{
		return false;
	}

	// Release the reference previously acquired with get_fd()/get_fd_locked().
	inline void Unlock(file_descriptor* lockable)
	{
		put_fd(lockable);
	}
};
// AutoLocker-style RAII holder for a file_descriptor reference: acquires the
// reference via get_fd()/get_fd_locked() and releases it with put_fd() (through
// FDGetterLocking::Unlock()) when the FDGetter is destroyed or reset.
class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
public:
	// Creates an empty getter; use SetTo() to acquire a descriptor later.
	inline FDGetter()
		: AutoLocker<file_descriptor, FDGetterLocking>()
	{
	}

	// Acquires a reference to the descriptor for \a fd in \a context.
	// If \a contextLocked is true the caller already holds the context's
	// lock, so the non-locking get_fd_locked() variant is used.
	inline FDGetter(io_context* context, int fd, bool contextLocked = false)
		: AutoLocker<file_descriptor, FDGetterLocking>(
			contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
	{
	}

	// Acquires a reference to the descriptor for \a fd in \a context and
	// returns it (NULL if the FD is invalid). Any previously held descriptor
	// is released by AutoLocker::SetTo().
	inline file_descriptor* SetTo(io_context* context, int fd,
		bool contextLocked = false)
	{
		file_descriptor* descriptor
			= contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
		// NOTE(review): second argument presumably marks the object as
		// "already locked" so Unlock()/put_fd() runs on release — confirm
		// against the AutoLocker template.
		AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
		return descriptor;
	}

	// Returns the currently held descriptor (fLockable is AutoLocker's
	// member), or NULL if none is held.
	inline file_descriptor* FD() const
	{
		return fLockable;
	}
};
/*** General fd routines ***/
@@ -416,22 +459,23 @@ select_fd(int fd, struct select_sync* sync, uint32 ref, bool kernel)
{
TRACE(("select_fd(fd = %d, selectsync = %p, ref = %lu, 0x%x)\n", fd, sync, ref, sync->set[ref].selected_events));
select_info* info = &sync->set[ref];
if (info->selected_events == 0)
return B_OK;
FDGetter fdGetter;
// define before the context locker, so it will be destroyed after it
io_context* context = get_current_io_context(kernel);
MutexLocker locker(context->io_mutex);
struct file_descriptor* descriptor = get_fd_locked(context, fd);
struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
if (descriptor == NULL)
return B_FILE_ERROR;
select_info* info = &sync->set[ref];
if (info->selected_events == 0)
return B_OK;
if (!descriptor->ops->fd_select) {
// if the I/O subsystem doesn't support select(), we will
// immediately notify the select call
locker.Unlock();
put_fd(descriptor);
return notify_select_events(info, info->selected_events);
}
@@ -461,7 +505,6 @@ select_fd(int fd, struct select_sync* sync, uint32 ref, bool kernel)
if (selectedEvents == 0)
deselect_fd(fd, sync, ref, kernel);
put_fd(descriptor);
return B_OK;
}
@@ -475,10 +518,13 @@ deselect_fd(int fd, struct select_sync* sync, uint32 ref, bool kernel)
if (info->selected_events == 0)
return B_OK;
FDGetter fdGetter;
// define before the context locker, so it will be destroyed after it
io_context* context = get_current_io_context(kernel);
MutexLocker locker(context->io_mutex);
struct file_descriptor* descriptor = get_fd_locked(context, fd);
struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
if (descriptor == NULL)
return B_FILE_ERROR;
@@ -489,11 +535,8 @@ deselect_fd(int fd, struct select_sync* sync, uint32 ref, bool kernel)
infoLocation = &(*infoLocation)->next;
// If not found, someone else beat us to it.
if (*infoLocation != info) {
locker.Unlock();
put_fd(descriptor);
if (*infoLocation != info)
return B_OK;
}
*infoLocation = info->next;
@@ -511,7 +554,6 @@ deselect_fd(int fd, struct select_sync* sync, uint32 ref, bool kernel)
put_select_sync(sync);
put_fd(descriptor);
return B_OK;
}

View File

@@ -149,8 +149,10 @@ common_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
if (errorSet && FD_ISSET(fd, errorSet))
sync->set[fd].selected_events |= SELECT_FLAG(B_SELECT_ERROR);
select_fd(fd, sync, fd, kernel);
// array position is the same as the fd for select()
if (sync->set[fd].selected_events != 0) {
select_fd(fd, sync, fd, kernel);
// array position is the same as the fd for select()
}
}
locker.Unlock();
@@ -246,21 +248,17 @@ common_poll(struct pollfd *fds, nfds_t numFDs, bigtime_t timeout, bool kernel)
for (i = 0; i < numFDs; i++) {
int fd = fds[i].fd;
// check if fds are valid
if (!fd_is_valid(fd, kernel)) {
fds[i].revents = POLLNVAL;
continue;
}
// initialize events masks
fds[i].events &= ~POLLNVAL;
fds[i].revents = 0;
sync->set[i].selected_events = fds[i].events;
sync->set[i].selected_events = fds[i].events & ~POLLNVAL
| POLLERR | POLLHUP;
sync->set[i].events = 0;
select_fd(fd, sync, i, kernel);
if (sync->set[i].selected_events != 0)
count++;
if (select_fd(fd, sync, i, kernel) == B_OK) {
fds[i].revents = 0;
if (sync->set[i].selected_events != 0)
count++;
} else
fds[i].revents = POLLNVAL;
}
if (count < 1) {
@@ -289,7 +287,8 @@ common_poll(struct pollfd *fds, nfds_t numFDs, bigtime_t timeout, bool kernel)
continue;
// POLLxxx flags and B_SELECT_xxx flags are compatible
fds[i].revents = sync->set[i].events;
fds[i].revents = sync->set[i].events
& sync->set[i].selected_events;
if (fds[i].revents != 0)
count++;
}