axeld + bonefish:
* Implemented automatic syscall restarts:
  - A syscall can indicate that it has been interrupted and can be restarted by setting a respective bit in thread::flags. It can store parameters it wants preserved for the restart in thread::syscall_restart::parameters. Another thread::flags bit indicates whether it has been restarted.
  - handle_signals() clears the restart flag if the handled signal has a handler function installed and SA_RESTART is not set. Another thread flag (THREAD_FLAGS_DONT_RESTART_SYSCALL) can prevent syscalls from being restarted even if they could be (not used yet, but we might want to use it in resume_thread(), so that we stay behaviorally compatible with BeOS).
  - The architecture-specific syscall handler restarts the syscall if the restart flag is set. Implemented for x86 only.
  - Added some support functions in the private <syscall_restart.h> to simplify the syscall restart code in the syscalls.
  - Adjusted all syscalls that can potentially be restarted accordingly.
  - _user_ioctl() sets the new thread flag THREAD_FLAGS_IOCTL_SYSCALL while calling the underlying FS's/driver's hook, so that syscall restarts can also be supported there.
* thread_at_kernel_exit() now invokes handle_signals() in a loop, as long as the latter indicates that the thread shall be suspended, so that signals received in the meantime are handled after waking up, before the thread returns to userland. Adjusted handle_signals() accordingly -- when encountering a suspending signal we don't check for further signals.
* Fixed sigsuspend(): Suspending the thread and rescheduling doesn't result in the correct behavior. Instead we employ a temporary condition variable and interruptibly wait on it. The POSIX test suite test passes now.
* Made the switch_sem[_etc]() behavior on interruption consistent. Depending on when the signal arrived (before the call or when already waiting), the first semaphore would or wouldn't be released. Now we consistently release it.
* Refactored the _user_{read,write}[v]() syscalls: each pair now uses a common function. The iovec version no longer fails if anything could be read/written at all. It also checks whether a complete vector could be read/written, so that we won't skip data if the underlying FS/driver couldn't read/write more at the moment.
* Some refactoring in the x86 syscall handler: the int 99 and sysenter handlers use a common subroutine to avoid code duplication.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23983 a95241bf-73f2-0310-859d-f6bbb57e9c96
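For illustration, here is a minimal sketch (not itself part of this change; the function name is hypothetical) of the pattern the adjusted timeout-based syscalls such as _user_acquire_sem_etc() now follow, using the helpers from the new <syscall_restart.h>:

// Illustrative only: a hypothetical blocking syscall showing the intended
// use of the restart helpers added by this change.
status_t
_user_example_wait_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	// On a restarted syscall this re-reads the absolute timeout saved in
	// thread::syscall_restart::parameters; otherwise it converts a relative
	// timeout into an absolute one and rewrites the flags accordingly.
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	// On B_INTERRUPTED this stores the timeout in the restart parameters and
	// sets THREAD_FLAGS_RESTART_SYSCALL, so the x86 handler re-executes the
	// syscall on kernel exit -- unless handle_signals() clears the flag for a
	// handled signal without SA_RESTART.
	return syscall_restart_handle_timeout_post(error, timeout);
}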
Commit: 4048494ce4
Parent: 4af8877673
@@ -41,7 +41,6 @@ bool arch_on_signal_stack(struct thread *thread);
status_t arch_setup_signal_frame(struct thread *t, struct sigaction *sa,
	int signal, int signalMask);
int64 arch_restore_signal_frame(void);
void arch_check_syscall_restart(struct thread *t);

void arch_store_fork_frame(struct arch_fork_arg *arg);
void arch_restore_fork_frame(struct arch_fork_arg *arg);

@@ -19,6 +19,8 @@ extern "C" {
struct iframe *i386_get_user_iframe(void);
void *x86_next_page_directory(struct thread *from, struct thread *to);

void x86_restart_syscall(struct iframe* frame);

void i386_return_from_signal();
void i386_end_return_from_signal();
headers/private/kernel/syscall_restart.h (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright 2008, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef _KERNEL_SYSCALL_RESTART_H
#define _KERNEL_SYSCALL_RESTART_H


#include <OS.h>

#include <thread.h>


static inline void
syscall_restart_handle_timeout_pre(bigtime_t& timeout)
{
	// If restarted, get the timeout from the restart parameters. Otherwise
	// convert relative timeout to an absolute one.
	struct thread* thread = thread_get_current_thread();
	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0)
		timeout = *(bigtime_t*)thread->syscall_restart.parameters;
	else if (timeout >= 0) {
		timeout += system_time();
		if (timeout < 0)
			timeout = B_INFINITE_TIMEOUT;
	}
}


static inline void
syscall_restart_handle_timeout_pre(uint32& flags, bigtime_t& timeout)
{
	// If restarted, get the timeout from the restart parameters. Otherwise
	// convert relative timeout to an absolute one.
	struct thread* thread = thread_get_current_thread();
	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0)
		timeout = *(bigtime_t*)thread->syscall_restart.parameters;
	else if ((flags & B_RELATIVE_TIMEOUT) != 0) {
		timeout += system_time();
		if (timeout < 0)
			timeout = B_INFINITE_TIMEOUT;
	}

	// any timeout is absolute at this point
	if ((flags & B_RELATIVE_TIMEOUT) != 0)
		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
}


static inline status_t
syscall_restart_handle_timeout_post(status_t error, bigtime_t timeout)
{
	if (error == B_INTERRUPTED) {
		// interrupted -- store timeout and set flag for syscall restart
		struct thread* thread = thread_get_current_thread();
		*(bigtime_t*)thread->syscall_restart.parameters = timeout;
		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
	}

	return error;
}


static inline status_t
syscall_restart_handle_post(status_t error)
{
	if (error == B_INTERRUPTED) {
		// interrupted -- set flag for syscall restart
		struct thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
	}

	return error;
}


static inline bool
syscall_restart_ioctl_is_restarted()
{
	struct thread* thread = thread_get_current_thread();

	return (thread->flags & THREAD_FLAGS_IOCTL_SYSCALL) != 0
		&& (thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0;
}


static inline status_t
syscall_restart_ioctl_handle_post(status_t error)
{
	if (error == B_INTERRUPTED) {
		// interrupted -- set flag for syscall restart
		struct thread* thread = thread_get_current_thread();
		if ((thread->flags & THREAD_FLAGS_IOCTL_SYSCALL) != 0)
			atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
	}

	return error;
}


#endif // _KERNEL_SYSCALL_RESTART_H
@@ -1,5 +1,5 @@
/*
 * Copyright 2004-2007, Haiku Inc.
 * Copyright 2004-2008, Haiku Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
@@ -205,6 +205,10 @@ struct thread {
	size_t signal_stack_size;
	bool signal_stack_enabled;

	struct {
		uint8 parameters[32];
	} syscall_restart;

	bool in_kernel;
	bool was_yielded;

@@ -278,12 +282,16 @@ struct thread_queue {


// bits for the thread::flags field
#define THREAD_FLAGS_SIGNALS_PENDING 0x01
#define THREAD_FLAGS_DEBUG_THREAD 0x02
#define THREAD_FLAGS_DEBUGGER_INSTALLED 0x04
#define THREAD_FLAGS_BREAKPOINTS_DEFINED 0x08
#define THREAD_FLAGS_BREAKPOINTS_INSTALLED 0x10
#define THREAD_FLAGS_64_BIT_SYSCALL_RETURN 0x20
#define THREAD_FLAGS_SIGNALS_PENDING 0x0001
#define THREAD_FLAGS_DEBUG_THREAD 0x0002
#define THREAD_FLAGS_DEBUGGER_INSTALLED 0x0004
#define THREAD_FLAGS_BREAKPOINTS_DEFINED 0x0008
#define THREAD_FLAGS_BREAKPOINTS_INSTALLED 0x0010
#define THREAD_FLAGS_64_BIT_SYSCALL_RETURN 0x0020
#define THREAD_FLAGS_RESTART_SYSCALL 0x0040
#define THREAD_FLAGS_DONT_RESTART_SYSCALL 0x0080
#define THREAD_FLAGS_SYSCALL_RESTARTED 0x0100
#define THREAD_FLAGS_IOCTL_SYSCALL 0x0200


#endif /* _KERNEL_THREAD_TYPES_H */
@@ -22,7 +22,7 @@ KernelStaticLibrary libx86 :
	arch_real_time_clock.c
	arch_smp.c
	arch_string.S
	arch_thread.c
	arch_thread.cpp
	arch_timer.c
	arch_vm.cpp
	arch_vm_translation_map.cpp
@@ -1,5 +1,5 @@
/*
 * Copyright 2002-2007, The Haiku Team. All rights reserved.
 * Copyright 2002-2008, The Haiku Team. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
@@ -304,6 +304,12 @@ trap99:
	// push error, vector, orig_edx, orig_eax, and other registers
	PUSH_IFRAME_BOTTOM_SYSCALL()

	call handle_syscall

	POP_IFRAME_AND_RETURN()


handle_syscall:
	// save %eax, the number of the syscall
	movl %eax, %esi

@@ -312,7 +318,7 @@ trap99:
	movl %eax,%ds
	movl %eax,%es

	movl %esp, %ebp // frame pointer is the iframe
	lea 4(%esp), %ebp // stack frame pointer is the iframe
	movl %dr3, %edi // thread pointer

	// disable breakpoints, if installed
@@ -351,15 +357,19 @@ trap99:

	testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
		| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
		| THREAD_FLAGS_64_BIT_SYSCALL_RETURN) \
		| THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
		| THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_SYSCALL_RESTARTED) \
		, THREAD_flags(%edi)
	jnz post_syscall_work

	cli // disable interrupts

	// update the thread's kernel time and return
	// update the thread's kernel time
	UPDATE_THREAD_KERNEL_TIME()
	POP_IFRAME_AND_RETURN()

	lea -4(%ebp), %esp // remove all parameters from the stack

	ret

do_pre_syscall_debug:
	movl %esp, %eax // syscall parameters
@@ -370,15 +380,16 @@ trap99:
	addl $8, %esp
	jmp pre_syscall_debug_done

post_syscall_work_sysenter:
post_syscall_work:
	// if the 64 bit return value bit is set, we have to clear it
	testl $THREAD_FLAGS_64_BIT_SYSCALL_RETURN, THREAD_flags(%edi)
	// clear the 64 bit return value and syscall restarted bits
	testl $(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
		| THREAD_FLAGS_SYSCALL_RESTARTED), THREAD_flags(%edi)
	jz 2f
1:
	movl THREAD_flags(%edi), %eax
	movl %eax, %edx
	andl $~THREAD_FLAGS_64_BIT_SYSCALL_RETURN, %edx
	andl $~(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
		| THREAD_FLAGS_SYSCALL_RESTARTED), %edx
	lock
	cmpxchgl %edx, THREAD_flags(%edi)
	jnz 1b
@@ -410,6 +421,15 @@ trap99:
	call thread_at_kernel_exit_no_signals
kernel_exit_work_done:

	// syscall restart
	// TODO: this only needs to be done for syscalls!
	testl $THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%edi)
	jz 1f
	push %ebp
	call x86_restart_syscall
	addl $4, %esp
1:

	// install breakpoints, if defined
	testl $THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%edi)
	jz 1f
@@ -417,7 +437,7 @@ trap99:
	call x86_init_user_debug_at_kernel_exit
1:
	POP_IFRAME_AND_RETURN()


kernel_exit_handle_signals:
	// make sure interrupts are enabled (they are, when coming from a syscall
	// but otherwise they might be disabled)
@@ -455,62 +475,7 @@ FUNCTION(x86_sysenter):

	PUSH_IFRAME_BOTTOM_SYSCALL()

	// save %eax, the number of the syscall
	movl %eax, %esi

	movl $KERNEL_DATA_SEG,%eax
	cld
	movl %eax,%ds
	movl %eax,%es

	movl %esp, %ebp // frame pointer is the iframe
	movl %dr3, %edi // thread pointer

	// disable breakpoints, if installed
	cli // disable interrupts
	DISABLE_BREAKPOINTS()

	// update the thread's user time
	UPDATE_THREAD_USER_TIME_PUSH_TIME()
	// leave the time on the stack (needed for post syscall debugging)

	sti // enable interrupts

	cmp $SYSCALL_COUNT, %esi // check syscall number
	jae bad_syscall_number
	movl $kSyscallInfos, %eax // get syscall info
	lea (%eax, %esi, SYSCALL_INFO_sizeof), %edx

	// copy parameters onto this stack
	COPY_SYSCALL_PARAMETERS()

	// pre syscall debugging
	TRACE_PRE_SYSCALL()
	testl $THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%edi)
	jnz do_pre_syscall_debug
	// if debugging is enabled, we take the slow syscall exit

	// call the syscall function
	call *SYSCALL_INFO_function(%esi)

	// overwrite the values of %eax and %edx on the stack (the syscall return
	// value)
	movl %edx, IFRAME_edx(%ebp)
	movl %eax, IFRAME_eax(%ebp)

	TRACE_POST_SYSCALL()

	testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
		| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
		| THREAD_FLAGS_64_BIT_SYSCALL_RETURN) \
		, THREAD_flags(%edi)
	jnz post_syscall_work_sysenter
	// if any special work has to be done, we take the slow syscall exit

	cli // disable interrupts

	// update the thread's kernel time
	UPDATE_THREAD_KERNEL_TIME()
	call handle_syscall

	// pop the bottom of the iframe
	lea 4(%ebp), %esp // skip iframe type
@@ -1,12 +1,11 @@
/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/thread.h>

#include <arch/user_debugger.h>
@@ -16,6 +15,7 @@
#include <int.h>
#include <thread.h>
#include <tls.h>
#include <tracing.h>
#include <vm_address_space.h>
#include <vm_types.h>

@@ -30,9 +30,35 @@
#endif


#ifdef SYSCALL_TRACING

namespace SyscallTracing {

class RestartSyscall : public AbstractTraceEntry {
	public:
		RestartSyscall()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("syscall restart");
		}
};

}

# define TSYSCALL(x) new(std::nothrow) SyscallTracing::x

#else
# define TSYSCALL(x)
#endif // SYSCALL_TRACING


// from arch_interrupts.S
extern void i386_stack_init(struct farcall *interrupt_stack_offset);
extern void i386_restore_frame_from_syscall(struct iframe frame);
extern "C" void i386_stack_init(struct farcall *interrupt_stack_offset);
extern "C" void i386_restore_frame_from_syscall(struct iframe frame);

// from arch_cpu.c
extern void (*gX86SwapFPUFunc)(void *oldState, const void *newState);
@@ -125,7 +151,7 @@ i386_get_user_iframe(void)
}


inline void *
void *
x86_next_page_directory(struct thread *from, struct thread *to)
{
	if (from->team->address_space != NULL && to->team->address_space != NULL) {
@@ -165,14 +191,21 @@ set_tls_context(struct thread *thread)
}


static inline void
restart_syscall(struct iframe *frame)
void
x86_restart_syscall(struct iframe* frame)
{
	struct thread* thread = thread_get_current_thread();

	atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
	atomic_or(&thread->flags, THREAD_FLAGS_SYSCALL_RESTARTED);

	frame->eax = frame->orig_eax;
	frame->edx = frame->orig_edx;
	frame->eip -= 2;
		// undos the "int $99" syscall interrupt
		// (so that it'll be called again)
		// undoes the "int $99"/"sysenter"/"syscall" instruction
		// (so that it'll be executed again)

	TSYSCALL(RestartSyscall());
}


@@ -403,25 +436,27 @@ arch_setup_signal_frame(struct thread *thread, struct sigaction *action,
	int signal, int signalMask)
{
	struct iframe *frame = get_current_iframe();
	uint32 *userStack = (uint32 *)frame->user_esp;
	uint32 *signalCode;
	uint32 *userRegs;
	struct vregs regs;
	uint32 buffer[6];
	status_t status;

	if (frame->orig_eax >= 0) {
		// we're coming from a syscall
		if ((status_t)frame->eax == EINTR
			&& (action->sa_flags & SA_RESTART) != 0) {
			TRACE(("### restarting syscall %d after signal %d\n",
				frame->orig_eax, sig));
			restart_syscall(frame);
		}
	}

	// start stuffing stuff on the user stack
	userStack = get_signal_stack(thread, frame, signal);
	uint32* userStack = get_signal_stack(thread, frame, signal);

	// copy syscall restart info onto the user stack
	userStack -= (sizeof(thread->syscall_restart.parameters) + 12 + 3) / 4;
	uint32 threadFlags = atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
	if (user_memcpy(userStack, &threadFlags, 4) < B_OK
		|| user_memcpy(userStack + 1, &frame->orig_eax, 4) < B_OK
		|| user_memcpy(userStack + 2, &frame->orig_edx, 4) < B_OK)
		return B_BAD_ADDRESS;
	status = user_memcpy(userStack + 3, thread->syscall_restart.parameters,
		sizeof(thread->syscall_restart.parameters));
	if (status < B_OK)
		return status;

	// store the saved regs onto the user stack
	regs.eip = frame->eip;
@@ -436,7 +471,7 @@ arch_setup_signal_frame(struct thread *thread, struct sigaction *action,
	regs._reserved_2[2] = frame->ebp;
	i386_fnsave((void *)(&regs.xregs));

	userStack -= ROUNDUP((sizeof(struct vregs) + 3) / 4, 4);
	userStack -= (sizeof(struct vregs) + 3) / 4;
	userRegs = userStack;
	status = user_memcpy(userRegs, &regs, sizeof(regs));
	if (status < B_OK)
@@ -481,15 +516,35 @@ arch_restore_signal_frame(void)
	struct iframe *frame = get_current_iframe();
	int32 signalMask;
	uint32 *userStack;
	struct vregs* regsPointer;
	struct vregs regs;

	TRACE(("### arch_restore_signal_frame: entry\n"));

	userStack = (uint32 *)frame->user_esp;
	if (user_memcpy(&signalMask, &userStack[0], sizeof(int32)) < B_OK
		|| user_memcpy(&regs, (struct vregs *)userStack[1],
			sizeof(vregs)) < B_OK)
	if (user_memcpy(&signalMask, &userStack[0], 4) < B_OK
		|| user_memcpy(&regsPointer, &userStack[1], 4) < B_OK
		|| user_memcpy(&regs, regsPointer, sizeof(vregs)) < B_OK) {
		return B_BAD_ADDRESS;
	}

	uint32* syscallRestartInfo
		= (uint32*)regsPointer + (sizeof(struct vregs) + 3) / 4;
	uint32 threadFlags;
	if (user_memcpy(&threadFlags, syscallRestartInfo, 4) < B_OK
		|| user_memcpy(&frame->orig_eax, syscallRestartInfo + 1, 4) < B_OK
		|| user_memcpy(&frame->orig_edx, syscallRestartInfo + 2, 4) < B_OK
		|| user_memcpy(thread->syscall_restart.parameters,
			syscallRestartInfo + 3,
			sizeof(thread->syscall_restart.parameters)) < B_OK) {
		return B_BAD_ADDRESS;
	}

	// set restart/64bit return value flags from previous syscall
	atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
	atomic_or(&thread->flags, threadFlags
		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	atomic_set(&thread->sig_block_mask, signalMask);

@@ -512,20 +567,6 @@ arch_restore_signal_frame(void)
}


void
arch_check_syscall_restart(struct thread *thread)
{
	struct iframe *frame = get_current_iframe();
	if (frame == NULL) {
		// this thread is obviously new; we didn't come from an interrupt
		return;
	}

	if ((status_t)frame->orig_eax >= 0 && (status_t)frame->eax == EINTR)
		restart_syscall(frame);
}


/** Saves everything needed to restore the frame in the child fork in the
 * arch_fork_arg structure to be passed to arch_restore_fork_frame().
 * Also makes sure to return the right value.
@@ -569,7 +610,7 @@ arch_restore_fork_frame(struct arch_fork_arg *arg)


void
arch_syscall_64_bit_return_value()
arch_syscall_64_bit_return_value(void)
{
	struct thread* thread = thread_get_current_thread();
	atomic_or(&thread->flags, THREAD_FLAGS_64_BIT_SYSCALL_RETURN);
@@ -12,7 +12,10 @@

#include <OS.h>

#include <AutoDeleter.h>

#include <syscalls.h>
#include <syscall_restart.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <wait_for_objects.h>
@@ -66,6 +69,12 @@ public:
		return descriptor;
	}

	inline file_descriptor* SetTo(int fd, bool kernel,
		bool contextLocked = false)
	{
		return SetTo(get_current_io_context(kernel), fd, contextLocked);
	}

	inline file_descriptor* FD() const
	{
		return fLockable;
@@ -345,12 +354,12 @@ dup_fd(int fd, bool kernel)
}


/** POSIX says this should be the same as:
 * close(newfd);
 * fcntl(oldfd, F_DUPFD, newfd);
 *
 * We do dup2() directly to be thread-safe.
 */
/*! POSIX says this should be the same as:
		close(newfd);
		fcntl(oldfd, F_DUPFD, newfd);

	We do dup2() directly to be thread-safe.
*/
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
@@ -612,6 +621,131 @@ common_close(int fd, bool kernel)
}


static ssize_t
common_user_io(int fd, off_t pos, void *buffer, size_t length, bool write)
{
	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (!descriptor)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	status_t status;
	if (write)
		status = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
	else
		status = descriptor->ops->fd_read(descriptor, pos, buffer, &length);

	if (status < B_OK)
		return syscall_restart_handle_post(status);

	if (movePosition)
		descriptor->pos = pos + length;

	return length <= SSIZE_MAX ? (ssize_t)length : SSIZE_MAX;
}


static ssize_t
common_user_vector_io(int fd, off_t pos, const iovec *userVecs, size_t count,
	bool write)
{
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (!descriptor)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	iovec* vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL)
		return B_NO_MEMORY;
	MemoryDeleter _(vecs);

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK)
		return B_BAD_ADDRESS;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	ssize_t bytesTransferred = 0;
	for (uint32 i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		status_t status;
		if (write) {
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
		} else {
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
				&length);
		}

		if (status < B_OK) {
			if (bytesTransferred == 0)
				return syscall_restart_handle_post(status);
			break;
		}

		if ((uint64)bytesTransferred + length > SSIZE_MAX)
			bytesTransferred = SSIZE_MAX;
		else
			bytesTransferred += (ssize_t)length;

		pos += length;

		if (length < vecs[i].iov_len)
			break;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesTransferred;
}

status_t
user_fd_kernel_ioctl(int fd, ulong op, void *buffer, size_t length)
{
@@ -627,243 +761,28 @@ user_fd_kernel_ioctl(int fd, ulong op, void *buffer, size_t length)
ssize_t
_user_read(int fd, off_t pos, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_read) {
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (ssize_t)length;

			if (movePosition)
				descriptor->pos = pos + length;
		}
	} else
		bytesRead = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
	return common_user_io(fd, pos, buffer, length, false);
}


ssize_t
_user_readv(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	bool movePosition = false;
	ssize_t bytesRead = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesRead = status;
				break;
			}

			if ((uint64)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = B_BAD_VALUE;

	status = bytesRead;
	if (movePosition)
		descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
	return common_user_vector_io(fd, pos, userVecs, count, false);
}


ssize_t
_user_write(int fd, off_t pos, const void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;

	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write) {
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
		if (bytesWritten >= B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (ssize_t)length;

			if (movePosition)
				descriptor->pos = pos + length;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
	return common_user_io(fd, pos, (void*)buffer, length, true);
}


ssize_t
_user_writev(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	bool movePosition = false;
	ssize_t bytesWritten = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user_function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos, vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesWritten = status;
				break;
			}

			if ((uint64)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	status = bytesWritten;
	if (movePosition)
		descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
	return common_user_vector_io(fd, pos, userVecs, count, true);
}


@@ -894,14 +813,19 @@ status_t
_user_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	int status;

	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
	struct thread *thread = thread_get_current_thread();
	atomic_or(&thread->flags, THREAD_FLAGS_IOCTL_SYSCALL);

	status_t status = fd_ioctl(false, fd, op, buffer, length);

	atomic_and(&thread->flags, ~THREAD_FLAGS_IOCTL_SYSCALL);
	return status;
}


@@ -1194,7 +1118,15 @@ _kern_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	TRACE(("kern_ioctl: fd %d\n", fd));

	return fd_ioctl(true, fd, op, buffer, length);
	struct thread *thread = thread_get_current_thread();
	bool wasSyscall = atomic_and(&thread->flags, ~THREAD_FLAGS_IOCTL_SYSCALL);

	status_t status = fd_ioctl(true, fd, op, buffer, length);

	if (wasSyscall)
		atomic_or(&thread->flags, THREAD_FLAGS_IOCTL_SYSCALL);

	return status;
}

@@ -35,6 +35,7 @@
#include <KPath.h>
#include <lock.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <vfs.h>
#include <vm.h>
#include <vm_cache.h>
@@ -4565,7 +4566,8 @@ dir_remove(int fd, char *path, bool kernel)


static status_t
common_ioctl(struct file_descriptor *descriptor, ulong op, void *buffer, size_t length)
common_ioctl(struct file_descriptor *descriptor, ulong op, void *buffer,
	size_t length)
{
	struct vnode *vnode = descriptor->u.vnode;

@@ -7467,7 +7469,11 @@ _user_open_parent_dir(int fd, char *userName, size_t nameLength)
status_t
_user_fcntl(int fd, int op, uint32 argument)
{
	return common_fcntl(fd, op, argument, false);
	status_t status = common_fcntl(fd, op, argument, false);
	if (op == F_SETLKW)
		syscall_restart_handle_post(status);

	return status;
}


@@ -7510,6 +7516,8 @@ _user_flock(int fd, int op)
			(op & LOCK_NB) == 0);
	}

	syscall_restart_handle_post(status);

	put_fd(descriptor);
	return status;
}
@@ -8,23 +8,24 @@

/*! Ports for IPC */

#include <port.h>

#include <ctype.h>
#include <iovec.h>
#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <port.h>
#include <kernel.h>
#include <sem.h>
#include <team.h>
#include <util/list.h>
#include <arch/int.h>
#include <cbuf.h>
#include <kernel.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <util/list.h>
#include <wait_for_objects.h>

#include <iovec.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>


//#define TRACE_PORTS
#ifdef TRACE_PORTS
@@ -1281,7 +1282,8 @@ _user_get_port_info(port_id id, struct port_info *userInfo)
	status = get_port_info(id, &info);

	// copy back to user space
	if (status == B_OK && user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK)
	if (status == B_OK
		&& user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
@@ -1289,7 +1291,8 @@ _user_get_port_info(port_id id, struct port_info *userInfo)


status_t
_user_get_next_port_info(team_id team, int32 *userCookie, struct port_info *userInfo)
_user_get_next_port_info(team_id team, int32 *userCookie,
	struct port_info *userInfo)
{
	struct port_info info;
	status_t status;
@@ -1305,7 +1308,8 @@ _user_get_next_port_info(team_id team, int32 *userCookie, struct port_info *user

	// copy back to user space
	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| (status == B_OK && user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK))
		|| (status == B_OK && user_memcpy(userInfo, &info,
				sizeof(struct port_info)) < B_OK))
		return B_BAD_ADDRESS;

	return status;
@@ -1315,7 +1319,12 @@ _user_get_next_port_info(team_id team, int32 *userCookie, struct port_info *user
ssize_t
_user_port_buffer_size_etc(port_id port, uint32 flags, bigtime_t timeout)
{
	return port_buffer_size_etc(port, flags | B_CAN_INTERRUPT, timeout);
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t status = port_buffer_size_etc(port, flags | B_CAN_INTERRUPT,
		timeout);

	return syscall_restart_handle_timeout_post(status, timeout);
}


@@ -1340,6 +1349,8 @@ _user_read_port_etc(port_id port, int32 *userCode, void *userBuffer,
	int32 messageCode;
	ssize_t bytesRead;

	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userBuffer == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if ((userCode != NULL && !IS_USER_ADDRESS(userCode))
@@ -1353,7 +1364,7 @@ _user_read_port_etc(port_id port, int32 *userCode, void *userBuffer,
		&& user_memcpy(userCode, &messageCode, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return bytesRead;
	return syscall_restart_handle_timeout_post(bytesRead, timeout);
}


@@ -1363,13 +1374,17 @@ _user_write_port_etc(port_id port, int32 messageCode, const void *userBuffer,
{
	iovec vec = { (void *)userBuffer, bufferSize };

	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userBuffer == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer))
		return B_BAD_ADDRESS;

	return writev_port_etc(port, messageCode, &vec, 1, bufferSize,
		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);
	status_t status = writev_port_etc(port, messageCode, &vec, 1, bufferSize,
		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);

	return syscall_restart_handle_timeout_post(status, timeout);
}


@@ -1377,14 +1392,14 @@ status_t
_user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	iovec *vecs = NULL;
	status_t status;
	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userVecs == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if (userVecs != NULL && !IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	iovec *vecs = NULL;
	if (userVecs && vecCount != 0) {
		vecs = (iovec*)malloc(sizeof(iovec) * vecCount);
		if (vecs == NULL)
@@ -1395,9 +1410,11 @@ _user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs,
			return B_BAD_ADDRESS;
		}
	}
	status = writev_port_etc(port, messageCode, vecs, vecCount, bufferSize,
		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);

	status_t status = writev_port_etc(port, messageCode, vecs, vecCount,
		bufferSize, flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT,
		timeout);

	free(vecs);
	return status;
	return syscall_restart_handle_timeout_post(status, timeout);
}
@@ -25,6 +25,7 @@
#include <vm_low_memory.h>
#include <vm_page.h>
#include <boot/kernel_args.h>
#include <syscall_restart.h>
#include <wait_for_objects.h>

#include <string.h>
@@ -886,6 +887,7 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
		&& (thread->sig_pending & KILL_SIGNALS))) {
		sSems[slot].u.used.count += count;
		status = B_INTERRUPTED;
			// the other semaphore will be released later
		goto err;
	}

@@ -919,8 +921,10 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,

		RELEASE_SEM_LOCK(sSems[slot]);

		if (semToBeReleased >= B_OK)
		if (semToBeReleased >= B_OK) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();
		// check again to see if a signal is pending.
@@ -983,6 +987,13 @@ err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
@@ -1298,28 +1309,52 @@ _user_delete_sem(sem_id id)
status_t
_user_acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	return switch_sem_etc(releaseSem, id, 1, B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags, bigtime_t timeout)
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	return switch_sem_etc(releaseSem, id, count, flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}
@@ -235,10 +235,6 @@ handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
	struct sigaction *handler;
	bool reschedule = false;
	bool restart = false;
	int32 i;

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check, if the thread shall stop for debugging.
@@ -251,9 +247,13 @@ handle_signals(struct thread *thread)
	if (signalMask == 0)
		return 0;

	bool restart = (atomic_and(&thread->flags,
		~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (i = 0; i < NSIG; i++) {
	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

@@ -275,13 +275,10 @@ handle_signals(struct thread *thread)
		// handlers to work only when the respective thread is stopped.
		// Then sigaction() could be used instead and we could get rid of
		// sigaction_etc().
		handler = &thread->sig_action[i];
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if ((handler->sa_flags & SA_RESTART) != 0)
			restart = true;

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD
@@ -290,8 +287,7 @@ handle_signals(struct thread *thread)
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		}
		if (handler->sa_handler == SIG_DFL) {
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
@@ -330,7 +326,6 @@ handle_signals(struct thread *thread)
						continue;

					thread->next_state = B_THREAD_SUSPENDED;
					reschedule = true;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
@@ -348,7 +343,8 @@ handle_signals(struct thread *thread)
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}
					continue;

					return true;

				case SIGQUIT:
				case SIGILL:
@@ -383,13 +379,18 @@ handle_signals(struct thread *thread)
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		// User defined signal handler
		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal, atomic_get(&thread->sig_block_mask));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
@@ -402,16 +403,17 @@ handle_signals(struct thread *thread)

		update_current_thread_signals_flag();

		return reschedule;
		return false;
	}

	// only restart if SA_RESTART was set on at least one handler
	if (restart)
		arch_check_syscall_restart(thread);
	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return reschedule;
	return false;
}


@@ -781,25 +783,18 @@ sigsuspend(const sigset_t *mask)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
	cpu_status state;

	// set the new block mask and suspend ourselves - we cannot use
	// SIGSTOP for this, as signals are only handled upon kernel exit
	// Set the new block mask and interuptably block wait for a condition
	// variable no one will ever notify.

	atomic_set(&thread->sig_block_mask, *mask);

	ConditionVariable<sigset_t> conditionVar;
	conditionVar.Publish(mask, "sigsuspend");

	while (true) {
		thread->next_state = B_THREAD_SUSPENDED;

		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		update_thread_signals_flag(thread);

		scheduler_reschedule();

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
		ConditionVariableEntry<sigset_t> entry;
		entry.Wait(mask, B_CAN_INTERRUPT);

		if (has_signals_pending(thread))
			break;
@@ -811,7 +806,6 @@ sigsuspend(const sigset_t *mask)
	update_current_thread_signals_flag();

	// we're not supposed to actually succeed
	// ToDo: could this get us into trouble with SA_RESTART handlers?
	return B_INTERRUPTED;
}
@@ -27,6 +27,7 @@
#include <port.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <team.h>
#include <tls.h>
@@ -2762,12 +2763,18 @@ _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t

	if (deadChild >= B_OK) {
		// copy result data on successful completion
		if ((_userReason != NULL && user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
			|| (_userReturnCode != NULL && user_memcpy(_userReturnCode, &returnCode, sizeof(status_t)) < B_OK))
		if ((_userReason != NULL
				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
			|| (_userReturnCode != NULL
				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
					< B_OK)) {
			return B_BAD_ADDRESS;
		}

		return deadChild;
	}

	return deadChild;
	return syscall_restart_handle_post(deadChild);
}


@@ -2992,9 +2999,10 @@ _user_wait_for_team(team_id id, status_t *_userReturnCode)
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return status;
	return syscall_restart_handle_post(status);
}
@@ -30,6 +30,7 @@
#include <ksignal.h>
#include <smp.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <tls.h>
#include <user_runtime.h>
@@ -1490,26 +1491,20 @@ void
thread_at_kernel_exit(void)
{
	struct thread *thread = thread_get_current_thread();
	cpu_status state;
	bigtime_t now;

	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));

	if (handle_signals(thread)) {
		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		// was: smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	while (handle_signals(thread)) {
		InterruptsSpinLocker _(thread_spinlock);
		scheduler_reschedule();
	}

		RELEASE_THREAD_LOCK();
	} else
		state = disable_interrupts();
	cpu_status state = disable_interrupts();

	thread->in_kernel = false;

	// track kernel time
	now = system_time();
	bigtime_t now = system_time();
	thread->kernel_time += now - thread->last_time;
	thread->last_time = now;

@@ -2472,7 +2467,12 @@ _user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
status_t
_user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
{
	return snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
	// NOTE: We only know the system timebase at the moment.
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);

	return syscall_restart_handle_timeout_post(error, timeout);
}


@@ -2554,10 +2554,11 @@ _user_wait_for_thread(thread_id id, status_t *userReturnCode)
	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);

	if (status == B_OK && userReturnCode != NULL
		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK)
		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
	return syscall_restart_handle_post(status);
}
@@ -1,6 +1,6 @@
/*
 * Copyright 2007, Ingo Weinhold, bonefish@cs.tu-berlin.de. All rights reserved.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2007-2008, Ingo Weinhold, bonefish@cs.tu-berlin.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

@@ -24,6 +24,7 @@
#include <port.h>
#include <sem.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
@@ -219,7 +220,7 @@ common_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,

	// wait for something to happen
	status = acquire_sem_etc(sync->sem, 1,
		B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);
		B_CAN_INTERRUPT | (timeout != -1 ? B_ABSOLUTE_TIMEOUT : 0), timeout);

	// restore the old signal mask
	if (sigMask != NULL)
@@ -324,7 +325,7 @@ common_poll(struct pollfd *fds, nfds_t numFDs, bigtime_t timeout, bool kernel)
	locker.Unlock();

	status = acquire_sem_etc(sync->sem, 1,
		B_CAN_INTERRUPT | (timeout != -1 ? B_RELATIVE_TIMEOUT : 0), timeout);
		B_CAN_INTERRUPT | (timeout != -1 ? B_ABSOLUTE_TIMEOUT : 0), timeout);

	// deselect file descriptors

@@ -641,6 +642,9 @@
ssize_t
_kern_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
	bigtime_t timeout, const sigset_t *sigMask)
{
	if (timeout >= 0)
		timeout += system_time();

	return common_select(numFDs, readSet, writeSet, errorSet, timeout,
		sigMask, true);
}
@@ -649,6 +653,9 @@ _kern_select(int numFDs, fd_set *readSet, fd_set *writeSet, fd_set *errorSet,
ssize_t
_kern_poll(struct pollfd *fds, int numFDs, bigtime_t timeout)
{
	if (timeout >= 0)
		timeout += system_time();

	return common_poll(fds, numFDs, timeout, true);
}

@@ -673,6 +680,8 @@ _user_select(int numFDs, fd_set *userReadSet, fd_set *userWriteSet,
	sigset_t sigMask;
	int result;

	syscall_restart_handle_timeout_pre(timeout);

	if (numFDs < 0)
		return B_BAD_VALUE;

@@ -733,8 +742,10 @@
		|| (writeSet != NULL
			&& user_memcpy(userWriteSet, writeSet, bytes) < B_OK)
		|| (errorSet != NULL
			&& user_memcpy(userErrorSet, errorSet, bytes) < B_OK)))
			&& user_memcpy(userErrorSet, errorSet, bytes) < B_OK))) {
		result = B_BAD_ADDRESS;
	} else
		syscall_restart_handle_timeout_post(result, timeout);

err:
	free(readSet);
@@ -752,6 +763,8 @@ _user_poll(struct pollfd *userfds, int numFDs, bigtime_t timeout)
	size_t bytes;
	int result;

	syscall_restart_handle_timeout_pre(timeout);

	if (numFDs < 0)
		return B_BAD_VALUE;

@@ -774,6 +787,8 @@ _user_poll(struct pollfd *userfds, int numFDs, bigtime_t timeout)
	// copy back results
	if (result >= B_OK && user_memcpy(userfds, fds, bytes) < B_OK)
		result = B_BAD_ADDRESS;
	else
		syscall_restart_handle_timeout_post(result, timeout);

err:
	free(fds);
@@ -786,6 +801,8 @@
ssize_t
_user_wait_for_objects(object_wait_info* userInfos, int numInfos, uint32 flags,
	bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	if (numInfos < 0)
		return B_BAD_VALUE;

@@ -804,8 +821,10 @@ _user_wait_for_objects(object_wait_info* userInfos, int numInfos, uint32 flags,
		result = common_wait_for_objects(infos, numInfos, flags, timeout,
			false);

		if (user_memcpy(userInfos, infos, bytes) != B_OK)
		if (result >= 0 && user_memcpy(userInfos, infos, bytes) != B_OK)
			result = B_BAD_ADDRESS;
		else
			syscall_restart_handle_timeout_post(result, timeout);
	} else
		result = B_BAD_ADDRESS;