/*
 * safe-syscall.inc.S : host-specific assembly fragment
 * to handle signals occurring at the same time as system calls.
 * This is intended to be included by common-user/safe-syscall.S
 *
 * Written by Richard Henderson <rth@twiddle.net>
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

2021-11-23 13:44:55 +03:00
|
|
|
.global safe_syscall_base
|
|
|
|
.global safe_syscall_start
|
|
|
|
.global safe_syscall_end
|
|
|
|
.type safe_syscall_base, @function
|
2016-06-22 03:32:09 +03:00
|
|
|
|
2021-11-23 13:44:55 +03:00
|
|
|
/* This is the entry point for making a system call. The calling
|
|
|
|
* convention here is that of a C varargs function with the
|
|
|
|
* first argument an 'int *' to the signal_pending flag, the
|
|
|
|
* second one the system call number (as a 'long'), and all further
|
|
|
|
* arguments being syscall arguments (also 'long').
|
|
|
|
*/
|
2016-06-22 03:32:09 +03:00
|
|
|
safe_syscall_base:
|
2021-11-23 13:44:55 +03:00
|
|
|
.cfi_startproc
|
|
|
|
push %ebp
|
|
|
|
.cfi_adjust_cfa_offset 4
|
|
|
|
.cfi_rel_offset ebp, 0
|
|
|
|
push %esi
|
|
|
|
.cfi_adjust_cfa_offset 4
|
|
|
|
.cfi_rel_offset esi, 0
|
|
|
|
push %edi
|
|
|
|
.cfi_adjust_cfa_offset 4
|
|
|
|
.cfi_rel_offset edi, 0
|
|
|
|
push %ebx
|
|
|
|
.cfi_adjust_cfa_offset 4
|
|
|
|
.cfi_rel_offset ebx, 0
|
2016-06-22 03:32:09 +03:00
|
|
|
|
2021-11-23 13:44:55 +03:00
|
|
|
/* The syscall calling convention isn't the same as the C one:
|
|
|
|
* we enter with 0(%esp) == return address
|
2021-11-15 16:08:52 +03:00
|
|
|
* 4(%esp) == &signal_pending
|
2021-11-23 13:44:55 +03:00
|
|
|
* 8(%esp) == syscall number
|
|
|
|
* 12(%esp) ... 32(%esp) == syscall arguments
|
|
|
|
* and return the result in eax
|
|
|
|
* and the syscall instruction needs
|
|
|
|
* eax == syscall number
|
|
|
|
* ebx, ecx, edx, esi, edi, ebp == syscall arguments
|
|
|
|
* and returns the result in eax
|
|
|
|
* Shuffle everything around appropriately.
|
|
|
|
* Note the 16 bytes that we pushed to save registers.
|
|
|
|
*/
|
|
|
|
mov 12+16(%esp), %ebx /* the syscall arguments */
|
|
|
|
mov 16+16(%esp), %ecx
|
|
|
|
mov 20+16(%esp), %edx
|
|
|
|
mov 24+16(%esp), %esi
|
|
|
|
mov 28+16(%esp), %edi
|
|
|
|
mov 32+16(%esp), %ebp
|
2016-06-22 03:32:09 +03:00
|
|
|
|
2021-11-23 13:44:55 +03:00
|
|
|
/* This next sequence of code works in conjunction with the
|
|
|
|
* rewind_if_safe_syscall_function(). If a signal is taken
|
|
|
|
* and the interrupted PC is anywhere between 'safe_syscall_start'
|
|
|
|
* and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
|
|
|
|
* The code sequence must therefore be able to cope with this, and
|
|
|
|
* the syscall instruction must be the final one in the sequence.
|
|
|
|
*/
|
2016-06-22 03:32:09 +03:00
|
|
|
safe_syscall_start:
|
2021-11-23 13:44:55 +03:00
|
|
|
/* if signal_pending is non-zero, don't do the call */
|
|
|
|
mov 4+16(%esp), %eax /* signal_pending */
|
|
|
|
cmpl $0, (%eax)
|
2021-11-15 16:08:52 +03:00
|
|
|
jnz 2f
|
2021-11-23 13:44:55 +03:00
|
|
|
mov 8+16(%esp), %eax /* syscall number */
|
|
|
|
int $0x80
|
2016-06-22 03:32:09 +03:00
|
|
|
safe_syscall_end:
|
2021-11-23 17:18:49 +03:00
|
|
|
|
2021-11-23 13:44:55 +03:00
|
|
|
/* code path for having successfully executed the syscall */
|
2021-11-23 17:18:49 +03:00
|
|
|
#if defined(__linux__)
|
|
|
|
/* Linux kernel returns (small) negative errno. */
|
2021-11-15 16:08:52 +03:00
|
|
|
cmp $-4095, %eax
|
|
|
|
jae 0f
|
2021-11-23 17:18:49 +03:00
|
|
|
#elif defined(__FreeBSD__)
|
|
|
|
/* FreeBSD kernel returns positive errno and C bit set. */
|
|
|
|
jc 1f
|
|
|
|
#else
|
|
|
|
#error "unsupported os"
|
|
|
|
#endif
|
2021-11-23 13:44:55 +03:00
|
|
|
pop %ebx
|
|
|
|
.cfi_remember_state
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore ebx
|
|
|
|
pop %edi
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore edi
|
|
|
|
pop %esi
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore esi
|
|
|
|
pop %ebp
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore ebp
|
|
|
|
ret
|
2021-11-15 16:08:52 +03:00
|
|
|
.cfi_restore_state
|
|
|
|
|
2021-11-23 17:18:49 +03:00
|
|
|
#if defined(__linux__)
|
2021-11-15 16:08:52 +03:00
|
|
|
0: neg %eax
|
|
|
|
jmp 1f
|
2021-11-23 17:18:49 +03:00
|
|
|
#endif
|
2016-06-22 03:32:09 +03:00
|
|
|
|
2021-11-23 13:44:55 +03:00
|
|
|
/* code path when we didn't execute the syscall */
|
2021-11-22 21:47:33 +03:00
|
|
|
2: mov $QEMU_ERESTARTSYS, %eax
|
2016-06-22 03:32:09 +03:00
|
|
|
|
2021-11-15 16:08:52 +03:00
|
|
|
/* code path setting errno */
|
|
|
|
1: pop %ebx
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore ebx
|
|
|
|
pop %edi
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore edi
|
|
|
|
pop %esi
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore esi
|
|
|
|
pop %ebp
|
|
|
|
.cfi_adjust_cfa_offset -4
|
|
|
|
.cfi_restore ebp
|
2022-01-04 22:00:35 +03:00
|
|
|
mov %eax, (%esp)
|
2021-11-15 16:08:52 +03:00
|
|
|
jmp safe_syscall_set_errno_tail
|
|
|
|
|
|
|
|
.cfi_endproc
|
2021-11-23 13:44:55 +03:00
|
|
|
.size safe_syscall_base, .-safe_syscall_base
|