- Add macros to handle (some) trapframe registers for common x86 code.

- Merge i386 and amd64 syscall.c into x86.  No functional changes intended.

Proposed on the port-i386 and port-amd64 mailing lists.  Unfortunately, I
cannot merge those lists into a single port-x86. :(
rmind committed 2009-04-16 15:34:23 +00:00
parent 1408cbc26a
commit 3de18e79b4
6 changed files with 75 additions and 198 deletions

sys/arch/amd64/amd64/syscall.c (deleted)

@@ -1,168 +0,0 @@
/* $NetBSD: syscall.c,v 1.45 2009/03/29 01:10:28 rmind Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.45 2009/03/29 01:10:28 rmind Exp $");
#include "opt_sa.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/signal.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/ktrace.h>
#include <sys/syscall.h>
#include <sys/syscallvar.h>
#include <sys/syscall_stats.h>
#include <uvm/uvm_extern.h>
#include <machine/cpu.h>
#include <machine/psl.h>
#include <machine/userret.h>
void syscall_intern(struct proc *);
static void syscall(struct trapframe *);
void
child_return(void *arg)
{
struct lwp *l = arg;
struct trapframe *tf = l->l_md.md_regs;
tf->tf_rax = 0;
tf->tf_rflags &= ~PSL_C;
userret(l);
ktrsysret(SYS_fork, 0, 0);
}
void
syscall_intern(struct proc *p)
{
p->p_md.md_syscall = syscall;
}
/*
* syscall(frame):
* System call request from POSIX system call gate interface to kernel.
* Like trap(), argument is call by reference.
*/
static void
syscall(struct trapframe *frame)
{
const struct sysent *callp;
struct proc *p;
struct lwp *l;
int error;
register_t code, rval[2];
#define args (&frame->tf_rdi)
/* Verify that the syscall args will fit in the trapframe space */
typedef char foo[offsetof(struct trapframe, tf_arg9)
>= sizeof (register_t) * (2 + SYS_MAXSYSARGS - 1) ? 1 : -1];
l = curlwp;
p = l->l_proc;
LWP_CACHE_CREDS(l, p);
code = frame->tf_rax & (SYS_NSYSENT - 1);
callp = p->p_emul->e_sysent + code;
SYSCALL_COUNT(syscall_counts, code);
SYSCALL_TIME_SYS_ENTRY(l, syscall_times, code);
#ifdef KERN_SA
if (__predict_false((l->l_savp)
&& (l->l_savp->savp_pflags & SAVP_FLAG_DELIVERING)))
l->l_savp->savp_pflags &= ~SAVP_FLAG_DELIVERING;
#endif
/*
* The first 6 syscall args are passed in rdi, rsi, rdx, r10, r8 and r9
* (rcx gets copied to r10 in the libc stub because the syscall
* instruction overwrites %cx) and are together in the trap frame
* with space following for 4 more entries.
*/
if (__predict_false(callp->sy_argsize > 6 * 8)) {
error = copyin((register_t *)frame->tf_rsp + 1,
&frame->tf_arg6, callp->sy_argsize - 6 * 8);
if (error != 0)
goto bad;
/* Refetch to avoid register spill to stack */
code = frame->tf_rax & (SYS_NSYSENT - 1);
}
if (!__predict_false(p->p_trace_enabled)
|| __predict_false(callp->sy_flags & SYCALL_INDIRECT)
|| (error = trace_enter(code, args, callp->sy_narg)) == 0) {
rval[0] = 0;
rval[1] = 0;
KASSERT(l->l_holdcnt == 0);
error = sy_call(callp, l, args, rval);
}
if (__predict_false(p->p_trace_enabled)
&& !__predict_false(callp->sy_flags & SYCALL_INDIRECT)) {
code = frame->tf_rax & (SYS_NSYSENT - 1);
trace_exit(code, rval, error);
}
if (__predict_true(error == 0)) {
frame->tf_rax = rval[0];
frame->tf_rdx = rval[1];
frame->tf_rflags &= ~PSL_C; /* carry bit */
} else {
switch (error) {
case ERESTART:
/*
* The offset to adjust the PC by depends on whether we
* entered the kernel through the trap or call gate.
* We saved the instruction size in tf_err on entry.
*/
frame->tf_rip -= frame->tf_err;
break;
case EJUSTRETURN:
/* nothing to do */
break;
default:
bad:
frame->tf_rax = error;
frame->tf_rflags |= PSL_C; /* carry bit */
break;
}
}
SYSCALL_TIME_SYS_EXIT(l);
userret(l);
}
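
The file above checks at compile time that the syscall arguments fit into the
trapframe via the negative-array-size idiom (the "typedef char foo[...]" near
the top of syscall()); the merged file further below expresses the same check
with CTASSERT().  A minimal standalone sketch of the idiom, using illustrative
names that are not from the tree:

#include <stddef.h>

struct frame_sketch {			/* stand-in for struct trapframe */
	long	f_regs[16];		/* register save area */
	long	f_extra[4];		/* spill space for extra arguments */
};

/*
 * If the condition is false the array size becomes -1 and the file
 * fails to compile; if it is true the typedef is harmless.  CTASSERT()
 * wraps the same idea behind a readable name.
 */
typedef char frame_big_enough[
    offsetof(struct frame_sketch, f_extra) >= sizeof(long) * 16 ? 1 : -1];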

sys/arch/amd64/conf/files.amd64

@@ -1,4 +1,4 @@
# $NetBSD: files.amd64,v 1.65 2009/03/30 22:28:39 rmind Exp $
# $NetBSD: files.amd64,v 1.66 2009/04/16 15:34:23 rmind Exp $
#
# new style config file for amd64 architecture
#
@@ -45,7 +45,6 @@ file arch/amd64/amd64/machdep.c
file arch/amd64/amd64/mem.c
file arch/amd64/amd64/process_machdep.c
file arch/amd64/amd64/procfs_machdep.c procfs
file arch/amd64/amd64/syscall.c
file arch/amd64/amd64/trap.c
file arch/amd64/amd64/fpu.c
file arch/amd64/amd64/lock_stubs.S

sys/arch/i386/conf/files.i386

@@ -1,4 +1,4 @@
# $NetBSD: files.i386,v 1.347 2009/03/30 22:28:39 rmind Exp $
# $NetBSD: files.i386,v 1.348 2009/04/16 15:34:23 rmind Exp $
#
# new style config file for i386 architecture
#
@@ -82,7 +82,6 @@ file arch/i386/i386/mem.c
file arch/i386/i386/mtrr_k6.c mtrr
file arch/i386/i386/process_machdep.c
file arch/i386/i386/procfs_machdep.c procfs
file arch/i386/i386/syscall.c
file arch/i386/i386/trap.c
file dev/cons.c

sys/arch/x86/conf/files.x86

@@ -1,4 +1,4 @@
# $NetBSD: files.x86,v 1.49 2009/04/07 18:24:23 dyoung Exp $
# $NetBSD: files.x86,v 1.50 2009/04/16 15:34:23 rmind Exp $
# options for MP configuration through the MP spec
defflag opt_mpbios.h MPBIOS MPVERBOSE MPDEBUG MPBIOS_SCANPCI
@@ -63,6 +63,7 @@ file arch/x86/x86/patch.c
file arch/x86/x86/platform.c
file arch/x86/x86/pmap.c
file arch/x86/x86/sys_machdep.c
file arch/x86/x86/syscall.c
file arch/x86/x86/vm_machdep.c
file arch/x86/x86/x86_autoconf.c
file arch/x86/x86/x86_machdep.c

sys/arch/x86/include/cpu.h

@@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.14 2009/03/30 09:51:37 tsutsui Exp $ */
/* $NetBSD: cpu.h,v 1.15 2009/04/16 15:34:23 rmind Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@@ -198,6 +198,23 @@ struct cpu_info {
int ci_padout __aligned(64);
};
/*
* Macros to handle (some) trapframe registers for common x86 code.
*/
#ifdef __x86_64__
#define X86_TF_RAX(tf) tf->tf_rax
#define X86_TF_RDX(tf) tf->tf_rdx
#define X86_TF_RSP(tf) tf->tf_rsp
#define X86_TF_RIP(tf) tf->tf_rip
#define X86_TF_RFLAGS(tf) tf->tf_rflags
#else
#define X86_TF_RAX(tf) tf->tf_eax
#define X86_TF_RDX(tf) tf->tf_edx
#define X86_TF_RSP(tf) tf->tf_esp
#define X86_TF_RIP(tf) tf->tf_eip
#define X86_TF_RFLAGS(tf) tf->tf_eflags
#endif
/*
* Processor flag notes: The "primary" CPU has certain MI-defined
* roles (mostly relating to hardclock handling); we distinguish
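
With these macros, common x86 code can update the return-value, stack, program
counter and flags slots of the trapframe without an #ifdef at every use site.
A small usage sketch mirroring the success path of the merged syscall() below;
the helper name is made up for illustration, and <machine/psl.h> is assumed
for PSL_C:

/* Sketch: record a successful syscall return from common x86 code. */
static inline void
syscall_set_success(struct trapframe *tf, register_t rv0, register_t rv1)
{
	X86_TF_RAX(tf) = rv0;		/* tf_eax on i386, tf_rax on amd64 */
	X86_TF_RDX(tf) = rv1;		/* secondary return value */
	X86_TF_RFLAGS(tf) &= ~PSL_C;	/* clear carry: no error */
}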

sys/arch/x86/x86/syscall.c (new; derived from the i386 syscall.c)

@@ -1,7 +1,7 @@
/* $NetBSD: syscall.c,v 1.59 2009/03/29 01:10:28 rmind Exp $ */
/* $NetBSD: syscall.c,v 1.1 2009/04/16 15:34:23 rmind Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
* Copyright (c) 1998, 2000, 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -30,9 +30,8 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.59 2009/03/29 01:10:28 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.1 2009/04/16 15:34:23 rmind Exp $");
#include "opt_vm86.h"
#include "opt_sa.h"
#include <sys/param.h>
@@ -53,11 +52,16 @@ __KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.59 2009/03/29 01:10:28 rmind Exp $");
#include <machine/psl.h>
#include <machine/userret.h>
static void syscall(struct trapframe *);
int x86_copyargs(void *, void *, size_t);
#ifndef __x86_64__
#include "opt_vm86.h"
#ifdef VM86
void syscall_vm86(struct trapframe *);
void syscall_vm86(struct trapframe *);
#endif
int x86_copyargs(void *, void *, size_t);
#endif
void syscall_intern(struct proc *);
static void syscall(struct trapframe *);
void
child_return(void *arg)
@@ -65,8 +69,8 @@ child_return(void *arg)
struct lwp *l = arg;
struct trapframe *tf = l->l_md.md_regs;
tf->tf_eax = 0;
tf->tf_eflags &= ~PSL_C;
X86_TF_RAX(tf) = 0;
X86_TF_RFLAGS(tf) &= ~PSL_C;
userret(l);
ktrsysret(SYS_fork, 0, 0);
@@ -82,7 +86,7 @@ syscall_intern(struct proc *p)
/*
* syscall(frame):
* System call request from POSIX system call gate interface to kernel.
* Like trap(), argument is call by reference.
* Like trap(), argument is call by reference.
*/
static void
syscall(struct trapframe *frame)
@@ -91,13 +95,21 @@ syscall(struct trapframe *frame)
struct proc *p;
struct lwp *l;
int error;
register_t code, args[2 + SYS_MAXSYSARGS], rval[2];
register_t code, rval[2];
#ifdef __x86_64__
/* Verify that the syscall args will fit in the trapframe space */
CTASSERT(offsetof(struct trapframe, tf_arg9) >=
sizeof(register_t) * (2 + SYS_MAXSYSARGS - 1));
#define args (&frame->tf_rdi)
#else
register_t args[2 + SYS_MAXSYSARGS];
#endif
l = curlwp;
p = l->l_proc;
LWP_CACHE_CREDS(l, p);
code = frame->tf_eax & (SYS_NSYSENT - 1);
code = X86_TF_RAX(frame) & (SYS_NSYSENT - 1);
callp = p->p_emul->e_sysent + code;
SYSCALL_COUNT(syscall_counts, code);
@@ -105,21 +117,36 @@ syscall(struct trapframe *frame)
#ifdef KERN_SA
if (__predict_false((l->l_savp)
&& (l->l_savp->savp_pflags & SAVP_FLAG_DELIVERING)))
&& (l->l_savp->savp_pflags & SAVP_FLAG_DELIVERING)))
l->l_savp->savp_pflags &= ~SAVP_FLAG_DELIVERING;
#endif
#ifdef __x86_64__
/*
* The first 6 syscall args are passed in rdi, rsi, rdx, r10, r8 and r9
* (rcx gets copied to r10 in the libc stub because the syscall
* instruction overwrites %cx) and are together in the trap frame
* with space following for 4 more entries.
*/
if (__predict_false(callp->sy_argsize > 6 * 8)) {
error = copyin((register_t *)frame->tf_rsp + 1,
&frame->tf_arg6, callp->sy_argsize - 6 * 8);
if (error != 0)
goto bad;
/* Refetch to avoid register spill to stack */
code = frame->tf_rax & (SYS_NSYSENT - 1);
}
#else
if (callp->sy_argsize) {
error = x86_copyargs((char *)frame->tf_esp + sizeof(int), args,
callp->sy_argsize);
if (__predict_false(error != 0))
goto bad;
}
#endif
if (!__predict_false(p->p_trace_enabled)
|| __predict_false(callp->sy_flags & SYCALL_INDIRECT)
|| (error = trace_enter(frame->tf_eax & (SYS_NSYSENT - 1),
args, callp->sy_narg)) == 0) {
|| (error = trace_enter(code, args, callp->sy_narg)) == 0) {
rval[0] = 0;
rval[1] = 0;
KASSERT(l->l_holdcnt == 0);
@@ -128,14 +155,14 @@ syscall(struct trapframe *frame)
if (__predict_false(p->p_trace_enabled)
&& !__predict_false(callp->sy_flags & SYCALL_INDIRECT)) {
code = frame->tf_eax & (SYS_NSYSENT - 1);
code = X86_TF_RAX(frame) & (SYS_NSYSENT - 1);
trace_exit(code, rval, error);
}
if (__predict_true(error == 0)) {
frame->tf_eax = rval[0];
frame->tf_edx = rval[1];
frame->tf_eflags &= ~PSL_C; /* carry bit */
X86_TF_RAX(frame) = rval[0];
X86_TF_RDX(frame) = rval[1];
X86_TF_RFLAGS(frame) &= ~PSL_C; /* carry bit */
} else {
switch (error) {
case ERESTART:
@@ -144,15 +171,15 @@ syscall(struct trapframe *frame)
* entered the kernel through the trap or call gate.
* We saved the instruction size in tf_err on entry.
*/
frame->tf_eip -= frame->tf_err;
X86_TF_RIP(frame) -= frame->tf_err;
break;
case EJUSTRETURN:
/* nothing to do */
break;
default:
bad:
frame->tf_eax = error;
frame->tf_eflags |= PSL_C; /* carry bit */
X86_TF_RAX(frame) = error;
X86_TF_RFLAGS(frame) |= PSL_C; /* carry bit */
break;
}
}
@@ -162,6 +189,7 @@ syscall(struct trapframe *frame)
}
#ifdef VM86
void
syscall_vm86(struct trapframe *frame)
{
@@ -181,11 +209,12 @@ syscall_vm86(struct trapframe *frame)
#ifdef KERN_SA
/* While this is probably not needed, it's probably better to include than not */
if (__predict_false((l->l_savp)
&& (l->l_savp->savp_pflags & SAVP_FLAG_DELIVERING)))
&& (l->l_savp->savp_pflags & SAVP_FLAG_DELIVERING)))
l->l_savp->savp_pflags &= ~SAVP_FLAG_DELIVERING;
#endif
(*p->p_emul->e_trapsignal)(l, &ksi);
userret(l);
}
#endif
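
On amd64 the argument vector handed to sy_call() is just an alias into the
trapframe: the first six arguments arrive in %rdi, %rsi, %rdx, %r10, %r8 and
%r9, whose save slots sit back to back with tf_arg6..tf_arg9 following as
spill space, so "#define args (&frame->tf_rdi)" plus the CTASSERT is all the
amd64 path needs, while i386 copies the arguments from the user stack with
x86_copyargs().  A reduced sketch of the layout this relies on (field order
abridged from the comments above; not the full struct trapframe definition):

#include <sys/types.h>		/* register_t */

/*
 * Sketch only: the six register-argument slots are consecutive, with
 * room after them for arguments 7..10 that syscall() copyin()s from
 * the user stack, so &tf_rdi can serve as the register_t argument array.
 */
struct trapframe_arg_area {
	register_t tf_rdi;	/* arg 0 */
	register_t tf_rsi;	/* arg 1 */
	register_t tf_rdx;	/* arg 2 */
	register_t tf_r10;	/* arg 3 (%rcx is clobbered by the syscall insn) */
	register_t tf_r8;	/* arg 4 */
	register_t tf_r9;	/* arg 5 */
	register_t tf_arg6;	/* args 6..9 */
	register_t tf_arg7;
	register_t tf_arg8;
	register_t tf_arg9;
	/* ...remaining saved registers follow... */
};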