Merge the nathanw_sa branch.

thorpej 2003-01-18 06:44:56 +00:00
parent 49784e4bd0
commit c464d72f40
33 changed files with 1061 additions and 465 deletions
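The change repeated throughout this diff is the nathanw_sa conversion of the sparc machine-dependent code from struct proc to struct lwp: per-thread state (trapframe, FPU state, PCB) now hangs off the LWP, the per-CPU pointer becomes curlwp, and process-wide data is reached through l->l_proc. Below is a minimal sketch of that pattern, not taken from the commit; example_md_hook is a hypothetical function, while the field and macro names (l_proc, l_md, l_lid, curlwp) are the ones this diff itself introduces.

/* Hedged sketch of the proc -> lwp conversion pattern; illustration only. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lwp.h>
#include <machine/cpu.h>	/* provides the sparc curlwp macro */

void
example_md_hook(struct lwp *l)	/* formerly: struct proc *p */
{
	struct proc *p = l->l_proc;	/* process-wide data lives here */

	/* Per-LWP MD state (md_tf, md_fpstate) moved from mdproc to mdlwp. */
	if (l == curlwp && l->l_md.md_fpstate != NULL)
		printf("pid %d lwp %d has FPU state\n", p->p_pid, l->l_lid);
}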

View File: vme_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: vme_machdep.c,v 1.44 2003/01/03 11:57:46 mrg Exp $ */
/* $NetBSD: vme_machdep.c,v 1.45 2003/01/18 06:44:59 thorpej Exp $ */
/*-
* Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@ -742,10 +742,10 @@ vmeintr4m(arg)
int s;
s = splhigh();
if (curproc == NULL)
if (curlwp == NULL)
xpcb = (struct pcb *)proc0paddr;
else
xpcb = &curproc->p_addr->u_pcb;
xpcb = &curlwp->l_addr->u_pcb;
saveonfault = (u_long)xpcb->pcb_onfault;
vec = fkbyte(addr, xpcb);

View File: fpu.c

@ -1,4 +1,4 @@
/* $NetBSD: fpu.c,v 1.16 2003/01/06 18:32:33 pk Exp $ */
/* $NetBSD: fpu.c,v 1.17 2003/01/18 06:45:00 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -117,8 +117,8 @@ static u_char fpu_codes[] = {
* unknown FPops do enter the queue, however.
*/
void
fpu_cleanup(p, fs)
register struct proc *p;
fpu_cleanup(l, fs)
register struct lwp *l;
#ifndef SUN4U
register struct fpstate *fs;
#else /* SUN4U */
@ -126,6 +126,7 @@ fpu_cleanup(p, fs)
#endif /* SUN4U */
{
register int i, fsr = fs->fs_fsr, error;
struct proc *p = l->l_proc;
union instr instr;
struct fpemu fe;
@ -140,9 +141,9 @@ fpu_cleanup(p, fs)
/* XXX missing trap address! */
if ((i = fsr & FSR_CX) == 0)
panic("fpu ieee trap, but no exception");
KERNEL_PROC_LOCK(p);
trapsignal(p, SIGFPE, fpu_codes[i - 1]);
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_LOCK(l);
trapsignal(l, SIGFPE, fpu_codes[i - 1]);
KERNEL_PROC_UNLOCK(l);
break; /* XXX should return, but queue remains */
case FSR_TT_UNFIN:
@ -171,9 +172,9 @@ fpu_cleanup(p, fs)
log(LOG_ERR, "fpu hardware error (%s[%d])\n",
p->p_comm, p->p_pid);
uprintf("%s[%d]: fpu hardware error\n", p->p_comm, p->p_pid);
KERNEL_PROC_LOCK(p);
trapsignal(p, SIGFPE, -1); /* ??? */
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_LOCK(l);
trapsignal(l, SIGFPE, -1); /* ??? */
KERNEL_PROC_UNLOCK(l);
goto out;
default:
@ -193,10 +194,10 @@ fpu_cleanup(p, fs)
if (error == 0)
continue;
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
switch (error) {
case FPE:
trapsignal(p, SIGFPE,
trapsignal(l, SIGFPE,
fpu_codes[(fs->fs_fsr & FSR_CX) - 1]);
break;
@ -207,14 +208,14 @@ fpu_cleanup(p, fs)
Debugger();
#endif
#endif /* SUN4U */
trapsignal(p, SIGILL, 0); /* ??? code? */
trapsignal(l, SIGILL, 0); /* ??? code? */
break;
default:
panic("fpu_cleanup 3");
/* NOTREACHED */
}
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
/* XXX should stop here, but queue remains */
}
out:
@ -229,8 +230,8 @@ out:
* We know the `queue' is empty, though; we just want to emulate
* the instruction at tf->tf_pc.
*/
fpu_emulate(p, tf, fs)
struct proc *p;
fpu_emulate(l, tf, fs)
struct lwp *l;
register struct trapframe *tf;
#ifndef SUN4U
register struct fpstate *fs;
@ -247,10 +248,10 @@ fpu_emulate(p, tf, fs)
* We do this here, rather than earlier, to avoid
* losing even more badly than usual.
*/
if (p->p_addr->u_pcb.pcb_uw) {
if (l->l_addr->u_pcb.pcb_uw) {
write_user_windows();
if (rwindow_save(p))
sigexit(p, SIGILL);
if (rwindow_save(l))
sigexit(l, SIGILL);
}
if (loadstore) {
do_it;
@ -366,64 +367,64 @@ fpu_execute(fe, instr)
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVIC >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVIC\n"));
cond = (curproc->p_md.md_tf->tf_tstate>>TSTATE_CCR_SHIFT)&PSR_ICC;
cond = (curlwp->l_md.md_tf->tf_tstate>>TSTATE_CCR_SHIFT)&PSR_ICC;
if (instr.i_fmovcc.i_cond != cond) return(0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVXC >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVXC\n"));
cond = (curproc->p_md.md_tf->tf_tstate>>(TSTATE_CCR_SHIFT+XCC_SHIFT))&PSR_ICC;
cond = (curlwp->l_md.md_tf->tf_tstate>>(TSTATE_CCR_SHIFT+XCC_SHIFT))&PSR_ICC;
if (instr.i_fmovcc.i_cond != cond) return(0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 != 0 && (int64_t)curproc->p_md.md_tf->tf_global[rs1] != 0)
if (rs1 != 0 && (int64_t)curlwp->l_md.md_tf->tf_global[rs1] != 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRLEZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRLEZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 != 0 && (int64_t)curproc->p_md.md_tf->tf_global[rs1] > 0)
if (rs1 != 0 && (int64_t)curlwp->l_md.md_tf->tf_global[rs1] > 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRLZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRLZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 == 0 || (int64_t)curproc->p_md.md_tf->tf_global[rs1] >= 0)
if (rs1 == 0 || (int64_t)curlwp->l_md.md_tf->tf_global[rs1] >= 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRNZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRNZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 == 0 || (int64_t)curproc->p_md.md_tf->tf_global[rs1] == 0)
if (rs1 == 0 || (int64_t)curlwp->l_md.md_tf->tf_global[rs1] == 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRGZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRGZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 == 0 || (int64_t)curproc->p_md.md_tf->tf_global[rs1] <= 0)
if (rs1 == 0 || (int64_t)curlwp->l_md.md_tf->tf_global[rs1] <= 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;
case FMVRGEZ >> 2:
/* Presume we're curproc */
/* Presume we're curlwp */
DPRINTF(FPE_INSN, ("fpu_execute: FMVRGEZ\n"));
rs1 = instr.i_fmovr.i_rs1;
if (rs1 != 0 && (int64_t)curproc->p_md.md_tf->tf_global[rs1] < 0)
if (rs1 != 0 && (int64_t)curlwp->l_md.md_tf->tf_global[rs1] < 0)
return (0); /* success */
rs1 = fs->fs_regs[rs2];
goto mov;

View File: fpu_extern.h

@ -1,4 +1,4 @@
/* $NetBSD: fpu_extern.h,v 1.5 2001/12/04 00:05:05 darrenr Exp $ */
/* $NetBSD: fpu_extern.h,v 1.6 2003/01/18 06:45:00 thorpej Exp $ */
/*-
* Copyright (c) 1995 The NetBSD Foundation, Inc.
@ -54,11 +54,11 @@ struct fpn;
/* fpu.c */
#ifndef SUN4U
void fpu_cleanup __P((struct proc *, struct fpstate *));
int fpu_emulate __P((struct proc *, struct trapframe *, struct fpstate *));
void fpu_cleanup __P((struct lwp *, struct fpstate *));
int fpu_emulate __P((struct lwp *, struct trapframe *, struct fpstate *));
#else /* SUN4U */
void fpu_cleanup __P((struct proc *, struct fpstate64 *));
int fpu_emulate __P((struct proc *, struct trapframe64 *, struct fpstate64 *));
void fpu_cleanup __P((struct lwp *, struct fpstate64 *));
int fpu_emulate __P((struct lwp *, struct trapframe64 *, struct fpstate64 *));
#endif /* SUN4U */
int fpu_execute __P((struct fpemu *, union instr));

View File: Makefile

@ -1,4 +1,4 @@
# $NetBSD: Makefile,v 1.24 2002/11/26 23:30:23 lukem Exp $
# $NetBSD: Makefile,v 1.25 2003/01/18 06:44:57 thorpej Exp $
INCSDIR= /usr/include/sparc
@ -12,7 +12,7 @@ INCS= ansi.h aout_machdep.h apmvar.h asm.h autoconf.h \
int_const.h int_fmtio.h int_limits.h int_mwgwtypes.h int_types.h \
kbd.h kbio.h kcore.h \
limits.h lock.h \
math.h \
math.h mcontext.h \
oldmon.h openpromio.h \
param.h pcb.h pmap.h pmc.h proc.h profile.h psl.h pte.h ptrace.h \
reg.h reloc.h \

View File: cpu.h

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.61 2003/01/16 16:57:44 pk Exp $ */
/* $NetBSD: cpu.h,v 1.62 2003/01/18 06:44:57 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -85,13 +85,14 @@
* referenced in generic code
*/
#define curcpu() (cpuinfo.ci_self)
#define curproc (cpuinfo.ci_curproc)
#define curlwp (cpuinfo.ci_curlwp)
#define CPU_IS_PRIMARY(ci) ((ci)->master)
#define cpu_swapin(p) /* nothing */
#define cpu_swapout(p) /* nothing */
#define cpu_wait(p) /* nothing */
#define cpu_number() (cpuinfo.ci_cpuid)
#define cpu_swapin(p) /* nothing */
#define cpu_swapout(p) /* nothing */
#define cpu_wait(p) /* nothing */
#define cpu_number() (cpuinfo.ci_cpuid)
#define cpu_proc_fork(p1, p2) /* nothing */
#if defined(MULTIPROCESSOR)
void cpu_boot_secondary_processors __P((void));
@ -202,7 +203,7 @@ int probeget(caddr_t, int);
void write_all_windows(void);
void write_user_windows(void);
void proc_trampoline(void);
void switchexit(struct proc *);
void switchexit(struct lwp *, void (*)(struct lwp *));
struct pcb;
void snapshot(struct pcb *);
struct frame *getfp(void);
@ -211,8 +212,8 @@ void copywords(const void *, void *, size_t);
void qcopy(const void *, void *, size_t);
void qzero(void *, size_t);
/* trap.c */
void kill_user_windows(struct proc *);
int rwindow_save(struct proc *);
void kill_user_windows(struct lwp *);
int rwindow_save(struct lwp *);
/* cons.c */
int cnrom(void);
/* zs.c */
@ -232,7 +233,7 @@ void kgdb_panic(void);
#endif
/* emul.c */
struct trapframe;
int fixalign(struct proc *, struct trapframe *);
int fixalign(struct lwp *, struct trapframe *);
int emulinstr(int, struct trapframe *);
/* cpu.c */
void mp_pause_cpus(void);

View File: mcontext.h

@ -0,0 +1,167 @@
/* $NetBSD: mcontext.h,v 1.2 2003/01/18 06:44:56 thorpej Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Klaus Klein.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SPARC_MCONTEXT_H_
#define _SPARC_MCONTEXT_H_
/*
* Layout of mcontext_t according to the System V Application Binary Interface,
* Edition 4.1, SPARC Processor ABI Supplement and updated for SPARC v9.
*/
#ifdef __arch64__
#define _NGREG 21 /* %ccr, pc, npc, %g1-7, %o0-7, %asi, %fprs */
#else
#define _NGREG 19 /* %psr, pc, npc, %g1-7, %o0-7 */
#endif
typedef long int __greg_t;
typedef __greg_t __gregset_t[_NGREG];
/* Offsets into gregset_t, for convenience. */
#ifdef __arch64__
#define _REG_CCR 0
#else
#define _REG_PSR 0
#endif
#define _REG_PC 1
#define _REG_nPC 2
#define _REG_Y 3
#define _REG_G1 4
#define _REG_G2 5
#define _REG_G3 6
#define _REG_G4 7
#define _REG_G5 8
#define _REG_G6 9
#define _REG_G7 10
#define _REG_O0 11
#define _REG_O1 12
#define _REG_O2 13
#define _REG_O3 14
#define _REG_O4 15
#define _REG_O5 16
#define _REG_O6 17
#define _REG_O7 18
#ifdef __arch64__
#define _REG_ASI 19
#define _REG_FPRS 20
#endif
#define _SPARC_MAXREGWINDOW 31
/* Layout of a register window. */
typedef struct {
__greg_t __rw_local[8]; /* %l0-7 */
__greg_t __rw_in[8]; /* %i0-7 */
} __rwindow_t;
/* Description of available register windows. */
typedef struct {
int __wbcnt;
__greg_t * __spbuf[_SPARC_MAXREGWINDOW];
__rwindow_t __wbuf[_SPARC_MAXREGWINDOW];
} __gwindows_t;
/* FPU address queue */
struct __fpq {
unsigned int * __fpq_addr; /* address */
unsigned int __fpq_instr; /* instruction */
};
struct __fq {
union {
double __whole;
struct __fpq __fpq;
} _FQu;
};
/* FPU state description */
typedef struct {
union {
unsigned int __fpu_regs[32];
#ifdef __arch64__
double __fpu_dregs[32];
long double __fpu_qregs[16];
#else
double __fpu_dregs[16];
#endif
} __fpu_fr; /* FPR contents */
struct __fq * __fpu_q; /* pointer to FPU insn queue */
unsigned long __fpu_fsr; /* %fsr */
unsigned char __fpu_qcnt; /* # entries in __fpu_q */
unsigned char __fpu_q_entrysize; /* size of a __fpu_q entry */
unsigned char __fpu_en; /* this context valid? */
} __fpregset_t;
/* `Extra Register State'(?) */
typedef struct {
unsigned int __xrs_id; /* See below */
char * __xrs_ptr; /* points into filler area */
} __xrs_t;
#define _XRS_ID 0x78727300 /* 'xrs\0' */
#ifdef __arch64__
/* Ancillary State Registers, 16-31 are available to user programs */
typedef long __asrset_t[16]; /* %asr16-31 */
#endif
typedef struct {
__gregset_t __gregs; /* GPR state */
__gwindows_t * __gwins; /* may point to register windows */
__fpregset_t __fpregs; /* FPU state, if any */
__xrs_t __xrs; /* may indicate extra reg state */
#ifdef __arch64__
__asrset_t __asrs; /* ASR state */
#endif
#ifdef __arch64__
long int __filler[4];
#else
long int __filler[19];
#endif
} mcontext_t;
#ifdef __arch64__
#define _UC_MACHINE_PAD 4 /* Padding appended to ucontext_t */
#define _UC_MACHINE_SP(uc) (((uc)->uc_mcontext.__gregs[_REG_O6])+0x7ff)
#else
#define _UC_MACHINE_PAD 23 /* Padding appended to ucontext_t */
#define _UC_MACHINE_SP(uc) ((uc)->uc_mcontext.__gregs[_REG_O6])
#endif
#endif /* !_SPARC_MCONTEXT_H_ */
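As a usage illustration, not part of the commit: the register indices and the _UC_MACHINE_SP macro defined above are enough to pull the interrupted program counter and stack pointer out of a ucontext_t. uc_pc_sp is a hypothetical helper written only to show the accessors; on sparc64 the stack pointer in %o6 carries the 0x7ff stack bias, which the macro compensates for.

#include <ucontext.h>

/* Hypothetical helper: fetch pc and sp from a ucontext_t (sketch only). */
static void
uc_pc_sp(const ucontext_t *uc, unsigned long *pc, unsigned long *sp)
{
	*pc = (unsigned long)uc->uc_mcontext.__gregs[_REG_PC];
	*sp = (unsigned long)_UC_MACHINE_SP(uc);  /* %o6; bias-adjusted on sparc64 */
}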

View File: param.h

@ -1,4 +1,4 @@
/* $NetBSD: param.h,v 1.57 2002/07/17 06:19:45 thorpej Exp $ */
/* $NetBSD: param.h,v 1.58 2003/01/18 06:44:57 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -179,7 +179,6 @@ extern void delay __P((unsigned int));
#define DELAY(n) delay(n)
#endif /* _LOCORE */
/*
* microSPARC-IIep is a sun4m but with an integrated PCI controller.
* In a lot of places (like pmap &c) we want it to be treated as SUN4M.

View File: pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.64 2003/01/11 03:40:31 mrg Exp $ */
/* $NetBSD: pmap.h,v 1.65 2003/01/18 06:44:57 thorpej Exp $ */
/*
* Copyright (c) 1996
@ -247,9 +247,8 @@ int pmap_dumpmmu __P((int (*)__P((dev_t, daddr_t, caddr_t, size_t)),
/* FUNCTION DECLARATIONS FOR COMMON PMAP MODULE */
struct proc;
void pmap_activate __P((struct proc *));
void pmap_deactivate __P((struct proc *));
void pmap_activate __P((struct lwp *));
void pmap_deactivate __P((struct lwp *));
void pmap_bootstrap __P((int nmmu, int nctx, int nregion));
int pmap_count_ptes __P((struct pmap *));
void pmap_prefer __P((vaddr_t, vaddr_t *));

View File: proc.h

@ -1,4 +1,4 @@
/* $NetBSD: proc.h,v 1.10 2003/01/12 16:29:01 pk Exp $ */
/* $NetBSD: proc.h,v 1.11 2003/01/18 06:44:57 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -44,16 +44,22 @@
* @(#)proc.h 8.1 (Berkeley) 6/11/93
*/
#ifndef _SPARC_PROC_H_
#define _SPARC_PROC_H_
/*
* Machine-dependent part of the proc structure for SPARC.
*/
struct mdproc {
struct mdlwp {
struct trapframe *md_tf; /* trap/syscall registers */
struct fpstate *md_fpstate; /* fpu state, if any; always resident */
u_long md_flags;
struct cpu_info *md_fpu; /* Module holding FPU state */
};
struct mdproc {
};
/* md_flags */
#define MDP_FIXALIGN 0x1 /* Fix unaligned memory accesses */
@ -70,3 +76,5 @@ extern struct simplelock fpulock;
simple_unlock(&fpulock); \
splx(s); \
} while (/* CONSTCOND */ 0)
#endif /* _SPARC_PROC_H_ */

View File: signal.h

@ -1,4 +1,4 @@
/* $NetBSD: signal.h,v 1.9 2003/01/09 23:25:24 thorpej Exp $ */
/* $NetBSD: signal.h,v 1.10 2003/01/18 06:44:57 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -97,6 +97,54 @@ struct sigcontext {
long sc_o0; /* %o0 to restore */
sigset_t sc_mask; /* signal mask to restore (new style) */
};
#ifdef __arch64__
#define _MCONTEXT_TO_SIGCONTEXT_32_64(uc, sc) \
do { \
(sc)->sc_tstate = \
((uc)->uc_mcontext.__gregs[_REG_CCR] << TSTATE_CCR_SHIFT) | \
((uc)->uc_mcontext.__gregs[_REG_ASI] << TSTATE_ASI_SHIFT); \
} while (/*CONSTCOND*/0)
#define _SIGCONTEXT_TO_MCONTEXT_32_64(sc, uc) \
do { \
(uc)->uc_mcontext.__gregs[_REG_CCR] = \
((sc)->sc_tstate & TSTATE_CCR) >> TSTATE_CCR_SHIFT; \
(uc)->uc_mcontext.__gregs[_REG_ASI] = \
((sc)->sc_tstate & TSTATE_ASI) >> TSTATE_ASI_SHIFT; \
} while (/*CONSTCOND*/0)
#else /* ! __arch64__ */
#define _MCONTEXT_TO_SIGCONTEXT_32_64(uc, sc) \
do { \
(sc)->sc_psr = (uc)->uc_mcontext.__gregs[_REG_PSR]; \
} while (/*CONSTCOND*/0)
#define _SIGCONTEXT_TO_MCONTEXT_32_64(sc, uc) \
do { \
(uc)->uc_mcontext.__gregs[_REG_PSR] = (sc)->sc_psr; \
} while (/*CONSTCOND*/0)
#endif /* __arch64__ */
#define _MCONTEXT_TO_SIGCONTEXT(uc, sc) \
do { \
(sc)->sc_sp = (uc)->uc_mcontext.__gregs[_REG_O6]; \
(sc)->sc_pc = (uc)->uc_mcontext.__gregs[_REG_PC]; \
(sc)->sc_npc = (uc)->uc_mcontext.__gregs[_REG_nPC]; \
_MCONTEXT_TO_SIGCONTEXT_32_64((uc), (sc)); \
(sc)->sc_g1 = (uc)->uc_mcontext.__gregs[_REG_G1]; \
(sc)->sc_o0 = (uc)->uc_mcontext.__gregs[_REG_O0]; \
} while (/*CONSTCOND*/0)
#define _SIGCONTEXT_TO_MCONTEXT(sc, uc) \
do { \
(uc)->uc_mcontext.__gregs[_REG_O6] = (sc)->sc_sp; \
(uc)->uc_mcontext.__gregs[_REG_PC] = (sc)->sc_pc; \
(uc)->uc_mcontext.__gregs[_REG_nPC] = (sc)->sc_npc; \
_SIGCONTEXT_TO_MCONTEXT_32_64((sc), (uc)); \
(uc)->uc_mcontext.__gregs[_REG_G1] = (sc)->sc_g1; \
(uc)->uc_mcontext.__gregs[_REG_O0] = (sc)->sc_o0; \
} while (/*CONSTCOND*/0)
#else /* _LOCORE */
/* XXXXX These values don't work for _LP64 */
#define SC_SP_OFFSET 8
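A hedged sketch of how the _MCONTEXT_TO_SIGCONTEXT / _SIGCONTEXT_TO_MCONTEXT macros added above might be used, assuming the surrounding machine headers are included; build_sigcontext is a hypothetical helper, not a function from this commit, and the reverse conversion is symmetric.

/* Hypothetical helper: fill an old-style sigcontext from a ucontext_t. */
static void
build_sigcontext(const ucontext_t *uc, struct sigcontext *sc)
{
	/* Copies sp, pc, npc, %g1, %o0 and the psr (or ccr/asi) bits. */
	_MCONTEXT_TO_SIGCONTEXT(uc, sc);
}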

View File: svr4_machdep.h

@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.h,v 1.8 1999/01/21 23:05:14 christos Exp $ */
/* $NetBSD: svr4_machdep.h,v 1.9 2003/01/18 06:44:58 thorpej Exp $ */
/*-
* Copyright (c) 1994 The NetBSD Foundation, Inc.
@ -117,6 +117,6 @@ typedef struct svr4_mcontext {
struct svr4_ucontext;
int svr4_trap __P((int, struct proc *));
int svr4_trap __P((int, struct lwp *));
#endif /* !_SPARC_SVR4_MACHDEP_H_ */

View File: autoconf.c

@ -1,4 +1,4 @@
/* $NetBSD: autoconf.c,v 1.187 2003/01/01 02:20:48 thorpej Exp $ */
/* $NetBSD: autoconf.c,v 1.188 2003/01/18 06:45:00 thorpej Exp $ */
/*
* Copyright (c) 1996
@ -257,7 +257,7 @@ bootstrap()
ncpu = find_cpus();
/* Attach user structure to proc0 */
proc0.p_addr = proc0paddr;
lwp0.l_addr = proc0paddr;
cpuinfo.master = 1;
getcpuinfo(&cpuinfo, 0);

View File: clock.c

@ -1,4 +1,4 @@
/* $NetBSD: clock.c,v 1.90 2003/01/06 12:50:45 pk Exp $ */
/* $NetBSD: clock.c,v 1.91 2003/01/18 06:45:01 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -144,7 +144,6 @@ struct idprom *idprom;
void establish_hostid(struct idprom *);
void myetheraddr(u_char *);
/*
* XXX this belongs elsewhere
*/
@ -242,11 +241,11 @@ setstatclockrate(newhz)
*/
void schedintr(void *v)
{
struct proc *p = curproc;
struct lwp *l = curlwp;
/* XXX - should consult a cpuinfo.schedtickpending */
if (p != NULL)
schedclock(p);
if (l != NULL)
schedclock(l);
}
/*

View File: compat_13_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: compat_13_machdep.c,v 1.2 2000/12/22 22:58:55 jdolecek Exp $ */
/* $NetBSD: compat_13_machdep.c,v 1.3 2003/01/18 06:45:01 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -48,6 +48,7 @@
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
/*
@ -61,14 +62,15 @@
*/
/* ARGSUSED */
int
compat_13_sys_sigreturn(p, v, retval)
struct proc *p;
compat_13_sys_sigreturn(l, v, retval)
struct lwp *l;
void *v;
register_t *retval;
{
struct compat_13_sys_sigreturn_args /* {
syscallarg(struct sigcontext13 *) sigcntxp;
} */ *uap = v;
struct proc *p = l->l_proc;
struct sigcontext13 sc, *scp;
sigset_t mask;
struct trapframe *tf;
@ -76,14 +78,14 @@ compat_13_sys_sigreturn(p, v, retval)
/* First ensure consistent stack state (see sendsig). */
write_user_windows();
if (rwindow_save(p))
sigexit(p, SIGILL);
if (rwindow_save(l))
sigexit(l, SIGILL);
if ((error = copyin(SCARG(uap, sigcntxp), &sc, sizeof sc)) != 0)
return (error);
scp = &sc;
tf = p->p_md.md_tf;
tf = l->l_md.md_tf;
/*
* Only the icc bits in the psr are used, so it need not be
* verified. pc and npc must be multiples of 4. This is all

View File: cpu.c

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.166 2003/01/16 17:21:43 pk Exp $ */
/* $NetBSD: cpu.c,v 1.167 2003/01/18 06:45:01 thorpej Exp $ */
/*
* Copyright (c) 1996
@ -435,8 +435,8 @@ static struct cpu_softc *bootcpu;
pmap_globalize_boot_cpuinfo(cpi);
cpuinfo.ci_self = cpi;
/* XXX - fixup proc0.p_cpu */
proc0.p_cpu = cpi;
/* XXX - fixup lwp.l_cpu */
lwp0.l_cpu = cpi;
#else
/* The `local' VA is global for uniprocessor. */
cpi = sc->sc_cpuinfo = (struct cpu_info *)CPUINFO_VA;
@ -2134,8 +2134,8 @@ cpu_debug_dump(void)
ci,
ci->ci_cpuid,
ci->flags,
ci->ci_curproc,
ci->fpproc);
ci->ci_curlwp,
ci->fplwp);
}
}
#endif

View File: cpuvar.h

@ -1,4 +1,4 @@
/* $NetBSD: cpuvar.h,v 1.53 2003/01/12 01:16:06 pk Exp $ */
/* $NetBSD: cpuvar.h,v 1.54 2003/01/18 06:45:02 thorpej Exp $ */
/*
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -189,8 +189,8 @@ struct cpu_info {
* associated with this CPU--running on it, using its FPU,
* etc.
*/
struct proc *ci_curproc; /* CPU owner */
struct proc *fpproc; /* FPU owner */
struct lwp *ci_curlwp; /* CPU owner */
struct lwp *fplwp; /* FPU owner */
/*
* Idle PCB and Interrupt stack;

View File: db_interface.c

@ -1,4 +1,4 @@
/* $NetBSD: db_interface.c,v 1.53 2003/01/16 16:57:43 pk Exp $ */
/* $NetBSD: db_interface.c,v 1.54 2003/01/18 06:45:03 thorpej Exp $ */
/*
* Mach Operating System
@ -356,32 +356,37 @@ db_proc_cmd(addr, have_addr, count, modif)
db_expr_t count;
char *modif;
{
struct lwp *l;
struct proc *p;
p = curproc;
l = curlwp;
if (have_addr)
p = (struct proc*) addr;
if (p == NULL) {
l = (struct lwp *) addr;
if (l == NULL) {
db_printf("no current process\n");
return;
}
db_printf("process %p: ", p);
db_printf("pid:%d cpu:%d stat:%d vmspace:%p", p->p_pid,
p->p_cpu->ci_cpuid, p->p_stat, p->p_vmspace);
p = l->l_proc;
db_printf("LWP %p: ", l);
db_printf("pid:%d.%d cpu:%d stat:%d vmspace:%p", p->p_pid,
l->l_lid, l->l_cpu->ci_cpuid, l->l_stat, p->p_vmspace);
if (p->p_stat != SZOMB && p->p_stat != SDEAD)
db_printf(" ctx: %p cpuset %x",
p->p_vmspace->vm_map.pmap->pm_ctx,
p->p_vmspace->vm_map.pmap->pm_cpuset);
db_printf("\npmap:%p wchan:%p pri:%d upri:%d\n",
p->p_vmspace->vm_map.pmap,
p->p_wchan, p->p_priority, p->p_usrpri);
l->l_wchan, l->l_priority, l->l_usrpri);
db_printf("maxsaddr:%p ssiz:%d pg or %llxB\n",
p->p_vmspace->vm_maxsaddr, p->p_vmspace->vm_ssize,
(unsigned long long)ctob(p->p_vmspace->vm_ssize));
db_printf("profile timer: %ld sec %ld usec\n",
p->p_stats->p_timer[ITIMER_PROF].it_value.tv_sec,
p->p_stats->p_timer[ITIMER_PROF].it_value.tv_usec);
db_printf("pcb: %p\n", &p->p_addr->u_pcb);
db_printf("pcb: %p\n", &l->l_addr->u_pcb);
return;
}

View File: db_trace.c

@ -1,4 +1,4 @@
/* $NetBSD: db_trace.c,v 1.18 2003/01/13 19:44:06 pk Exp $ */
/* $NetBSD: db_trace.c,v 1.19 2003/01/18 06:45:03 thorpej Exp $ */
/*
* Mach Operating System
@ -73,17 +73,19 @@ db_stack_trace_print(addr, have_addr, count, modif, pr)
if (trace_thread) {
struct proc *p;
struct user *u;
struct lwp *l;
(*pr)("trace: pid %d ", (int)addr);
p = pfind(addr);
if (p == NULL) {
(*pr)("not found\n");
return;
}
if ((p->p_flag & P_INMEM) == 0) {
}
l = LIST_FIRST(&p->p_lwps); /* XXX NJWLWP */
if ((l->l_flag & L_INMEM) == 0) {
(*pr)("swapped out\n");
return;
}
u = p->p_addr;
u = l->l_addr;
frame = (struct frame *)u->u_pcb.pcb_sp;
pc = u->u_pcb.pcb_pc;
(*pr)("at %p\n", frame);

View File: emul.c

@ -1,4 +1,4 @@
/* $NetBSD: emul.c,v 1.6 2002/12/16 16:59:11 pk Exp $ */
/* $NetBSD: emul.c,v 1.7 2003/01/18 06:45:03 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lwp.h>
#include <machine/reg.h>
#include <machine/instr.h>
#include <machine/cpu.h>
@ -53,12 +53,12 @@
#define GPR(tf, i) ((int32_t *) &tf->tf_global)[i]
#define IPR(tf, i) ((int32_t *) tf->tf_out[6])[i - 16]
#define FPR(p, i) ((int32_t) p->p_md.md_fpstate->fs_regs[i])
#define FPR(l, i) ((int32_t) l->l_md.md_fpstate->fs_regs[i])
static __inline int readgpreg __P((struct trapframe *, int, void *));
static __inline int readfpreg __P((struct proc *, int, void *));
static __inline int readfpreg __P((struct lwp *, int, void *));
static __inline int writegpreg __P((struct trapframe *, int, const void *));
static __inline int writefpreg __P((struct proc *, int, const void *));
static __inline int writefpreg __P((struct lwp *, int, const void *));
static __inline int decodeaddr __P((struct trapframe *, union instr *, void *));
static int muldiv __P((struct trapframe *, union instr *, int32_t *, int32_t *,
int32_t *));
@ -105,23 +105,23 @@ writegpreg(tf, i, val)
static __inline int
readfpreg(p, i, val)
struct proc *p;
readfpreg(l, i, val)
struct lwp *l;
int i;
void *val;
{
*(int32_t *) val = FPR(p, i);
*(int32_t *) val = FPR(l, i);
return 0;
}
static __inline int
writefpreg(p, i, val)
struct proc *p;
writefpreg(l, i, val)
struct lwp *l;
int i;
const void *val;
{
FPR(p, i) = *(const int32_t *) val;
FPR(l, i) = *(const int32_t *) val;
return 0;
}
@ -233,8 +233,8 @@ muldiv(tf, code, rd, rs1, rs2)
*/
int
fixalign(p, tf)
struct proc *p;
fixalign(l, tf)
struct lwp *l;
struct trapframe *tf;
{
static u_char sizedef[] = { 0x4, 0xff, 0x2, 0x8 };
@ -313,19 +313,19 @@ fixalign(p, tf)
uprintf("%c%d\n", REGNAME(code.i_asi.i_rs2));
#endif
#ifdef DIAGNOSTIC
if (op.bits.fl && p != cpuinfo.fpproc)
if (op.bits.fl && l != cpuinfo.fplwp)
panic("fp align without being the FP owning process");
#endif
if (op.bits.st) {
if (op.bits.fl) {
savefpstate(p->p_md.md_fpstate);
savefpstate(l->l_md.md_fpstate);
error = readfpreg(p, code.i_op3.i_rd, &data.i[0]);
error = readfpreg(l, code.i_op3.i_rd, &data.i[0]);
if (error)
return error;
if (size == 8) {
error = readfpreg(p, code.i_op3.i_rd + 1,
error = readfpreg(l, code.i_op3.i_rd + 1,
&data.i[1]);
if (error)
return error;
@ -367,16 +367,16 @@ fixalign(p, tf)
return error;
if (op.bits.fl) {
error = writefpreg(p, code.i_op3.i_rd, &data.i[0]);
error = writefpreg(l, code.i_op3.i_rd, &data.i[0]);
if (error)
return error;
if (size == 8) {
error = writefpreg(p, code.i_op3.i_rd + 1,
error = writefpreg(l, code.i_op3.i_rd + 1,
&data.i[1]);
if (error)
return error;
}
loadfpstate(p->p_md.md_fpstate);
loadfpstate(l->l_md.md_fpstate);
}
else {
error = writegpreg(tf, code.i_op3.i_rd, &data.i[0]);

View File: genassym.cf

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.40 2003/01/12 06:11:01 uwe Exp $
# $NetBSD: genassym.cf,v 1.41 2003/01/18 06:45:03 thorpej Exp $
#
# Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -115,13 +115,15 @@ define BSD BSD
define USRSTACK USRSTACK
# proc fields and values
define P_ADDR offsetof(struct proc, p_addr)
define P_STAT offsetof(struct proc, p_stat)
define P_WCHAN offsetof(struct proc, p_wchan)
define L_ADDR offsetof(struct lwp, l_addr)
define L_PROC offsetof(struct lwp, l_proc)
define L_STAT offsetof(struct lwp, l_stat)
define L_WCHAN offsetof(struct lwp, l_wchan)
define L_CPU offsetof(struct lwp, l_cpu)
define L_PRIORITY offsetof(struct lwp, l_priority)
define P_VMSPACE offsetof(struct proc, p_vmspace)
define P_CPU offsetof(struct proc, p_cpu)
define SRUN SRUN
define SONPROC SONPROC
define LSRUN LSRUN
define LSONPROC LSONPROC
# VM structure fields
define VM_PMAP offsetof(struct vmspace, vm_map.pmap)
@ -151,7 +153,7 @@ define CPUINFO_REDZONE offsetof(struct cpu_info, redzone)
define REDSIZE REDSIZE
define CPUINFO_IDLE_U offsetof(struct cpu_info, idle_u)
define CPUINFO_CURPCB offsetof(struct cpu_info, curpcb)
define CPUINFO_CURPROC offsetof(struct cpu_info, ci_curproc)
define CPUINFO_CURLWP offsetof(struct cpu_info, ci_curlwp)
define CPUINFO_SELF offsetof(struct cpu_info, ci_self)
define CPUINFO_FLAGS offsetof(struct cpu_info, flags)
define CPUINFO_WANT_AST offsetof(struct cpu_info, want_ast)
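For orientation, not part of the commit: the genassym machinery turns each define line above into a plain numeric constant in assym.h, which is how locore.s below can use names such as L_ADDR, L_STAT and LSRUN as immediate offsets and values. A hedged sketch of the generated output follows; the numbers are invented for illustration only.

/* assym.h fragment (illustrative only; the values below are made up) */
#define L_ADDR		20	/* offsetof(struct lwp, l_addr) */
#define L_WCHAN		36	/* offsetof(struct lwp, l_wchan) */
#define L_STAT		44	/* offsetof(struct lwp, l_stat) */
#define LSRUN		2	/* value of LSRUN */
#define LSONPROC	7	/* value of LSONPROC */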

View File: locore.s

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.184 2003/01/17 14:49:45 pk Exp $ */
/* $NetBSD: locore.s,v 1.185 2003/01/18 06:45:03 thorpej Exp $ */
/*
* Copyright (c) 1996 Paul Kranenburg
@ -212,8 +212,8 @@ _C_LABEL(kgdb_stack):
*/
cpcb = CPUINFO_VA + CPUINFO_CURPCB
/* curproc points to the current process that has the CPU */
curproc = CPUINFO_VA + CPUINFO_CURPROC
/* curlwp points to the current LWP that has the CPU */
curlwp = CPUINFO_VA + CPUINFO_CURLWP
/*
* cputyp is the current cpu type, used to distinguish between
@ -4283,7 +4283,7 @@ _C_LABEL(cpu_hatch):
clr %l4
sethi %hi(cpcb), %l6
b idle_enter
sethi %hi(curproc), %l7
sethi %hi(curlwp), %l7
#else /* ALT_SWITCH_CODE */
/* Idle here .. */
9: ba 9b
@ -4315,6 +4315,7 @@ _C_LABEL(sigcode):
! sigreturn does not return unless it fails
mov SYS_exit, %g1 ! exit(errno)
t ST_SYSCALL
/* NOTREACHED */
_C_LABEL(esigcode):
/*
@ -4547,13 +4548,14 @@ ENTRY(write_user_windows)
/*
* switchexit is called only from cpu_exit() before the current process
* has freed its vmspace and kernel stack; we must schedule them to be
* freed. (curproc is already NULL.)
* freed. (curlwp is already NULL.)
*
* We lay the process to rest by changing to the `idle' kernel stack,
* and note that the `last loaded process' is nonexistent.
*/
ENTRY(switchexit)
mov %o0, %g2 ! save proc for exit2() call
mov %o1, %g1 ! exit2() or lwp_exit2()
/*
* Change pcb to idle u. area, i.e., set %sp to top of stack
@ -4587,7 +4589,7 @@ ENTRY(switchexit)
#endif
wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
nop
call _C_LABEL(exit2) ! exit2(p)
call %g1 ! {lwp}exit2(p)
mov %g2, %o0
/*
@ -4600,7 +4602,7 @@ ENTRY(switchexit)
* %l2 = %hi(whichqs)
* %l4 = lastproc
* %l6 = %hi(cpcb)
* %l7 = %hi(curproc)
* %l7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
*/
@ -4614,9 +4616,9 @@ ENTRY(switchexit)
clr %l4 ! lastproc = NULL;
#endif
sethi %hi(cpcb), %l6
sethi %hi(curproc), %l7
sethi %hi(curlwp), %l7
b idle_enter
st %g0, [%l7 + %lo(curproc)] ! curproc = NULL;
st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
/*
* When no processes are on the runq, switch
@ -4662,7 +4664,7 @@ idle_switch:
sethi %hi(_C_LABEL(sched_whichqs)), %l2
clr %l4 ! lastproc = NULL;
sethi %hi(cpcb), %l6
sethi %hi(curproc), %l7
sethi %hi(curlwp), %l7
/* FALLTHROUGH*/
idle:
@ -4737,8 +4739,8 @@ Lsw_panic_srun:
* SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
*/
.globl _C_LABEL(__ffstab)
ENTRY(cpu_switchto)
ENTRY(cpu_switch)
ENTRY(cpu_switchto)
/*
* REGISTER USAGE AT THIS POINT:
* %l1 = oldpsr (excluding ipl bits)
@ -4747,7 +4749,7 @@ ENTRY(cpu_switch)
* %l4(%g4) = lastproc
* %l5 = tmp 0
* %l6 = %hi(cpcb)
* %l7 = %hi(curproc)
* %l7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o2 = tmp 3
@ -4761,10 +4763,10 @@ ENTRY(cpu_switch)
ld [%l6 + %lo(cpcb)], %o0
std %i6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <fp,pc>;
rd %psr, %l1 ! oldpsr = %psr;
sethi %hi(curproc), %l7
sethi %hi(curlwp), %l7
st %l1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
andn %l1, PSR_PIL, %l1 ! oldpsr &= ~PSR_PIL;
st %g0, [%l7 + %lo(curproc)] ! curproc = NULL;
st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
/*
* Save the old process: write back all windows (excluding
* the current one). XXX crude; knows nwindows <= 8
@ -4853,7 +4855,7 @@ Lsw_load:
* %l4 = lastproc
* %l5 =
* %l6 = %hi(cpcb)
* %l7 = %hi(curproc)
* %l7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o2 = tmp 3
@ -4861,12 +4863,12 @@ Lsw_load:
*/
/* firewalls */
ld [%l3 + P_WCHAN], %o0 ! if (p->p_wchan)
ld [%l3 + L_WCHAN], %o0 ! if (p->p_wchan)
tst %o0
bne Lsw_panic_wchan ! panic("switch wchan");
EMPTY
ldsb [%l3 + P_STAT], %o0 ! if (p->p_stat != SRUN)
cmp %o0, SRUN
ld [%l3 + L_STAT], %o0 ! if (p->p_stat != LSRUN)
cmp %o0, LSRUN
bne Lsw_panic_srun ! panic("switch SRUN");
EMPTY
@ -4874,19 +4876,19 @@ Lsw_load:
* Committed to running process p.
* It may be the same as the one we were running before.
*/
mov SONPROC, %o0 ! p->p_stat = SONPROC;
stb %o0, [%l3 + P_STAT]
mov LSONPROC, %o0 ! p->p_stat = LSONPROC;
st %o0, [%l3 + L_STAT]
/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
sethi %hi(_CISELFP), %o0 ! p->p_cpu = cpuinfo.ci_self;
ld [%o0 + %lo(_CISELFP)], %o0
st %o0, [%l3 + P_CPU]
st %o0, [%l3 + L_CPU]
#endif
ld [%l3 + P_ADDR], %g5 ! newpcb = p->p_addr;
ld [%l3 + L_ADDR], %g5 ! newpcb = p->p_addr;
st %g0, [%l3 + 4] ! p->p_back = NULL;
st %l3, [%l7 + %lo(curproc)] ! curproc = p;
st %l3, [%l7 + %lo(curlwp)] ! curlwp = p;
/*
* Load the new process. To load, we must change stacks and
@ -4968,7 +4970,8 @@ Lsw_load:
#endif
INCR(_C_LABEL(nswitchdiff)) ! clobbers %o0,%o1
ld [%g3 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
ld [%g3 + L_PROC], %o2 ! p = l->l_proc;
ld [%o2 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
ld [%o3 + VM_PMAP], %o3 ! pm = vm->vm_map.vm_pmap;
#if defined(MULTIPROCESSOR)
sethi %hi(CPUINFO_VA + CPUINFO_CPUNO), %o0
@ -5031,6 +5034,7 @@ Lsw_sameproc:
restore %g0, %g0, %o0 ! return (0)
#endif /* !MULTIPROCESSOR */
/*
* Snapshot the current process so that stack frames are up to date.
* Only used just before a crash dump.
@ -5110,7 +5114,7 @@ ENTRY(proc_trampoline)
* %g4 = lastproc
* %g5 = <free>; newpcb
* %g6 = %hi(cpcb)
* %g7 = %hi(curproc)
* %g7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o2 = tmp 3
@ -5146,13 +5150,14 @@ ENTRY(proc_trampoline)
/*
* switchexit is called only from cpu_exit() before the current process
* has freed its vmspace and kernel stack; we must schedule them to be
* freed. (curproc is already NULL.)
* freed. (curlwp is already NULL.)
*
* We lay the process to rest by changing to the `idle' kernel stack,
* and note that the `last loaded process' is nonexistent.
*/
ENTRY(switchexit)
mov %o0, %g2 ! save proc for exit2() call
mov %o1, %g1 ! exit2() or lwp_exit2()
/*
* Change pcb to idle u. area, i.e., set %sp to top of stack
@ -5186,7 +5191,7 @@ ENTRY(switchexit)
#endif
wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
call _C_LABEL(exit2) ! exit2(p)
call %g1 ! {lwp}exit2(p)
mov %g2, %o0
/*
@ -5199,7 +5204,7 @@ ENTRY(switchexit)
* %g2 = %hi(whichqs)
* %g4 = lastproc
* %g6 = %hi(cpcb)
* %g7 = %hi(curproc)
* %g7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o3 = whichqs
@ -5212,8 +5217,8 @@ ENTRY(switchexit)
sethi %hi(_C_LABEL(sched_whichqs)), %g2
clr %g4 ! lastproc = NULL;
sethi %hi(cpcb), %g6
sethi %hi(curproc), %g7
st %g0, [%g7 + %lo(curproc)] ! curproc = NULL;
sethi %hi(curlwp), %g7
st %g0, [%g7 + %lo(curlwp)] ! curlwp = NULL;
b,a idle_enter_no_schedlock
/* FALLTHROUGH */
@ -5335,7 +5340,7 @@ ENTRY(cpu_switch)
* %g4 = lastproc
* %g5 = tmp 0
* %g6 = %hi(cpcb)
* %g7 = %hi(curproc)
* %g7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o2 = tmp 3
@ -5351,8 +5356,8 @@ ENTRY(cpu_switch)
rd %psr, %g1 ! oldpsr = %psr;
st %g1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
andn %g1, PSR_PIL, %g1 ! oldpsr &= ~PSR_PIL;
sethi %hi(curproc), %g7
st %g0, [%g7 + %lo(curproc)] ! curproc = NULL;
sethi %hi(curlwp), %g7
st %g0, [%g7 + %lo(curlwp)] ! curlwp = NULL;
Lsw_scan:
nop; nop; nop ! paranoia
@ -5408,6 +5413,7 @@ Lsw_scan:
andn %o3, %o1, %o3
st %o3, [%g2 + %lo(_C_LABEL(sched_whichqs))]
1:
cpu_switch0:
/*
* PHASE TWO: NEW REGISTER USAGE:
* %g1 = oldpsr (excluding ipl bits)
@ -5416,7 +5422,7 @@ Lsw_scan:
* %g4 = lastproc
* %g5 = newpcb
* %g6 = %hi(cpcb)
* %g7 = %hi(curproc)
* %g7 = %hi(curlwp)
* %o0 = tmp 1
* %o1 = tmp 2
* %o2 = tmp 3
@ -5424,12 +5430,12 @@ Lsw_scan:
*/
/* firewalls */
ld [%g3 + P_WCHAN], %o0 ! if (p->p_wchan)
ld [%g3 + L_WCHAN], %o0 ! if (p->p_wchan)
tst %o0
bne Lsw_panic_wchan ! panic("switch wchan");
EMPTY
ldsb [%g3 + P_STAT], %o0 ! if (p->p_stat != SRUN)
cmp %o0, SRUN
ld [%g3 + L_STAT], %o0 ! if (p->p_stat != LSRUN)
cmp %o0, LSRUN
bne Lsw_panic_srun ! panic("switch SRUN");
EMPTY
@ -5437,14 +5443,14 @@ Lsw_scan:
* Committed to running process p.
* It may be the same as the one we were running before.
*/
mov SONPROC, %o0 ! p->p_stat = SONPROC;
stb %o0, [%g3 + P_STAT]
mov LSONPROC, %o0 ! p->p_stat = LSONPROC;
st %o0, [%g3 + L_STAT]
/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
sethi %hi(_CISELFP), %o0 ! p->p_cpu = cpuinfo.ci_self;
ld [%o0 + %lo(_CISELFP)], %o0
st %o0, [%g3 + P_CPU]
st %o0, [%g3 + L_CPU]
#endif
sethi %hi(_WANT_RESCHED), %o0 ! want_resched = 0;
@ -5453,10 +5459,10 @@ Lsw_scan:
/* Done with the run queues; release the scheduler lock */
SAVE_GLOBALS_AND_CALL(sched_unlock_idle)
#endif
ld [%g3 + P_ADDR], %g5 ! newpcb = p->p_addr;
ld [%g3 + L_ADDR], %g5 ! newpcb = p->p_addr;
st %g0, [%g3 + 4] ! p->p_back = NULL;
ld [%g5 + PCB_PSR], %g2 ! newpsr = newpcb->pcb_psr;
st %g3, [%g7 + %lo(curproc)] ! curproc = p;
st %g3, [%g7 + %lo(curlwp)] ! curlwp = p;
cmp %g3, %g4 ! p == lastproc?
be,a Lsw_sameproc ! yes, go return 0
@ -5527,12 +5533,14 @@ Lsw_load:
* can talk about user space stuff. (Its pcb_uw is currently
* zero so it is safe to have interrupts going here.)
*/
ld [%g3 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
mov 1, %o0 ! return value
ld [%g3 + L_PROC], %o3 ! p = l->l_proc;
ld [%o3 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
ld [%o3 + VM_PMAP], %o3 ! pm = vm->vm_map.vm_pmap;
ld [%o3 + PMAP_CTX], %o0 ! if (pm->pm_ctx != NULL)
tst %o0
ld [%o3 + PMAP_CTX], %o1 ! if (pm->pm_ctx != NULL)
tst %o1
bnz,a Lsw_havectx ! goto havecontext;
ld [%o3 + PMAP_CTXNUM], %o0 ! load context number
ld [%o3 + PMAP_CTXNUM], %o1 ! load context number
/* p does not have a context: call ctx_alloc to get one */
save %sp, -CCFSZ, %sp
@ -5540,12 +5548,11 @@ Lsw_load:
mov %i3, %o0
ret
restore
restore %g0, 1, %o0 ! set return value to 1
/* p does have a context: just switch to it */
Lsw_havectx:
! context is in %o0
! pmap is in %o3
! context is in %o1
#if (defined(SUN4) || defined(SUN4C)) && defined(SUN4M)
NOP_ON_4M_15:
b,a 1f
@ -5553,9 +5560,9 @@ NOP_ON_4M_15:
#endif
1:
#if defined(SUN4) || defined(SUN4C)
set AC_CONTEXT, %o1
set AC_CONTEXT, %o2
retl
stba %o0, [%o1] ASI_CONTROL ! setcontext(vm->vm_pmap.pm_ctxnum);
stba %o1, [%o2] ASI_CONTROL ! setcontext(vm->vm_pmap.pm_ctxnum);
#endif
2:
#if defined(SUN4M)
@ -5566,12 +5573,12 @@ NOP_ON_4M_15:
set CPUINFO_VA+CPUINFO_PURE_VCACHE_FLS, %o2
ld [%o2], %o2
mov %o7, %g7 ! save return address
jmpl %o2, %o7 ! this function must not clobber %o0 and %g7
jmpl %o2, %o7 ! this function must not clobber %o0,%o1 and %g7
nop
set SRMMU_CXR, %o1
set SRMMU_CXR, %o2
jmp %g7 + 8
sta %o0, [%o1] ASI_SRMMU ! setcontext(vm->vm_pmap.pm_ctxnum);
sta %o1, [%o2] ASI_SRMMU ! setcontext(vm->vm_pmap.pm_ctxnum);
#endif
Lsw_sameproc:
@ -5582,8 +5589,25 @@ Lsw_sameproc:
! wr %g2, 0 %psr ! %psr = newpsr; (done earlier)
nop
retl
nop
mov %g0, %o0 ! return value = 0
ENTRY(cpu_switchto)
/*
* Like cpu_switch, but we're passed the process to switch to.
*
* Use the same register usage convention as in cpu_switch().
*/
mov %o0, %g4 ! lastproc = arg1;
mov %o1, %g3 ! newproc = arg2;
sethi %hi(cpcb), %g6
ld [%g6 + %lo(cpcb)], %o0
std %o6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <sp,pc>;
rd %psr, %g1 ! oldpsr = %psr;
st %g1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
andn %g1, PSR_PIL, %g1 ! oldpsr &= ~PSR_PIL;
sethi %hi(curlwp), %g7
st %g0, [%g7 + %lo(curlwp)] ! curlwp = NULL;
b,a cpu_switch0
/*
* Snapshot the current process so that stack frames are up to date.

View File: machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.221 2003/01/16 16:20:20 pk Exp $ */
/* $NetBSD: machdep.c,v 1.222 2003/01/18 06:45:05 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -102,8 +102,10 @@
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/exec.h>
#include <sys/savar.h>
#include <uvm/uvm.h> /* we use uvm.kernel_object */
@ -388,17 +390,17 @@ mdallocsys(v)
*/
/* ARGSUSED */
void
setregs(p, pack, stack)
struct proc *p;
setregs(l, pack, stack)
struct lwp *l;
struct exec_package *pack;
u_long stack;
{
struct trapframe *tf = p->p_md.md_tf;
struct trapframe *tf = l->l_md.md_tf;
struct fpstate *fs;
int psr;
/* Don't allow misaligned code by default */
p->p_md.md_flags &= ~MDP_FIXALIGN;
l->l_md.md_flags &= ~MDP_FIXALIGN;
/*
* Set the registers to 0 except for:
@ -408,7 +410,7 @@ setregs(p, pack, stack)
* %pc,%npc: entry point of program
*/
psr = tf->tf_psr & (PSR_S | PSR_CWP);
if ((fs = p->p_md.md_fpstate) != NULL) {
if ((fs = l->l_md.md_fpstate) != NULL) {
struct cpu_info *cpi;
int s;
/*
@ -417,26 +419,26 @@ setregs(p, pack, stack)
* to save it. In any case, get rid of our FPU state.
*/
FPU_LOCK(s);
if ((cpi = p->p_md.md_fpu) != NULL) {
if (cpi->fpproc != p)
panic("FPU(%d): fpproc %p",
cpi->ci_cpuid, cpi->fpproc);
if (p == cpuinfo.fpproc)
if ((cpi = l->l_md.md_fpu) != NULL) {
if (cpi->fplwp != l)
panic("FPU(%d): fplwp %p",
cpi->ci_cpuid, cpi->fplwp);
if (l == cpuinfo.fplwp)
savefpstate(fs);
#if defined(MULTIPROCESSOR)
else
XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
cpi->fpproc = NULL;
cpi->fplwp = NULL;
}
p->p_md.md_fpu = NULL;
l->l_md.md_fpu = NULL;
FPU_UNLOCK(s);
free((void *)fs, M_SUBPROC);
p->p_md.md_fpstate = NULL;
l->l_md.md_fpstate = NULL;
}
bzero((caddr_t)tf, sizeof *tf);
tf->tf_psr = psr;
tf->tf_global[1] = (int)p->p_psstr;
tf->tf_global[1] = (int)l->l_proc->p_psstr;
tf->tf_pc = pack->ep_entry & ~3;
tf->tf_npc = tf->tf_pc + 4;
stack -= sizeof(struct rwindow);
@ -518,7 +520,8 @@ sendsig(sig, mask, code)
sigset_t *mask;
u_long code;
{
struct proc *p = curproc;
struct lwp *l = curlwp;
struct proc *p = l->l_proc;
struct sigacts *ps = p->p_sigacts;
struct sigframe *fp;
struct trapframe *tf;
@ -526,7 +529,7 @@ sendsig(sig, mask, code)
struct sigframe sf;
sig_t catcher = SIGACTION(p, sig).sa_handler;
tf = p->p_md.md_tf;
tf = l->l_md.md_tf;
oldsp = tf->tf_out[6];
/*
@ -592,7 +595,7 @@ sendsig(sig, mask, code)
*/
newsp = (int)fp - sizeof(struct rwindow);
write_user_windows();
if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
if (rwindow_save(l) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
/*
* Process has trashed its stack; give it an illegal
@ -602,7 +605,7 @@ sendsig(sig, mask, code)
if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
printf("sendsig: window save or copyout error\n");
#endif
sigexit(p, SIGILL);
sigexit(l, SIGILL);
/* NOTREACHED */
}
#ifdef DEBUG
@ -627,7 +630,7 @@ sendsig(sig, mask, code)
default:
/* Don't know what trampoline version; kill it. */
sigexit(p, SIGILL);
sigexit(l, SIGILL);
}
tf->tf_global[1] = (int)catcher;
@ -656,8 +659,8 @@ sendsig(sig, mask, code)
*/
/* ARGSUSED */
int
sys___sigreturn14(p, v, retval)
struct proc *p;
sys___sigreturn14(l, v, retval)
struct lwp *l;
void *v;
register_t *retval;
{
@ -666,12 +669,15 @@ sys___sigreturn14(p, v, retval)
} */ *uap = v;
struct sigcontext sc, *scp;
struct trapframe *tf;
struct proc *p;
int error;
p = l->l_proc;
/* First ensure consistent stack state (see sendsig). */
write_user_windows();
if (rwindow_save(p))
sigexit(p, SIGILL);
if (rwindow_save(l))
sigexit(l, SIGILL);
#ifdef DEBUG
if (sigdebug & SDB_FOLLOW)
printf("sigreturn: %s[%d], sigcntxp %p\n",
@ -681,7 +687,7 @@ sys___sigreturn14(p, v, retval)
return (error);
scp = &sc;
tf = p->p_md.md_tf;
tf = l->l_md.md_tf;
/*
* Only the icc bits in the psr are used, so it need not be
* verified. pc and npc must be multiples of 4. This is all
@ -709,6 +715,216 @@ sys___sigreturn14(p, v, retval)
return (EJUSTRETURN);
}
/*
* cpu_upcall:
*
* Send an upcall to userland.
*/
void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
void *sas, void *ap, void *sp, sa_upcall_t upcall)
{
struct trapframe *tf;
vaddr_t addr;
tf = l->l_md.md_tf;
addr = (vaddr_t) upcall;
/* Arguments to the upcall... */
tf->tf_out[0] = type;
tf->tf_out[1] = (vaddr_t) sas;
tf->tf_out[2] = nevents;
tf->tf_out[3] = ninterrupted;
tf->tf_out[4] = (vaddr_t) ap;
/*
* Ensure the stack is double-word aligned, and provide a
* C call frame.
*/
sp = (void *)(((vaddr_t)sp & ~0x7) - CCFSZ);
/* Arrange to begin execution at the upcall handler. */
tf->tf_pc = addr;
tf->tf_npc = addr + 4;
tf->tf_out[6] = (vaddr_t) sp;
tf->tf_out[7] = -1; /* "you lose" if upcall returns */
}
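/*
 * Hedged illustration, NOT part of the committed file: the five
 * tf_out[] assignments in cpu_upcall() above correspond to an
 * ordinary five-argument C call, so the userland upcall handler
 * sees roughly the shape below.  The handler name is hypothetical
 * and the argument types are assumptions taken from the casts used
 * in cpu_upcall().
 */
void
example_upcall(int type, void *sas, int nevents, int ninterrupted, void *ap)
{
	/* On entry, %o0..%o4 carry type, sas, nevents, ninterrupted, ap; */
	/* %o6 is the realigned stack with a C call frame (CCFSZ), and    */
	/* %o7 is -1, so returning from the handler faults ("you lose").  */
}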
void
cpu_getmcontext(l, mcp, flags)
struct lwp *l;
mcontext_t *mcp;
unsigned int *flags;
{
struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
__greg_t *r = mcp->__gregs;
#ifdef FPU_CONTEXT
__fpregset_t *f = &mcp->__fpregs;
struct fpstate *fps = l->l_md.md_fpstate;
#endif
write_user_windows();
if (rwindow_save(l))
sigexit(l, SIGILL);
/*
* Get the general purpose registers
*/
r[_REG_PSR] = tf->tf_psr;
r[_REG_PC] = tf->tf_pc;
r[_REG_nPC] = tf->tf_npc;
r[_REG_Y] = tf->tf_y;
r[_REG_G1] = tf->tf_global[1];
r[_REG_G2] = tf->tf_global[2];
r[_REG_G3] = tf->tf_global[3];
r[_REG_G4] = tf->tf_global[4];
r[_REG_G5] = tf->tf_global[5];
r[_REG_G6] = tf->tf_global[6];
r[_REG_G7] = tf->tf_global[7];
r[_REG_O0] = tf->tf_out[0];
r[_REG_O1] = tf->tf_out[1];
r[_REG_O2] = tf->tf_out[2];
r[_REG_O3] = tf->tf_out[3];
r[_REG_O4] = tf->tf_out[4];
r[_REG_O5] = tf->tf_out[5];
r[_REG_O6] = tf->tf_out[6];
r[_REG_O7] = tf->tf_out[7];
*flags |= _UC_CPU;
#ifdef FPU_CONTEXT
/*
* Get the floating point registers
*/
bcopy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
f->__fp_nqsize = sizeof(struct fp_qentry);
f->__fp_nqel = fps->fs_qsize;
f->__fp_fsr = fps->fs_fsr;
if (f->__fp_q != NULL) {
size_t sz = f->__fp_nqel * f->__fp_nqsize;
if (sz > sizeof(fps->fs_queue)) {
#ifdef DIAGNOSTIC
printf("getcontext: fp_queue too large\n");
#endif
return;
}
if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
#ifdef DIAGNOSTIC
printf("getcontext: copy of fp_queue failed %d\n",
error);
#endif
return;
}
}
f->fp_busy = 0; /* XXX: How do we determine that? */
*flags |= _UC_FPU;
#endif
return;
}
/*
* Set to mcontext specified.
* Return to previous pc and psl as specified by
* context left by sendsig. Check carefully to
* make sure that the user has not modified the
* psl to gain improper privileges or to cause
* a machine fault.
* This is almost like sigreturn() and it shows.
*/
int
cpu_setmcontext(l, mcp, flags)
struct lwp *l;
const mcontext_t *mcp;
unsigned int flags;
{
struct trapframe *tf;
__greg_t *r = mcp->__gregs;
#ifdef FPU_CONTEXT
__fpregset_t *f = &mcp->__fpregs;
struct fpstate *fps = l->l_md.md_fpstate;
#endif
write_user_windows();
if (rwindow_save(l))
sigexit(l, SIGILL);
#ifdef DEBUG
if (sigdebug & SDB_FOLLOW)
printf("__setmcontext: %s[%d], __mcontext %p\n",
l->l_proc->p_comm, l->l_proc->p_pid, mcp);
#endif
if (flags & _UC_CPU) {
/* Restore register context. */
tf = (struct trapframe *)l->l_md.md_tf;
/*
* Only the icc bits in the psr are used, so it need not be
* verified. pc and npc must be multiples of 4. This is all
* that is required; if it holds, just do it.
*/
if (((r[_REG_PC] | r[_REG_nPC]) & 3) != 0) {
printf("pc or npc are not multiples of 4!\n");
return (EINVAL);
}
/* take only psr ICC field */
tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
(r[_REG_PSR] & PSR_ICC);
tf->tf_pc = r[_REG_PC];
tf->tf_npc = r[_REG_nPC];
tf->tf_y = r[_REG_Y];
/* Restore everything */
tf->tf_global[1] = r[_REG_G1];
tf->tf_global[2] = r[_REG_G2];
tf->tf_global[3] = r[_REG_G3];
tf->tf_global[4] = r[_REG_G4];
tf->tf_global[5] = r[_REG_G5];
tf->tf_global[6] = r[_REG_G6];
tf->tf_global[7] = r[_REG_G7];
tf->tf_out[0] = r[_REG_O0];
tf->tf_out[1] = r[_REG_O1];
tf->tf_out[2] = r[_REG_O2];
tf->tf_out[3] = r[_REG_O3];
tf->tf_out[4] = r[_REG_O4];
tf->tf_out[5] = r[_REG_O5];
tf->tf_out[6] = r[_REG_O6];
tf->tf_out[7] = r[_REG_O7];
}
#ifdef FPU_CONTEXT
if (flags & _UC_FPU) {
/*
* Set the floating point registers
*/
int error;
size_t sz = f->__fp_nqel * f->__fp_nqsize;
if (sz > sizeof(fps->fs_queue)) {
#ifdef DIAGNOSTIC
printf("setmcontext: fp_queue too large\n");
#endif
return (EINVAL);
}
bcopy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
fps->fs_qsize = f->__fp_nqel;
fps->fs_fsr = f->__fp_fsr;
if (f->__fp_q != NULL) {
if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
#ifdef DIAGNOSTIC
printf("setmcontext: fp_queue copy failed\n");
#endif
return (error);
}
}
}
#endif
return (0);
}
int waittime = -1;
void
@ -731,12 +947,12 @@ cpu_reboot(howto, user_boot_string)
#endif
boothowto = howto;
if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
extern struct proc proc0;
extern struct lwp lwp0;
extern int sparc_clock_time_is_ok;
/* XXX protect against curproc->p_stats.foo refs in sync() */
if (curproc == NULL)
curproc = &proc0;
/* XXX protect against curlwp->p_stats.foo refs in sync() */
if (curlwp == NULL)
curlwp = &lwp0;
waittime = 0;
vfs_shutdown();
@ -1024,10 +1240,11 @@ oldmon_w_trace(va)
u_long stop;
struct frame *fp;
if (curproc)
printf("curproc = %p, pid %d\n", curproc, curproc->p_pid);
if (curlwp)
printf("curlwp = %p, pid %d\n",
curlwp, curproc->p_pid);
else
printf("no curproc\n");
printf("no curlwp\n");
printf("uvm: swtch %d, trap %d, sys %d, intr %d, soft %d, faults %d\n",
uvmexp.swtch, uvmexp.traps, uvmexp.syscalls, uvmexp.intrs,
@ -1094,10 +1311,10 @@ caddr_t addr;
}
s = splhigh();
if (curproc == NULL)
if (curlwp == NULL)
xpcb = (struct pcb *)proc0paddr;
else
xpcb = &curproc->p_addr->u_pcb;
xpcb = &curlwp->l_addr->u_pcb;
saveonfault = (u_long)xpcb->pcb_onfault;
res = xldcontrolb(addr, xpcb);

View File: memreg.c

@ -1,4 +1,4 @@
/* $NetBSD: memreg.c,v 1.35 2002/10/02 16:02:11 thorpej Exp $ */
/* $NetBSD: memreg.c,v 1.36 2003/01/18 06:45:05 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -257,7 +257,7 @@ hypersparc_memerr(type, sfsr, sfva, tf)
u_int afva;
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_LOCK(curproc);
KERNEL_PROC_LOCK(curlwp);
else
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
@ -276,7 +276,7 @@ hypersparc_memerr(type, sfsr, sfva, tf)
}
out:
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_UNLOCK(curproc);
KERNEL_PROC_UNLOCK(curlwp);
else
KERNEL_UNLOCK();
return;
@ -297,7 +297,7 @@ viking_memerr(type, sfsr, sfva, tf)
u_int afva=0;
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_LOCK(curproc);
KERNEL_PROC_LOCK(curlwp);
else
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
@ -335,7 +335,7 @@ viking_memerr(type, sfsr, sfva, tf)
out:
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_UNLOCK(curproc);
KERNEL_PROC_UNLOCK(curlwp);
else
KERNEL_UNLOCK();
return;
@ -356,7 +356,7 @@ memerr4m(type, sfsr, sfva, tf)
u_int afva;
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_LOCK(curproc);
KERNEL_PROC_LOCK(curlwp);
else
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
@ -369,7 +369,7 @@ memerr4m(type, sfsr, sfva, tf)
hardmemerr4m(type, sfsr, sfva, afsr, afva);
if ((tf->tf_psr & PSR_PS) == 0)
KERNEL_PROC_UNLOCK(curproc);
KERNEL_PROC_UNLOCK(curlwp);
else
KERNEL_UNLOCK();
}

View File: pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.230 2003/01/17 14:15:17 pk Exp $ */
/* $NetBSD: pmap.c,v 1.231 2003/01/18 06:45:05 thorpej Exp $ */
/*
* Copyright (c) 1996
@ -7144,10 +7144,10 @@ pmap_redzone()
* process is the current process, load the new MMU context.
*/
void
pmap_activate(p)
struct proc *p;
pmap_activate(l)
struct lwp *l;
{
pmap_t pm = p->p_vmspace->vm_map.pmap;
pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap;
int s;
/*
@ -7158,7 +7158,7 @@ pmap_activate(p)
*/
s = splvm();
if (p == curproc) {
if (l->l_proc == curproc) {
write_user_windows();
if (pm->pm_ctx == NULL) {
ctx_alloc(pm); /* performs setcontext() */
@ -7179,13 +7179,20 @@ pmap_activate(p)
* Deactivate the address space of the specified process.
*/
void
pmap_deactivate(p)
struct proc *p;
pmap_deactivate(l)
struct lwp *l;
{
#if defined(MULTIPROCESSOR)
pmap_t pm;
struct proc *p;
if (p && p->p_vmspace &&
#ifdef DIAGNOSTIC
if (l == NULL)
panic("pmap_deactivate: l==NULL");
#endif
p = l->l_proc;
if (p->p_vmspace &&
(pm = p->p_vmspace->vm_map.pmap) != pmap_kernel()) {
if (pm->pm_ctx && CPU_HAS_SRMMU)
sp_tlb_flush(0, pm->pm_ctxnum, ASI_SRMMUFP_L0);

View File: process_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: process_machdep.c,v 1.7 1999/12/29 15:21:27 pk Exp $ */
/* $NetBSD: process_machdep.c,v 1.8 2003/01/18 06:45:06 thorpej Exp $ */
/*
* Copyright (c) 1993 The Regents of the University of California.
@ -75,28 +75,28 @@
int
process_read_regs(p, regs)
struct proc *p;
struct lwp *p;
struct reg *regs;
{
/* NOTE: struct reg == struct trapframe */
bcopy(p->p_md.md_tf, (caddr_t)regs, sizeof(struct reg));
bcopy(p->l_md.md_tf, (caddr_t)regs, sizeof(struct reg));
return (0);
}
int
process_write_regs(p, regs)
struct proc *p;
struct lwp *p;
struct reg *regs;
{
int psr = p->p_md.md_tf->tf_psr & ~PSR_ICC;
bcopy((caddr_t)regs, p->p_md.md_tf, sizeof(struct reg));
p->p_md.md_tf->tf_psr = psr | (regs->r_psr & PSR_ICC);
int psr = p->l_md.md_tf->tf_psr & ~PSR_ICC;
bcopy((caddr_t)regs, p->l_md.md_tf, sizeof(struct reg));
p->l_md.md_tf->tf_psr = psr | (regs->r_psr & PSR_ICC);
return (0);
}
int
process_sstep(p, sstep)
struct proc *p;
struct lwp *p;
int sstep;
{
if (sstep)
@ -106,42 +106,42 @@ process_sstep(p, sstep)
int
process_set_pc(p, addr)
struct proc *p;
struct lwp *p;
caddr_t addr;
{
p->p_md.md_tf->tf_pc = (u_int)addr;
p->p_md.md_tf->tf_npc = (u_int)addr + 4;
p->l_md.md_tf->tf_pc = (u_int)addr;
p->l_md.md_tf->tf_npc = (u_int)addr + 4;
return (0);
}
int
process_read_fpregs(p, regs)
struct proc *p;
struct lwp *p;
struct fpreg *regs;
{
extern struct fpstate initfpstate;
struct fpstate *statep = &initfpstate;
/* NOTE: struct fpreg == prefix of struct fpstate */
if (p->p_md.md_fpstate)
statep = p->p_md.md_fpstate;
if (p->l_md.md_fpstate)
statep = p->l_md.md_fpstate;
bcopy(statep, regs, sizeof(struct fpreg));
return (0);
}
int
process_write_fpregs(p, regs)
struct proc *p;
struct lwp *p;
struct fpreg *regs;
{
if (p->p_md.md_fpstate == NULL)
if (p->l_md.md_fpstate == NULL)
return (EINVAL);
/* Write new values to the FP registers */
bcopy(regs, p->p_md.md_fpstate, sizeof(struct fpreg));
bcopy(regs, p->l_md.md_fpstate, sizeof(struct fpreg));
/* Reset FP queue in this process `fpstate' */
p->p_md.md_fpstate->fs_qsize = 0;
p->l_md.md_fpstate->fs_qsize = 0;
return (0);
}

View File: sunos_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: sunos_machdep.c,v 1.10 2002/07/04 23:32:07 thorpej Exp $ */
/* $NetBSD: sunos_machdep.c,v 1.11 2003/01/18 06:45:06 thorpej Exp $ */
/*
* Copyright (c) 1995 Matthew R. Green
@ -41,6 +41,7 @@
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <compat/sunos/sunos.h>
#include <compat/sunos/sunos_syscallargs.h>
@ -70,14 +71,15 @@ sunos_sendsig(sig, mask, code)
sigset_t *mask;
u_long code;
{
struct proc *p = curproc;
struct lwp *l = curlwp;
struct proc *p = l->l_proc;
struct sunos_sigframe *fp;
struct trapframe *tf;
int addr, onstack, oldsp, newsp;
sig_t catcher = SIGACTION(p, sig).sa_handler;
struct sunos_sigframe sf;
tf = p->p_md.md_tf;
tf = l->l_md.md_tf;
oldsp = tf->tf_out[6];
/*
@ -134,7 +136,7 @@ sunos_sendsig(sig, mask, code)
*/
newsp = (int)fp - sizeof(struct rwindow);
write_user_windows();
if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
if (rwindow_save(l) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
/*
* Process has trashed its stack; give it an illegal
@ -144,7 +146,7 @@ sunos_sendsig(sig, mask, code)
if ((sunos_sigdebug & SDB_KSTACK) && p->p_pid == sunos_sigpid)
printf("sendsig: window save or copyout error\n");
#endif
sigexit(p, SIGILL);
sigexit(l, SIGILL);
/* NOTREACHED */
}
#ifdef DEBUG
@ -172,13 +174,13 @@ sunos_sendsig(sig, mask, code)
}
int
sunos_sys_sigreturn(p, v, retval)
register struct proc *p;
sunos_sys_sigreturn(l, v, retval)
register struct lwp *l;
void *v;
register_t *retval;
{
struct sunos_sys_sigreturn_args *uap = v;
return (compat_13_sys_sigreturn(p,
return (compat_13_sys_sigreturn(l,
(struct compat_13_sys_sigreturn_args *)uap, retval));
}

View File: svr4_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.c,v 1.46 2002/07/04 23:32:07 thorpej Exp $ */
/* $NetBSD: svr4_machdep.c,v 1.47 2003/01/18 06:45:06 thorpej Exp $ */
/*-
* Copyright (c) 1994 The NetBSD Foundation, Inc.
@ -53,6 +53,7 @@
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/exec_elf.h>
@ -73,13 +74,13 @@
static void svr4_getsiginfo __P((union svr4_siginfo *, int, u_long, caddr_t));
void
svr4_setregs(p, epp, stack)
struct proc *p;
svr4_setregs(l, epp, stack)
struct lwp *l;
struct exec_package *epp;
u_long stack;
{
setregs(p, epp, stack);
setregs(l, epp, stack);
}
#ifdef DEBUG
@ -127,21 +128,21 @@ svr4_printmcontext(fun, mc)
#endif
void *
svr4_getmcontext(p, mc, flags)
struct proc *p;
svr4_getmcontext(l, mc, flags)
struct lwp *l;
struct svr4_mcontext *mc;
u_long *flags;
{
struct trapframe *tf = (struct trapframe *)p->p_md.md_tf;
struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
svr4_greg_t *r = mc->greg;
#ifdef FPU_CONTEXT
svr4_fregset_t *f = &mc->freg;
struct fpstate *fps = p->p_md.md_fpstate;
struct fpstate *fps = l->l_md.md_fpstate;
#endif
write_user_windows();
if (rwindow_save(p))
sigexit(p, SIGILL);
if (rwindow_save(l))
sigexit(l, SIGILL);
/*
* Get the general purpose registers
@ -214,8 +215,8 @@ svr4_getmcontext(p, mc, flags)
* This is almost like sigreturn() and it shows.
*/
int
svr4_setmcontext(p, mc, flags)
struct proc *p;
svr4_setmcontext(l, mc, flags)
struct lwp *l;
struct svr4_mcontext *mc;
u_long flags;
{
@@ -223,7 +224,7 @@ svr4_setmcontext(p, mc, flags)
svr4_greg_t *r = mc->greg;
#ifdef FPU_CONTEXT
svr4_fregset_t *f = &mc->freg;
struct fpstate *fps = p->p_md.md_fpstate;
struct fpstate *fps = l->l_md.md_fpstate;
#endif
#ifdef DEBUG_SVR4
@@ -231,18 +232,18 @@ svr4_setmcontext(p, mc, flags)
#endif
write_user_windows();
if (rwindow_save(p))
sigexit(p, SIGILL);
if (rwindow_save(l))
sigexit(l, SIGILL);
#ifdef DEBUG
if (sigdebug & SDB_FOLLOW)
printf("svr4_setmcontext: %s[%d], svr4_mcontext %p\n",
p->p_comm, p->p_pid, mc);
l->l_proc->p_comm, l->l_proc->p_pid, mc);
#endif
if (flags & SVR4_UC_CPU) {
/* Restore register context. */
tf = (struct trapframe *)p->p_md.md_tf;
tf = (struct trapframe *)l->l_md.md_tf;
/*
* Only the icc bits in the psr are used, so it need not be
@@ -455,13 +456,14 @@ svr4_sendsig(sig, mask, code)
sigset_t *mask;
u_long code;
{
register struct proc *p = curproc;
register struct lwp *l = curlwp;
struct proc *p = l->l_proc;
register struct trapframe *tf;
struct svr4_sigframe *fp, frame;
int onstack, oldsp, newsp, addr;
sig_t catcher = SIGACTION(p, sig).sa_handler;
tf = (struct trapframe *)p->p_md.md_tf;
tf = (struct trapframe *)l->l_md.md_tf;
oldsp = tf->tf_out[6];
/* Do we need to jump onto the signal stack? */
@@ -482,7 +484,7 @@ svr4_sendsig(sig, mask, code)
/*
* Build the argument list for the signal handler.
*/
svr4_getcontext(p, &frame.sf_uc, mask);
svr4_getcontext(l, &frame.sf_uc);
svr4_getsiginfo(&frame.sf_si, sig, code, (caddr_t) tf->tf_pc);
/* Build stack frame for signal trampoline. */
@@ -502,7 +504,7 @@ svr4_sendsig(sig, mask, code)
newsp = (int)fp - sizeof(struct rwindow);
write_user_windows();
if (rwindow_save(p) || copyout(&frame, fp, sizeof(frame)) != 0 ||
if (rwindow_save(l) || copyout(&frame, fp, sizeof(frame)) != 0 ||
suword(&((struct rwindow *)newsp)->rw_in[6], oldsp)) {
/*
* Process has trashed its stack; give it an illegal
@@ -512,7 +514,7 @@ svr4_sendsig(sig, mask, code)
if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
printf("svr4_sendsig: window save or copyout error\n");
#endif
sigexit(p, SIGILL);
sigexit(l, SIGILL);
/* NOTREACHED */
}
@@ -533,14 +535,14 @@ svr4_sendsig(sig, mask, code)
#define ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)
int
svr4_trap(type, p)
svr4_trap(type, l)
int type;
struct proc *p;
struct lwp *l;
{
int n;
struct trapframe *tf = p->p_md.md_tf;
struct trapframe *tf = l->l_md.md_tf;
if (p->p_emul != &emul_svr4)
if (l->l_proc->p_emul != &emul_svr4)
return 0;
switch (type) {
@@ -606,11 +608,11 @@ svr4_trap(type, p)
microtime(&tv);
tm =
(u_quad_t) (p->p_rtime.tv_sec +
(u_quad_t) (l->l_proc->p_rtime.tv_sec +
tv.tv_sec -
spc->spc_runtime.tv_sec)
* 1000000 +
(u_quad_t) (p->p_rtime.tv_usec +
(u_quad_t) (l->l_proc->p_rtime.tv_usec +
tv.tv_usec -
spc->spc_runtime.tv_usec)
* 1000;
@@ -643,8 +645,8 @@ svr4_trap(type, p)
/*
*/
int
svr4_sys_sysarch(p, v, retval)
struct proc *p;
svr4_sys_sysarch(l, v, retval)
struct lwp *l;
void *v;
register_t *retval;
{


@@ -1,4 +1,4 @@
/* $NetBSD: sys_machdep.c,v 1.10 2000/12/13 18:13:10 jdolecek Exp $ */
/* $NetBSD: sys_machdep.c,v 1.11 2003/01/18 06:45:06 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -49,17 +49,17 @@
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
int
sys_sysarch(p, v, retval)
struct proc *p;
struct lwp *p;
void *v;
register_t *retval;
{


@@ -1,4 +1,4 @@
/* $NetBSD: timer_msiiep.c,v 1.9 2003/01/15 06:57:12 pk Exp $ */
/* $NetBSD: timer_msiiep.c,v 1.10 2003/01/18 06:45:06 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -160,7 +160,7 @@ statintr_msiiep(void *cap)
* The factor 8 is only valid for stathz==100.
* See also clock.c
*/
if (curproc && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (CLKF_LOPRI(frame, IPL_SCHED)) {
/* No need to schedule a soft interrupt */
spllowerschedclock();


@@ -1,4 +1,4 @@
/* $NetBSD: timer_sun4.c,v 1.9 2003/01/15 06:57:12 pk Exp $ */
/* $NetBSD: timer_sun4.c,v 1.10 2003/01/18 06:45:06 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -132,7 +132,7 @@ statintr_4(void *cap)
* The factor 8 is only valid for stathz==100.
* See also clock.c
*/
if (curproc && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (CLKF_LOPRI(frame, IPL_SCHED)) {
/* No need to schedule a soft interrupt */
spllowerschedclock();


@@ -1,4 +1,4 @@
/* $NetBSD: timer_sun4m.c,v 1.6 2003/01/14 23:00:59 pk Exp $ */
/* $NetBSD: timer_sun4m.c,v 1.7 2003/01/18 06:45:07 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -163,7 +163,7 @@ statintr_4m(void *cap)
* The factor 8 is only valid for stathz==100.
* See also clock.c
*/
if (curproc && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
if (CLKF_LOPRI(frame, IPL_SCHED)) {
/* No need to schedule a soft interrupt */
spllowerschedclock();


@@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.129 2003/01/13 20:00:34 pk Exp $ */
/* $NetBSD: trap.c,v 1.130 2003/01/18 06:45:07 thorpej Exp $ */
/*
* Copyright (c) 1996
@@ -61,9 +61,12 @@
#include <sys/user.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/wait.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscall.h>
#include <sys/syslog.h>
#ifdef KTRACE
@@ -194,9 +197,9 @@ const char *trap_type[] = {
#define N_TRAP_TYPES (sizeof trap_type / sizeof *trap_type)
static __inline void userret __P((struct proc *, int, u_quad_t));
static __inline void userret __P((struct lwp *, int, u_quad_t));
void trap __P((unsigned, int, int, struct trapframe *));
static __inline void share_fpu __P((struct proc *, struct trapframe *));
static __inline void share_fpu __P((struct lwp *, struct trapframe *));
void mem_access_fault __P((unsigned, int, u_int, int, int, struct trapframe *));
void mem_access_fault4m __P((unsigned, u_int, u_int, struct trapframe *));
void syscall __P((register_t, struct trapframe *, register_t));
@@ -208,17 +211,18 @@ int ignore_bogus_traps = 1;
* trap, mem_access_fault, and syscall.
*/
static __inline void
userret(p, pc, oticks)
struct proc *p;
userret(l, pc, oticks)
struct lwp *l;
int pc;
u_quad_t oticks;
{
struct proc *p = l->l_proc;
int sig;
/* take pending signals */
while ((sig = CURSIG(p)) != 0)
while ((sig = CURSIG(l)) != 0)
postsig(sig);
p->p_priority = p->p_usrpri;
l->l_priority = l->l_usrpri;
if (cpuinfo.want_ast) {
cpuinfo.want_ast = 0;
if (p->p_flag & P_OWEUPC) {
@@ -230,18 +234,55 @@ userret(p, pc, oticks)
/*
* We are being preempted.
*/
preempt(NULL);
while ((sig = CURSIG(p)) != 0)
preempt(0);
while ((sig = CURSIG(l)) != 0)
postsig(sig);
}
/* Invoke any pending upcalls. */
while (l->l_flag & L_SA_UPCALL)
sa_upcall_userret(l);
/*
* If profiling, charge recent system time to the trapped pc.
*/
if (p->p_flag & P_PROFIL)
addupc_task(p, pc, (int)(p->p_sticks - oticks));
curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
}
/*
* Start a new LWP
*/
void
startlwp(arg)
void *arg;
{
int err;
ucontext_t *uc = arg;
struct lwp *l = curlwp;
err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
#if DIAGNOSTIC
if (err) {
printf("Error %d from cpu_setmcontext.", err);
}
#endif
pool_put(&lwp_uc_pool, uc);
userret(l, l->l_md.md_tf->tf_pc, 0);
}
/*
* XXX This is a terrible name.
*/
void
upcallret(l)
struct lwp *l;
{
userret(l, l->l_md.md_tf->tf_pc, 0);
}
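/*
 * A minimal sketch of how startlwp() is expected to be reached; the
 * function example_spawn_lwp() and the names l1, l2 and ucp below are
 * hypothetical placeholders, assuming the MI LWP-creation path hands
 * the saved user context to the new LWP through the cpu_lwp_fork()
 * func/arg mechanism defined later in this commit.
 */
#ifdef notyet
static int
example_spawn_lwp(struct lwp *l1, struct lwp *l2, const ucontext_t *ucp)
{
	ucontext_t *uc;
	int error;

	uc = pool_get(&lwp_uc_pool, PR_WAITOK);
	if ((error = copyin(ucp, uc, sizeof(*uc))) != 0) {
		pool_put(&lwp_uc_pool, uc);
		return (error);
	}
	/* The new LWP resumes in proc_trampoline, which calls startlwp(uc). */
	cpu_lwp_fork(l1, l2, NULL, 0, startlwp, uc);
	return (0);
}
#endif	/* notyet */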
/*
@@ -251,10 +292,10 @@ userret(p, pc, oticks)
* ktrsysret should occur before the call to userret.
*/
static __inline void share_fpu(p, tf)
struct proc *p;
struct lwp *p;
struct trapframe *tf;
{
if ((tf->tf_psr & PSR_EF) != 0 && cpuinfo.fpproc != p)
if ((tf->tf_psr & PSR_EF) != 0 && cpuinfo.fplwp != p)
tf->tf_psr &= ~PSR_EF;
}
@@ -269,6 +310,7 @@ trap(type, psr, pc, tf)
struct trapframe *tf;
{
struct proc *p;
struct lwp *l;
struct pcb *pcb;
int n, s;
char bits[64];
@@ -351,19 +393,20 @@ trap(type, psr, pc, tf)
panic(type < N_TRAP_TYPES ? trap_type[type] : T);
/* NOTREACHED */
}
if ((p = curproc) == NULL)
p = &proc0;
if ((l = curlwp) == NULL)
l = &lwp0;
p = l->l_proc;
sticks = p->p_sticks;
pcb = &p->p_addr->u_pcb;
p->p_md.md_tf = tf; /* for ptrace/signals */
pcb = &l->l_addr->u_pcb;
l->l_md.md_tf = tf; /* for ptrace/signals */
#ifdef FPU_DEBUG
if (type != T_FPDISABLED && (tf->tf_psr & PSR_EF) != 0) {
if (cpuinfo.fpproc != p)
if (cpuinfo.fplwp != l)
panic("FPU enabled but wrong proc (0)");
savefpstate(p->p_md.md_fpstate);
p->p_md.md_fpu = NULL;
cpuinfo.fpproc = NULL;
savefpstate(l->l_md.md_fpstate);
l->l_md.md_fpumid = -1;
cpuinfo.fplwp = NULL;
tf->tf_psr &= ~PSR_EF;
setpsr(getpsr() & ~PSR_EF);
}
@@ -406,7 +449,7 @@ badtrap:
case T_SVR4_GETHRTIME:
case T_SVR4_GETHRVTIME:
case T_SVR4_GETHRESTIME:
if (!svr4_trap(type, p))
if (!svr4_trap(type, l))
goto badtrap;
break;
#endif
@@ -429,7 +472,7 @@ badtrap:
break;
case T_FPDISABLED: {
struct fpstate *fs = p->p_md.md_fpstate;
struct fpstate *fs = l->l_md.md_fpstate;
#ifdef FPU_DEBUG
if ((tf->tf_psr & PSR_PS) != 0) {
@@ -441,18 +484,18 @@ badtrap:
#endif
if (fs == NULL) {
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
fs = malloc(sizeof *fs, M_SUBPROC, M_WAITOK);
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
*fs = initfpstate;
p->p_md.md_fpstate = fs;
l->l_md.md_fpstate = fs;
}
/*
* If we have not found an FPU, we have to emulate it.
*/
if (!cpuinfo.fpupresent) {
#ifdef notyet
fpu_emulate(p, tf, fs);
fpu_emulate(l, tf, fs);
#else
sig = SIGFPE;
/* XXX - ucode? */
@@ -465,46 +508,74 @@ badtrap:
* resolve the FPU state, turn it on, and try again.
*/
if (fs->fs_qsize) {
fpu_cleanup(p, fs);
fpu_cleanup(l, fs);
break;
}
if (cpuinfo.fpproc != p) { /* we do not have it */
#if 1
if (cpuinfo.fplwp != l) { /* we do not have it */
struct cpu_info *cpi;
FPU_LOCK(s);
if (cpuinfo.fpproc != NULL) { /* someone else had it*/
savefpstate(cpuinfo.fpproc->p_md.md_fpstate);
cpuinfo.fpproc->p_md.md_fpu = NULL;
if (cpuinfo.fplwp != NULL) { /* someone else had it*/
savefpstate(cpuinfo.fplwp->l_md.md_fpstate);
cpuinfo.fplwp->l_md.md_fpu = NULL;
}
/*
* On MP machines, some of the other FPUs might
* still have our state. Tell the owning processor
* to save the process' FPU state.
*/
if ((cpi = p->p_md.md_fpu) != NULL) {
if ((cpi = l->l_md.md_fpu) != NULL) {
if (cpi->ci_cpuid == cpuinfo.ci_cpuid)
panic("FPU(%d): state for %p",
cpi->ci_cpuid, p);
cpi->ci_cpuid, l);
#if defined(MULTIPROCESSOR)
XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
cpi->fpproc = NULL;
cpi->fplwp = NULL;
}
loadfpstate(fs);
cpuinfo.fpproc = p; /* now we do have it */
p->p_md.md_fpu = curcpu();
cpuinfo.fplwp = l; /* now we do have it */
l->l_md.md_fpu = curcpu();
FPU_UNLOCK(s);
}
#else
if (cpuinfo.fplwp != l) { /* we do not have it */
int mid;
FPU_LOCK(s);
mid = l->l_md.md_fpumid;
if (cpuinfo.fplwp != NULL) { /* someone else had it*/
savefpstate(cpuinfo.fplwp->l_md.md_fpstate);
cpuinfo.fplwp->l_md.md_fpumid = -1;
}
/*
* On MP machines, some of the other FPUs might
* still have our state. We can't handle that yet,
* so panic if it happens. Possible solutions:
* (1) send an inter-processor message to have the
* other FPU save the state, or (2) don't do lazy FPU
* context switching at all.
*/
if (mid != -1 && mid != cpuinfo.mid) {
printf("own FPU on module %d\n", mid);
panic("fix this");
}
loadfpstate(fs);
cpuinfo.fplwp = l; /* now we do have it */
l->l_md.md_fpu = curcpu();
FPU_UNLOCK(s);
}
#endif
tf->tf_psr |= PSR_EF;
break;
}
case T_WINOF:
KERNEL_PROC_LOCK(p);
if (rwindow_save(p))
sigexit(p, SIGILL);
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_LOCK(l);
if (rwindow_save(l))
sigexit(l, SIGILL);
KERNEL_PROC_UNLOCK(l);
break;
#define read_rw(src, dst) \
@@ -519,7 +590,7 @@ badtrap:
* nsaved to -1. If we decide to deliver a signal on
* our way out, we will clear nsaved.
*/
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
if (pcb->pcb_uw || pcb->pcb_nsaved)
panic("trap T_RWRET 1");
#ifdef DEBUG
@@ -529,11 +600,11 @@ badtrap:
tf->tf_out[6]);
#endif
if (read_rw(tf->tf_out[6], &pcb->pcb_rw[0]))
sigexit(p, SIGILL);
sigexit(l, SIGILL);
if (pcb->pcb_nsaved)
panic("trap T_RWRET 2");
pcb->pcb_nsaved = -1; /* mark success */
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
break;
case T_WINUF:
@@ -546,7 +617,7 @@ badtrap:
* in the pcb. The restore's window may still be in
* the cpu; we need to force it out to the stack.
*/
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
#ifdef DEBUG
if (rwindow_debug)
printf("cpu%d:%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n",
@@ -554,8 +625,8 @@ badtrap:
tf->tf_out[6]);
#endif
write_user_windows();
if (rwindow_save(p) || read_rw(tf->tf_out[6], &pcb->pcb_rw[0]))
sigexit(p, SIGILL);
if (rwindow_save(l) || read_rw(tf->tf_out[6], &pcb->pcb_rw[0]))
sigexit(l, SIGILL);
#ifdef DEBUG
if (rwindow_debug)
printf("cpu%d:%s[%d]: rwindow: T_WINUF 1: pcb<-stack: 0x%x\n",
@@ -563,18 +634,18 @@ badtrap:
pcb->pcb_rw[0].rw_in[6]);
#endif
if (read_rw(pcb->pcb_rw[0].rw_in[6], &pcb->pcb_rw[1]))
sigexit(p, SIGILL);
sigexit(l, SIGILL);
if (pcb->pcb_nsaved)
panic("trap T_WINUF");
pcb->pcb_nsaved = -1; /* mark success */
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
break;
case T_ALIGN:
if ((p->p_md.md_flags & MDP_FIXALIGN) != 0) {
KERNEL_PROC_LOCK(p);
n = fixalign(p, tf);
KERNEL_PROC_UNLOCK(p);
if ((l->l_md.md_flags & MDP_FIXALIGN) != 0) {
KERNEL_PROC_LOCK(l);
n = fixalign(l, tf);
KERNEL_PROC_UNLOCK(l);
if (n == 0) {
ADVANCE;
break;
@@ -593,15 +664,17 @@ badtrap:
* will not match once fpu_cleanup does its job, so
* we must not save again later.)
*/
if (p != cpuinfo.fpproc)
KERNEL_PROC_LOCK(l);
if (l != cpuinfo.fplwp)
panic("fpe without being the FP user");
FPU_LOCK(s);
savefpstate(p->p_md.md_fpstate);
cpuinfo.fpproc = NULL;
p->p_md.md_fpu = NULL;
savefpstate(l->l_md.md_fpstate);
cpuinfo.fplwp = NULL;
l->l_md.md_fpu = NULL;
FPU_UNLOCK(s);
/* tf->tf_psr &= ~PSR_EF; */ /* share_fpu will do this */
fpu_cleanup(p, p->p_md.md_fpstate);
fpu_cleanup(l, l->l_md.md_fpstate);
KERNEL_PROC_UNLOCK(l);
/* fpu_cleanup posts signals if needed */
#if 0 /* ??? really never??? */
ADVANCE;
@@ -633,10 +706,10 @@ badtrap:
case T_FLUSHWIN:
write_user_windows();
#ifdef probably_slower_since_this_is_usually_false
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
if (pcb->pcb_nsaved && rwindow_save(p))
sigexit(p, SIGILL);
KERNEL_PROC_UNLOCK(p);
sigexit(l, SIGILL);
KERNEL_PROC_UNLOCK(l);
#endif
ADVANCE;
break;
@@ -658,7 +731,7 @@ badtrap:
uprintf("T_FIXALIGN\n");
#endif
/* User wants us to fix alignment faults */
p->p_md.md_flags |= MDP_FIXALIGN;
l->l_md.md_flags |= MDP_FIXALIGN;
ADVANCE;
break;
@@ -670,12 +743,12 @@ badtrap:
break;
}
if (sig != 0) {
KERNEL_PROC_LOCK(p);
trapsignal(p, sig, ucode);
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_LOCK(l);
trapsignal(l, sig, ucode);
KERNEL_PROC_UNLOCK(l);
}
userret(p, pc, sticks);
share_fpu(p, tf);
userret(l, pc, sticks);
share_fpu(l, tf);
#undef ADVANCE
}
@@ -689,10 +762,10 @@ badtrap:
* If the windows cannot be saved, pcb_nsaved is restored and we return -1.
*/
int
rwindow_save(p)
struct proc *p;
rwindow_save(l)
struct lwp *l;
{
struct pcb *pcb = &p->p_addr->u_pcb;
struct pcb *pcb = &l->l_addr->u_pcb;
struct rwindow *rw = &pcb->pcb_rw[0];
int i;
@@ -706,7 +779,7 @@ rwindow_save(p)
#ifdef DEBUG
if (rwindow_debug)
printf("cpu%d:%s[%d]: rwindow: pcb->stack:",
cpuinfo.ci_cpuid, p->p_comm, p->p_pid);
cpuinfo.ci_cpuid, l->l_proc->p_comm, l->l_proc->p_pid);
#endif
do {
#ifdef DEBUG
@@ -732,12 +805,12 @@ rwindow_save(p)
* the registers into the new process after the exec.
*/
void
kill_user_windows(p)
struct proc *p;
kill_user_windows(l)
struct lwp *l;
{
write_user_windows();
p->p_addr->u_pcb.pcb_nsaved = 0;
l->l_addr->u_pcb.pcb_nsaved = 0;
}
/*
@@ -761,6 +834,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
{
#if defined(SUN4) || defined(SUN4C)
struct proc *p;
struct lwp *l;
struct vmspace *vm;
vaddr_t va;
int rv = EFAULT;
@@ -770,20 +844,21 @@ mem_access_fault(type, ser, v, pc, psr, tf)
char bits[64];
uvmexp.traps++;
if ((p = curproc) == NULL) /* safety check */
p = &proc0;
if ((l = curlwp) == NULL) /* safety check */
l = &lwp0;
p = l->l_proc;
sticks = p->p_sticks;
if ((psr & PSR_PS) == 0)
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
#ifdef FPU_DEBUG
if ((tf->tf_psr & PSR_EF) != 0) {
if (cpuinfo.fpproc != p)
if (cpuinfo.fplwp != l)
panic("FPU enabled but wrong proc (1)");
savefpstate(p->p_md.md_fpstate);
p->p_md.md_fpu = NULL;
cpuinfo.fpproc = NULL;
savefpstate(l->l_md.md_fpstate);
l->l_md.md_fpu = NULL;
cpuinfo.fplwp = NULL;
tf->tf_psr &= ~PSR_EF;
setpsr(getpsr() & ~PSR_EF);
}
@@ -819,7 +894,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
* If this was an access that we shouldn't try to page in,
* resume at the fault handler without any action.
*/
if (p->p_addr && p->p_addr->u_pcb.pcb_onfault == Lfsbail)
if (l->l_addr && l->l_addr->u_pcb.pcb_onfault == Lfsbail)
goto kfault;
/*
@@ -836,7 +911,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
goto kfault;
}
} else
p->p_md.md_tf = tf;
l->l_md.md_tf = tf;
/*
* mmu_pagein returns -1 if the page is already valid, in which
@@ -894,8 +969,8 @@ mem_access_fault(type, ser, v, pc, psr, tf)
fault:
if (psr & PSR_PS) {
kfault:
onfault = p->p_addr ?
(int)p->p_addr->u_pcb.pcb_onfault : 0;
onfault = l->l_addr ?
(int)l->l_addr->u_pcb.pcb_onfault : 0;
if (!onfault) {
(void) splhigh();
printf("data fault: pc=0x%x addr=0x%x ser=%s\n",
@@ -914,15 +989,15 @@ kfault:
p->p_pid, p->p_comm,
p->p_cred && p->p_ucred ?
p->p_ucred->cr_uid : -1);
trapsignal(p, SIGKILL, (u_int)v);
trapsignal(l, SIGKILL, (u_int)v);
} else
trapsignal(p, SIGSEGV, (u_int)v);
trapsignal(l, SIGSEGV, (u_int)v);
}
out:
if ((psr & PSR_PS) == 0) {
KERNEL_PROC_UNLOCK(p);
userret(p, pc, sticks);
share_fpu(p, tf);
KERNEL_PROC_UNLOCK(l);
userret(l, pc, sticks);
share_fpu(l, tf);
}
#endif /* SUN4 || SUN4C */
}
@@ -939,6 +1014,7 @@ mem_access_fault4m(type, sfsr, sfva, tf)
{
int pc, psr;
struct proc *p;
struct lwp *l;
struct vmspace *vm;
vaddr_t va;
int rv = EFAULT;
@@ -949,17 +1025,18 @@ mem_access_fault4m(type, sfsr, sfva, tf)
uvmexp.traps++; /* XXXSMP */
if ((p = curproc) == NULL) /* safety check */
p = &proc0;
if ((l = curlwp) == NULL) /* safety check */
l = &lwp0;
p = l->l_proc;
sticks = p->p_sticks;
#ifdef FPU_DEBUG
if ((tf->tf_psr & PSR_EF) != 0) {
if (cpuinfo.fpproc != p)
if (cpuinfo.fplwp != l)
panic("FPU enabled but wrong proc (2)");
savefpstate(p->p_md.md_fpstate);
p->p_md.md_fpu = NULL;
cpuinfo.fpproc = NULL;
savefpstate(l->l_md.md_fpstate);
l->l_md.md_fpu = NULL;
cpuinfo.fplwp = NULL;
tf->tf_psr &= ~PSR_EF;
setpsr(getpsr() & ~PSR_EF);
}
@@ -994,7 +1071,7 @@ mem_access_fault4m(type, sfsr, sfva, tf)
}
if ((psr & PSR_PS) == 0)
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
else
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
@@ -1102,7 +1179,7 @@ mem_access_fault4m(type, sfsr, sfva, tf)
* If this was an access that we shouldn't try to page in,
* resume at the fault handler without any action.
*/
if (p->p_addr && p->p_addr->u_pcb.pcb_onfault == Lfsbail)
if (l->l_addr && l->l_addr->u_pcb.pcb_onfault == Lfsbail)
goto kfault;
/*
@@ -1121,7 +1198,7 @@ mem_access_fault4m(type, sfsr, sfva, tf)
goto kfault;
}
} else
p->p_md.md_tf = tf;
l->l_md.md_tf = tf;
vm = p->p_vmspace;
@@ -1152,8 +1229,8 @@ mem_access_fault4m(type, sfsr, sfva, tf)
fault:
if (psr & PSR_PS) {
kfault:
onfault = p->p_addr ?
(int)p->p_addr->u_pcb.pcb_onfault : 0;
onfault = l->l_addr ?
(int)l->l_addr->u_pcb.pcb_onfault : 0;
if (!onfault) {
(void) splhigh();
printf("data fault: pc=0x%x addr=0x%x sfsr=%s\n",
@@ -1173,16 +1250,16 @@ kfault:
p->p_pid, p->p_comm,
p->p_cred && p->p_ucred ?
p->p_ucred->cr_uid : -1);
trapsignal(p, SIGKILL, (u_int)sfva);
trapsignal(l, SIGKILL, (u_int)sfva);
} else
trapsignal(p, SIGSEGV, (u_int)sfva);
trapsignal(l, SIGSEGV, (u_int)sfva);
}
out:
if ((psr & PSR_PS) == 0) {
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
out_nounlock:
userret(p, pc, sticks);
share_fpu(p, tf);
userret(l, pc, sticks);
share_fpu(l, tf);
}
else
KERNEL_UNLOCK();
@@ -1206,6 +1283,7 @@ syscall(code, tf, pc)
int i, nsys, *ap, nap;
const struct sysent *callp;
struct proc *p;
struct lwp *l;
int error, new;
struct args {
register_t i[8];
@@ -1214,29 +1292,29 @@ syscall(code, tf, pc)
u_quad_t sticks;
uvmexp.syscalls++; /* XXXSMP */
p = curproc;
l = curlwp;
p = l->l_proc;
#ifdef DIAGNOSTIC
if (tf->tf_psr & PSR_PS)
panic("syscall");
if (cpuinfo.curpcb != &p->p_addr->u_pcb)
if (cpuinfo.curpcb != &l->l_addr->u_pcb)
panic("syscall cpcb/ppcb");
if (tf != (struct trapframe *)((caddr_t)cpuinfo.curpcb + USPACE) - 1)
panic("syscall trapframe");
#endif
sticks = p->p_sticks;
p->p_md.md_tf = tf;
l->l_md.md_tf = tf;
new = code & (SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);
code &= ~(SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);
#ifdef FPU_DEBUG
if ((tf->tf_psr & PSR_EF) != 0) {
if (cpuinfo.fpproc != p)
if (cpuinfo.fplwp != p)
panic("FPU enabled but wrong proc (3)");
savefpstate(p->p_md.md_fpstate);
p->p_md.md_fpumid = -1;
cpuinfo.fpproc = NULL;
l->l_md.md_fpumid = -1;
cpuinfo.fplwp = NULL;
tf->tf_psr &= ~PSR_EF;
setpsr(getpsr() & ~PSR_EF);
}
@@ -1293,20 +1371,20 @@ syscall(code, tf, pc)
/* Lock the kernel if the syscall isn't MP-safe. */
if ((callp->sy_flags & SYCALL_MPSAFE) == 0)
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
if ((error = trace_enter(p, code, code, NULL, args.i, rval)) != 0) {
if ((error = trace_enter(l, code, code, NULL, args.i, rval)) != 0) {
if ((callp->sy_flags & SYCALL_MPSAFE) == 0)
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
goto bad;
}
rval[0] = 0;
rval[1] = tf->tf_out[1];
error = (*callp->sy_call)(p, &args, rval);
error = (*callp->sy_call)(l, &args, rval);
if ((callp->sy_flags & SYCALL_MPSAFE) == 0)
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
switch (error) {
case 0:
@@ -1346,10 +1424,10 @@ syscall(code, tf, pc)
break;
}
trace_exit(p, code, args.i, rval, error);
trace_exit(l, code, args.i, rval, error);
userret(p, pc, sticks);
share_fpu(p, tf);
userret(l, pc, sticks);
share_fpu(l, tf);
}
/*
@@ -1359,19 +1437,23 @@ void
child_return(arg)
void *arg;
{
struct proc *p = arg;
struct lwp *l = arg;
#ifdef KTRACE
struct proc *p;
#endif
/*
* Return values in the frame set by cpu_fork().
*/
KERNEL_PROC_UNLOCK(p);
userret(p, p->p_md.md_tf->tf_pc, 0);
KERNEL_PROC_UNLOCK(l);
userret(l, l->l_md.md_tf->tf_pc, 0);
#ifdef KTRACE
p = l->l_proc;
if (KTRPOINT(p, KTR_SYSRET)) {
KERNEL_PROC_LOCK(p);
KERNEL_PROC_LOCK(l);
ktrsysret(p,
(p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
KERNEL_PROC_UNLOCK(p);
KERNEL_PROC_UNLOCK(l);
}
#endif
}


@@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.70 2003/01/12 16:29:01 pk Exp $ */
/* $NetBSD: vm_machdep.c,v 1.71 2003/01/18 06:45:07 thorpej Exp $ */
/*
* Copyright (c) 1996
@@ -180,16 +180,16 @@ vunmapbuf(bp, len)
#define TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-sizeof(struct frame))
/*
* Finish a fork operation, with process p2 nearly set up.
* Finish a fork operation, with process l2 nearly set up.
* Copy and update the pcb and trap frame, making the child ready to run.
*
* Rig the child's kernel stack so that it will start out in
* proc_trampoline() and call child_return() with p2 as an
* proc_trampoline() and call child_return() with l2 as an
* argument. This causes the newly-created child process to go
* directly to user level with an apparent return value of 0 from
* fork(), while the parent process returns normally.
*
* p1 is the process being forked; if p1 == &proc0, we are creating
* l1 is the process being forked; if l1 == &lwp0, we are creating
* a kernel thread, and the return path and argument are specified with
* `func' and `arg'.
*
@@ -198,73 +198,73 @@ vunmapbuf(bp, len)
* accordingly.
*/
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
struct proc *p1, *p2;
cpu_lwp_fork(l1, l2, stack, stacksize, func, arg)
struct lwp *l1, *l2;
void *stack;
size_t stacksize;
void (*func) __P((void *));
void *arg;
{
struct pcb *opcb = &p1->p_addr->u_pcb;
struct pcb *npcb = &p2->p_addr->u_pcb;
struct pcb *opcb = &l1->l_addr->u_pcb;
struct pcb *npcb = &l2->l_addr->u_pcb;
struct trapframe *tf2;
struct rwindow *rp;
/*
* Save all user registers to p1's stack or, in the case of
* Save all user registers to l1's stack or, in the case of
* user registers and invalid stack pointers, to opcb.
* We then copy the whole pcb to p2; when switch() selects p2
* to run, it will run at the `proc_trampoline' stub, rather
* than returning at the copying code below.
*
* If process p1 has an FPU state, we must copy it. If it is
* If process l1 has an FPU state, we must copy it. If it is
* the FPU user, we must save the FPU state first.
*/
if (p1 == curproc) {
if (l1 == curlwp) {
write_user_windows();
opcb->pcb_psr = getpsr();
}
#ifdef DIAGNOSTIC
else if (p1 != &proc0)
panic("cpu_fork: curproc");
else if (l1 != &lwp0)
panic("cpu_lwp_fork: curlwp");
#endif
bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
if (p1->p_md.md_fpstate != NULL) {
if (l1->l_md.md_fpstate != NULL) {
struct cpu_info *cpi;
int s;
p2->p_md.md_fpstate = malloc(sizeof(struct fpstate),
l2->l_md.md_fpstate = malloc(sizeof(struct fpstate),
M_SUBPROC, M_WAITOK);
FPU_LOCK(s);
if ((cpi = p1->p_md.md_fpu) != NULL) {
if (cpi->fpproc != p1)
panic("FPU(%d): fpproc %p",
cpi->ci_cpuid, cpi->fpproc);
if (p1 == cpuinfo.fpproc)
savefpstate(p1->p_md.md_fpstate);
if ((cpi = l1->l_md.md_fpu) != NULL) {
if (cpi->fplwp != l1)
panic("FPU(%d): fplwp %p",
cpi->ci_cpuid, cpi->fplwp);
if (l1 == cpuinfo.fplwp)
savefpstate(l1->l_md.md_fpstate);
#if defined(MULTIPROCESSOR)
else
XCALL1(savefpstate, p1->p_md.md_fpstate,
XCALL1(savefpstate, l1->l_md.md_fpstate,
1 << cpi->ci_cpuid);
#endif
}
bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
bcopy(l1->l_md.md_fpstate, l2->l_md.md_fpstate,
sizeof(struct fpstate));
FPU_UNLOCK(s);
} else
p2->p_md.md_fpstate = NULL;
l2->l_md.md_fpstate = NULL;
p2->p_md.md_fpu = NULL;
l2->l_md.md_fpu = NULL;
/*
* Setup (kernel) stack frame that will by-pass the child
* out of the kernel. (The trap frame invariably resides at
* the tippity-top of the u. area.)
*/
tf2 = p2->p_md.md_tf = (struct trapframe *)
tf2 = l2->l_md.md_tf = (struct trapframe *)
((int)npcb + USPACE - sizeof(*tf2));
/* Copy parent's trapframe */
@@ -309,44 +309,70 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
* as an argument. switchexit() switches to the idle context, schedules
* the old vmspace and stack to be freed, then selects a new process to
* run.
*
* If proc==0, we're an exiting lwp, and call switch_lwp_exit() instead of
* switch_exit(), and only do LWP-appropriate cleanup (e.g. don't deactivate
* the pmap).
*/
void
cpu_exit(p)
struct proc *p;
cpu_exit(l, proc)
struct lwp *l;
int proc;
{
struct fpstate *fs;
if ((fs = p->p_md.md_fpstate) != NULL) {
if ((fs = l->l_md.md_fpstate) != NULL) {
struct cpu_info *cpi;
int s;
FPU_LOCK(s);
if ((cpi = p->p_md.md_fpu) != NULL) {
if (cpi->fpproc != p)
panic("FPU(%d): fpproc %p",
cpi->ci_cpuid, cpi->fpproc);
if (p == cpuinfo.fpproc)
if ((cpi = l->l_md.md_fpu) != NULL) {
if (cpi->fplwp != l)
panic("FPU(%d): fplwp %p",
cpi->ci_cpuid, cpi->fplwp);
if (l == cpuinfo.fplwp)
savefpstate(fs);
#if defined(MULTIPROCESSOR)
else
XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
cpi->fpproc = NULL;
cpi->fplwp = NULL;
}
FPU_UNLOCK(s);
free((void *)fs, M_SUBPROC);
}
switchexit(p);
switchexit(l, proc ? exit2 : lwp_exit2);
/* NOTREACHED */
}
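/*
 * Hypothetical call sites, assuming the MI exit paths use the `proc'
 * argument to select the cleanup hook passed to switchexit() above:
 */
#ifdef notyet
	cpu_exit(l, 1);		/* last LWP of the process: exit2() reaps it */
	cpu_exit(l, 0);		/* single LWP exiting: lwp_exit2() reaps it */
#endif	/* notyet */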
void
cpu_setfunc(l, func, arg)
struct lwp *l;
void (*func) __P((void *));
void *arg;
{
struct pcb *pcb = &l->l_addr->u_pcb;
/*struct trapframe *tf = l->l_md.md_tf;*/
struct rwindow *rp;
/* Construct kernel frame to return to in cpu_switch() */
rp = (struct rwindow *)((u_int)pcb + TOPFRAMEOFF);
rp->rw_local[0] = (int)func; /* Function to call */
rp->rw_local[1] = (int)arg; /* and its argument */
pcb->pcb_pc = (int)proc_trampoline - 8;
pcb->pcb_sp = (int)rp;
pcb->pcb_psr &= ~PSR_CWP; /* Run in window #0 */
pcb->pcb_wim = 1; /* Fence at window #1 */
}
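/*
 * A minimal usage sketch, assuming the caller owns a stopped LWP: when
 * cpu_switch() later resumes it, control passes through proc_trampoline,
 * which calls func(arg) and then drops back to user mode through the
 * LWP's saved trap frame. The particular pairing below is hypothetical;
 * upcallret() is the trap.c helper added earlier in this commit.
 */
#ifdef notyet
	cpu_setfunc(l, upcallret, l);	/* restart l via userret() */
#endif	/* notyet */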
/*
* cpu_coredump is called to write a core dump header.
* (should this be defined elsewhere? machdep.c?)
*/
int
cpu_coredump(p, vp, cred, chdr)
struct proc *p;
cpu_coredump(l, vp, cred, chdr)
struct lwp *l;
struct vnode *vp;
struct ucred *cred;
struct core *chdr;
@@ -354,17 +380,20 @@ cpu_coredump(p, vp, cred, chdr)
int error;
struct md_coredump md_core;
struct coreseg cseg;
struct proc *p;
p = l->l_proc;
CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
chdr->c_hdrsize = ALIGN(sizeof(*chdr));
chdr->c_seghdrsize = ALIGN(sizeof(cseg));
chdr->c_cpusize = sizeof(md_core);
md_core.md_tf = *p->p_md.md_tf;
if (p->p_md.md_fpstate) {
if (p == cpuinfo.fpproc)
savefpstate(p->p_md.md_fpstate);
md_core.md_fpstate = *p->p_md.md_fpstate;
md_core.md_tf = *l->l_md.md_tf;
if (l->l_md.md_fpstate) {
if (l == cpuinfo.fplwp)
savefpstate(l->l_md.md_fpstate);
md_core.md_fpstate = *l->l_md.md_fpstate;
} else
bzero((caddr_t)&md_core.md_fpstate, sizeof(struct fpstate));