diff --git a/lib/libc/arch/vax/gen/Makefile.inc b/lib/libc/arch/vax/gen/Makefile.inc index e0b5e8bb2486..ba8f1b5eee25 100644 --- a/lib/libc/arch/vax/gen/Makefile.inc +++ b/lib/libc/arch/vax/gen/Makefile.inc @@ -1,8 +1,13 @@ -# $NetBSD: Makefile.inc,v 1.5 1999/03/06 11:13:43 ragge Exp $ +# $NetBSD: Makefile.inc,v 1.6 2002/03/27 18:38:50 matt Exp $ -SRCS+= _setjmp.S byte_swap_2.S byte_swap_4.S bswap64.S fabs.S frexp.c \ - infinity.c isinf.c ldexp.S modf.S setjmp.S udiv.S urem.S alloca.S \ - sigsetjmp.S __setjmp14.S __sigsetjmp14.S +SRCS+= byte_swap_2.S byte_swap_4.S bswap64.S \ + fabs.S frexp.c infinity.c isinf.c ldexp.S modf.S \ + udiv.S urem.S \ + __setjmp14.S __sigsetjmp14.S _setjmp.S + +.if ${OBJECT_FMT} != "ELF" +SRCS+= setjmp.S sigsetjmp.S +.endif LSRCS+= Lint_bswap16.c Lint_bswap32.c Lint_bswap64.c DPSRCS+= Lint_bswap16.c Lint_bswap32.c Lint_bswap64.c diff --git a/lib/libc/arch/vax/gen/__setjmp14.S b/lib/libc/arch/vax/gen/__setjmp14.S index c7617d09fca2..f0d1f6373078 100644 --- a/lib/libc/arch/vax/gen/__setjmp14.S +++ b/lib/libc/arch/vax/gen/__setjmp14.S @@ -33,7 +33,7 @@ #if defined(LIBC_SCCS) && !defined(lint) /* .asciz "@(#)setjmp.s 8.1 (Berkeley) 6/4/93" */ - .asciz "$NetBSD: __setjmp14.S,v 1.5 2002/02/24 01:06:19 matt Exp $" + .asciz "$NetBSD: __setjmp14.S,v 1.6 2002/03/27 18:38:50 matt Exp $" #endif /* LIBC_SCCS and not lint */ /* @@ -49,33 +49,49 @@ #include "DEFS.h" -ENTRY(__setjmp14, R6) - movl 4(%ap),%r6 # construct sigcontext +ENTRY(__setjmp14, 0) + movl 4(%ap),%r2 # construct sigcontext subl2 $12,%sp # space for current struct sigstack pushl %sp # get current values pushl $0 # no new values calls $4,_C_LABEL(__sigaltstack14) # pop args plus signal stack value - movl (%sp)+,(%r6)+ # save onsigstack status of caller - pushal 24(%r6) + movl 4(%ap),%r2 # construct sigcontext + movl (%sp)+,(%r2) # save onsigstack status of caller + pushal 28(%r2) pushl $0 pushl $0 calls $3,_C_LABEL(__sigprocmask14) # get signal mask - addl2 $4,%r6 # skip old mask - movl 
(%ap),%r0 - moval 4(%ap)[%r0],(%r6)+ # save sp of caller - movl 12(%fp),(%r6)+ # save frame pointer of caller - movl 8(%fp),(%r6)+ # save argument pointer of caller - movl 16(%fp),(%r6)+ # save pc of caller - movpsl (%r6) # save psl of caller - movw 4(%fp),(%r6) + addl3 $8,4(%ap),%r2 # point to sp in signal context + clrl %r0 # assume no stack arguments + bbc $13,6(%fp),1f # handle callg + addl3 $1,(%ap),%r0 # get argument count +1 if calls +1: moval 20(%fp)[%r0],(%r2)+ # save sp of caller + movl 12(%fp),(%r2)+ # save frame pointer of caller + movl 8(%fp),(%r2)+ # save argument pointer of caller + movl 16(%fp),(%r2)+ # save pc of caller + movpsl (%r2) # save current psl + movw 4(%fp),(%r2) # save psw of caller +#ifdef __ELF__ + addl2 $20,%r2 # get past caller psl and signal mask + movq %r6,(%r2)+ # save r6/r7 + movq %r8,(%r2)+ # save r8/r9 + movq %r10,(%r2)+ # save r10/r11 +#endif clrl %r0 ret ENTRY(__longjmp14, 0) movl 8(%ap),%r0 # return(v) movl 4(%ap),%r1 # fetch buffer - tstl 12(%r1) + tstl 12(%r1) # is fp non-null? beql botch + +#ifdef __ELF__ + moval 44(%r1),%r2 # get ptr to saved registers + movq (%r2)+,%r6 # restore r6/r7 + movq (%r2)+,%r8 # restore r8/r9 + movq (%r2)+,%r10 # restore r10/r11 +#else loop: cmpl 12(%r1),%fp # are we there yet? 
beql done @@ -91,6 +107,7 @@ loop: ret # pop another frame done: +#endif /* !__ELF__ */ pushl %r1 # pointer to sigcontext calls $1,_C_LABEL(__sigreturn14) # restore previous context # we should never return diff --git a/lib/libc/arch/vax/gen/_setjmp.S b/lib/libc/arch/vax/gen/_setjmp.S index 5a1ad6b03557..556d1a0b8138 100644 --- a/lib/libc/arch/vax/gen/_setjmp.S +++ b/lib/libc/arch/vax/gen/_setjmp.S @@ -33,7 +33,7 @@ #if defined(LIBC_SCCS) && !defined(lint) /* .asciz "@(#)_setjmp.s 8.1 (Berkeley) 6/4/93" */ - .asciz "$NetBSD: _setjmp.S,v 1.5 2002/02/24 02:43:46 matt Exp $" + .asciz "$NetBSD: _setjmp.S,v 1.6 2002/03/27 18:38:50 matt Exp $" #endif /* LIBC_SCCS and not lint */ /* @@ -45,22 +45,47 @@ * _setjmp(a) * by restoring registers from the stack, * The previous signal state is NOT restored. + * + * Even though we don't use sigreturn14, we still store things in a sigcontext + * in order to be consistent. */ #include "DEFS.h" ENTRY(_setjmp, 0) movl 4(%ap),%r0 - movl 12(%fp),(%r0) # save frame pointer of caller - movl 16(%fp),4(%r0) # save pc of caller + movl 12(%fp),12(%r0) # save frame pointer of caller + movl 16(%fp),20(%r0) # save pc of caller +#ifdef __ELF__ + movl 8(%fp),16(%r0) # save ap of caller + clrl %r1 # clear arg count + bbc $13,6(%fp),1f # was this a callg? 
+ addl3 $1,(%ap),%r1 # get real arg count+1 for calls +1: moval 20(%fp)[%r1],8(%r0) # save sp of caller + movpsl 24(%r0) # save current psl + movw 4(%fp),24(%r0) # save psw of caller + movq %r6,44(%r0) # save r6/r7 + movq %r8,52(%r0) # save r8/r9 + movq %r10,60(%r0) # save r10/r11 +#endif clrl %r0 ret ENTRY(_longjmp, 0) movl 8(%ap),%r0 # return(v) movl 4(%ap),%r1 # fetch buffer - tstl (%r1) + tstl 12(%r1) # is fp null beql botch +#ifdef __ELF__ + movq 44(%r1),%r6 # restore r6/r7 + movq 52(%r1),%r8 # restore r8/r9 + movq 60(%r1),%r10 # restore r10/r11 + movl 16(%r1),%ap # restore ap + movl 12(%r1),%fp # restore fp + movl 8(%r1),%sp # restore sp + movq 20(%r1),-(%sp) # save pc/psl to new stack + rei # and go back to saved pc/psl +#else loop: bitw $1,6(%fp) # %r0 saved? beql 1f @@ -74,7 +99,7 @@ loop: beql 2f movl %r1,20(%fp) 2: - cmpl (%r1),12(%fp) + cmpl 12(%r1),12(%fp) beql done blssu botch movab loop,16(%fp) @@ -92,7 +117,8 @@ done: 3: addl2 $8,%sp # compensate for PSL-PC push 4: - jmp *4(%r1) # done, return.... + jmp *20(%r1) # done, return.... +#endif /* !__ELF__ */ botch: calls $0,_C_LABEL(longjmperror)