Change to use a register prefix.

matt 2002-02-24 01:06:18 +00:00
parent 28b1aa2b30
commit 0ce5ca145f
53 changed files with 1017 additions and 1017 deletions
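The change is mechanical: every VAX register operand shown in the diffs below (%r0-%r11, %ap, %fp, %sp) gains a "%" prefix, and nothing else in the instructions is altered. A representative before/after pair, copied from the __setjmp14.S hunk further down and shown here only for illustration:

	movl	4(ap),r6		# construct sigcontext		(old form)
	subl2	$12,sp			# space for current struct sigstack

	movl	4(%ap),%r6		# construct sigcontext		(new form)
	subl2	$12,%sp			# space for current struct sigstack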

View File

@ -1,4 +1,4 @@
/* $NetBSD: crt0.c,v 1.12 2001/07/26 22:55:12 wiz Exp $ */
/* $NetBSD: crt0.c,v 1.13 2002/02/24 01:06:18 matt Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -54,7 +54,7 @@ struct kframe {
asm(" .type start,@function");
asm(" start:");
asm(" .word 0x0101"); /* two nops just in case */
asm(" pushl sp"); /* no registers to save */
asm(" pushl %sp"); /* no registers to save */
asm(" calls $1,___start"); /* do the real start */
asm(" halt");
@ -100,14 +100,14 @@ asm ("__callmain:"); /* Defined for the benefit of debuggers */
#ifdef DYNAMIC
asm(" ___syscall:");
asm(" .word 0"); /* no registers to save */
asm(" addl2 $4,ap"); /* n-1 args to syscall */
asm(" movl (ap),r0"); /* get syscall number */
asm(" subl3 $1,-4(ap),(ap)"); /* n-1 args to syscall */
asm(" chmk r0"); /* do system call */
asm(" addl2 $4,%ap"); /* n-1 args to syscall */
asm(" movl (%ap),r0"); /* get syscall number */
asm(" subl3 $1,-4(%ap),(%ap)"); /* n-1 args to syscall */
asm(" chmk %r0"); /* do system call */
asm(" jcc 1f"); /* check error */
asm(" mnegl $1,r0");
asm(" mnegl $1,%r0");
asm(" ret");
asm(" 1: movpsl -(sp)"); /* flush the icache */
asm(" 1: movpsl -(%sp)"); /* flush the icache */
asm(" pushab 2f"); /* by issuing an REI */
asm(" rei");
asm(" 2: ret");
@ -117,7 +117,7 @@ asm ("__callmain:"); /* Defined for the benefit of debuggers */
#include "common.c"
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: crt0.c,v 1.12 2001/07/26 22:55:12 wiz Exp $");
__RCSID("$NetBSD: crt0.c,v 1.13 2002/02/24 01:06:18 matt Exp $");
#endif /* LIBC_SCCS and not lint */
#ifdef MCRT0

View File

@ -1,4 +1,4 @@
/* $NetBSD: crt0.c,v 1.6 2001/09/08 18:59:20 matt Exp $ */
/* $NetBSD: crt0.c,v 1.7 2002/02/24 01:06:18 matt Exp $ */
/*
* Copyright (c) 1999 Matt Thomas
@ -48,13 +48,13 @@ __asm("
__start:
_start:
.word 0x0101
pushl r9 # ps_strings
pushl r8 # obj
pushl r7 # cleanup
movl (r6),r0 # argc
pushal 8(r6)[r0] # envp = &argv[argc + 1]
pushal 4(r6) # argv
pushl r0 # argc
pushl %r9 # ps_strings
pushl %r8 # obj
pushl %r7 # cleanup
movl (%r6),%r0 # argc
pushal 8(%r6)[%r0] # envp = &argv[argc + 1]
pushal 4(%r6) # argv
pushl %r0 # argc
calls $6,___start
");
@ -99,7 +99,7 @@ ___start(argc, argv, envp, cleanup, obj, ps_strings)
* NOTE: Leave the RCS ID _after_ __start(), in case it gets placed in .text.
*/
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: crt0.c,v 1.6 2001/09/08 18:59:20 matt Exp $");
__RCSID("$NetBSD: crt0.c,v 1.7 2002/02/24 01:06:18 matt Exp $");
#endif /* LIBC_SCCS and not lint */
#include "common.c"

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)setjmp.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: __setjmp14.S,v 1.4 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: __setjmp14.S,v 1.5 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -50,48 +50,48 @@
#include "DEFS.h"
ENTRY(__setjmp14, R6)
movl 4(ap),r6 # construct sigcontext
subl2 $12,sp # space for current struct sigstack
pushl sp # get current values
movl 4(%ap),%r6 # construct sigcontext
subl2 $12,%sp # space for current struct sigstack
pushl %sp # get current values
pushl $0 # no new values
calls $4,_C_LABEL(__sigaltstack14) # pop args plus signal stack value
movl (sp)+,(r6)+ # save onsigstack status of caller
pushal 24(r6)
movl (%sp)+,(%r6)+ # save onsigstack status of caller
pushal 24(%r6)
pushl $0
pushl $0
calls $3,_C_LABEL(__sigprocmask14) # get signal mask
addl2 $4,r6 # skip old mask
movl (ap),r0
moval 4(ap)[r0],(r6)+ # save sp of caller
movl 12(fp),(r6)+ # save frame pointer of caller
movl 8(fp),(r6)+ # save argument pointer of caller
movl 16(fp),(r6)+ # save pc of caller
movpsl (r6) # save psl of caller
movw 4(fp),(r6)
clrl r0
addl2 $4,%r6 # skip old mask
movl (%ap),%r0
moval 4(%ap)[%r0],(%r6)+ # save sp of caller
movl 12(%fp),(%r6)+ # save frame pointer of caller
movl 8(%fp),(%r6)+ # save argument pointer of caller
movl 16(%fp),(%r6)+ # save pc of caller
movpsl (%r6) # save psl of caller
movw 4(%fp),(%r6)
clrl %r0
ret
ENTRY(__longjmp14, 0)
movl 8(ap),r0 # return(v)
movl 4(ap),r1 # fetch buffer
tstl 12(r1)
movl 8(%ap),%r0 # return(v)
movl 4(%ap),%r1 # fetch buffer
tstl 12(%r1)
beql botch
loop:
cmpl 12(r1),fp # are we there yet?
cmpl 12(%r1),%fp # are we there yet?
beql done
blssu botch
moval 20(fp),r2
blbc 6(fp),1f # was r0 saved?
movl r0,(r2)+
moval 20(%fp),%r2
blbc 6(%fp),1f # was %r0 saved?
movl %r0,(%r2)+
1:
bbc $1,6(fp),2f # was r1 saved?
movl r1,(r2)
bbc $1,6(%fp),2f # was %r1 saved?
movl %r1,(%r2)
2:
movab loop,16(fp)
movab loop,16(%fp)
ret # pop another frame
done:
pushl r1 # pointer to sigcontext
pushl %r1 # pointer to sigcontext
calls $1,_C_LABEL(__sigreturn14) # restore previous context
# we should never return
botch:

View File

@ -32,7 +32,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "$NetBSD: __sigsetjmp14.S,v 1.2 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: __sigsetjmp14.S,v 1.3 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -51,16 +51,16 @@
#include <machine/setjmp.h>
ENTRY(__sigsetjmp14, R6)
movl 4(ap),r0 # get env pointer
movl 8(ap),(_JBLEN*4)(r0) # save "savemask"
tstl 8(ap) # do saving of signal mask?
movl 4(%ap),%r0 # get env pointer
movl 8(%ap),(_JBLEN*4)(%r0) # save "savemask"
tstl 8(%ap) # do saving of signal mask?
beql L1
jmp _C_LABEL(__setjmp14)+2 # yep, do full setjmp
L1: jmp _C_LABEL(_setjmp)+2 # nope, skip to _setjmp
ENTRY(__siglongjmp14, 0)
movl 4(ap),r0 # get env pointer
tstl (_JBLEN*4)(r0) # test if "savemask" was set
movl 4(%ap),%r0 # get env pointer
tstl (_JBLEN*4)(%r0) # test if "savemask" was set
beql L2
jmp _C_LABEL(__longjmp14)+2 # yep, do full longjmp
L2: jmp _C_LABEL(_longjmp)+2 # nope, skip to _longjmp

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)_setjmp.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: _setjmp.S,v 1.3 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: _setjmp.S,v 1.4 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -50,49 +50,49 @@
#include "DEFS.h"
ENTRY(_setjmp, 0)
movl 4(ap),r0
movl 12(fp),(r0) # save frame pointer of caller
movl 16(fp),4(r0) # save pc of caller
clrl r0
movl 4(%ap),%r0
movl 12(%fp),(%r0) # save frame pointer of caller
movl 16(%fp),4(%r0) # save pc of caller
clrl %r0
ret
ENTRY(_longjmp, 0)
movl 8(ap),r0 # return(v)
movl 4(ap),r1 # fetch buffer
tstl (r1)
movl 8(%ap),%r0 # return(v)
movl 4(%ap),%r1 # fetch buffer
tstl (%r1)
beql botch
loop:
bitw $1,6(fp) # r0 saved?
bitw $1,6(%fp) # %r0 saved?
beql 1f
movl r0,20(fp)
bitw $2,6(fp) # was r1 saved?
movl %r0,20(%fp)
bitw $2,6(%fp) # was %r1 saved?
beql 2f
movl r1,24(fp)
movl %r1,24(%fp)
brb 2f
1:
bitw $2,6(fp) # was r1 saved?
bitw $2,6(%fp) # was %r1 saved?
beql 2f
movl r1,20(fp)
movl %r1,20(%fp)
2:
cmpl (r1),12(fp)
cmpl (%r1),12(%fp)
beql done
blssu botch
movab loop,16(fp)
movab loop,16(%fp)
ret # pop another frame
done:
cmpb *16(fp),$2 # returning to an "rei"?
cmpb *16(%fp),$2 # returning to an "rei"?
bneq 1f
movab 3f,16(fp) # do return w/ psl-pc pop
movab 3f,16(%fp) # do return w/ psl-pc pop
brw 2f
1:
movab 4f,16(fp) # do standard return
movab 4f,16(%fp) # do standard return
2:
ret # unwind stack before signals enabled
3:
addl2 $8,sp # compensate for PSL-PC push
4:
jmp *4(r1) # done, return....
jmp *4(%r1) # done, return....
botch:
calls $0,_C_LABEL(longjmperror)

View File

@ -33,18 +33,18 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)alloca.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: alloca.S,v 1.1 1995/04/17 12:23:38 ragge Exp $"
.asciz "$NetBSD: alloca.S,v 1.2 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include "DEFS.h"
ENTRY(alloca, 0)
movl 4(ap),r0 # get allocation size
movl 16(fp),r2 # save return address before we smash it
movab here,16(fp)
movl 4(%ap),%r0 # get allocation size
movl 16(%fp),%r2 # save return address before we smash it
movab here,16(%fp)
ret
here:
subl2 r0,sp # create stack space
bicl2 $3,sp # align to longword boundary
movl sp,r0
jmp (r2)
subl2 %r0,%sp # create stack space
bicl2 $3,%sp # align to longword boundary
movl %sp,%r0
jmp (%r2)

View File

@ -1,19 +1,19 @@
/* Written by Anders Magnusson. Public Domain */
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "$NetBSD: bswap64.S,v 1.1 1999/01/15 13:31:20 bouyer Exp $"
.asciz "$NetBSD: bswap64.S,v 1.2 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include "DEFS.h"
ENTRY(bswap64, 0)
movq 4(ap),r3
rotl $-8,r3,r1
insv r1,$16,$8,r1
rotl $8,r3,r2
movb r2,r1
rotl $-8,r4,r0
insv r0,$16,$8,r0
rotl $8,r4,r2
movb r2,r0
movq 4(%ap),%r3
rotl $-8,%r3,%r1
insv %r1,$16,$8,%r1
rotl $8,%r3,%r2
movb %r2,%r1
rotl $-8,%r4,%r0
insv %r0,$16,$8,%r0
rotl $8,%r4,%r2
movb %r2,%r0
ret

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)htons.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: byte_swap_2.S,v 1.1 1999/01/15 13:31:21 bouyer Exp $"
.asciz "$NetBSD: byte_swap_2.S,v 1.2 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include "DEFS.h"
@ -41,7 +41,7 @@
ALTENTRY(ntohs)
ALTENTRY(htons)
ENTRY(__bswap16, 0)
rotl $8,4(ap),r0
movb 5(ap),r0
movzwl r0,r0
rotl $8,4(%ap),%r0
movb 5(%ap),%r0
movzwl %r0,%r0
ret

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)htonl.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: byte_swap_4.S,v 1.2 1999/03/06 11:13:43 ragge Exp $"
.asciz "$NetBSD: byte_swap_4.S,v 1.3 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include "DEFS.h"
@ -41,7 +41,7 @@
ALTENTRY(ntohl)
ALTENTRY(htonl)
ENTRY(__bswap32, 0)
rotl $-8,4(ap),r0
insv r0,$16,$8,r0
movb 7(ap),r0
rotl $-8,4(%ap),%r0
insv %r0,$16,$8,%r0
movb 7(%ap),%r0
ret

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)fabs.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: fabs.S,v 1.1 1995/04/17 12:23:39 ragge Exp $"
.asciz "$NetBSD: fabs.S,v 1.2 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/* fabs - floating absolute value */
@ -41,8 +41,8 @@
#include "DEFS.h"
ENTRY(fabs, 0)
movd 4(ap),r0
movd 4(%ap),%r0
bgeq 1f
mnegd r0,r0
mnegd %r0,%r0
1:
ret

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/*.asciz "@(#)ldexp.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: ldexp.S,v 1.4 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: ldexp.S,v 1.5 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -61,30 +61,30 @@
.globl _C_LABEL(errno)
ENTRY(ldexp, 0)
movd 4(ap),r0 /* fetch "value" */
extzv $7,$8,r0,r2 /* r2 := biased exponent */
movd 4(%ap),%r0 /* fetch "value" */
extzv $7,$8,%r0,%r2 /* %r2 := biased exponent */
jeql 1f /* if zero, done */
addl2 12(ap),r2 /* r2 := new biased exponent */
addl2 12(%ap),%r2 /* %r2 := new biased exponent */
jleq 2f /* if <= 0, underflow */
cmpl r2,$256 /* otherwise check if too big */
cmpl %r2,$256 /* otherwise check if too big */
jgeq 3f /* jump if overflow */
insv r2,$7,$8,r0 /* put exponent back in result */
insv %r2,$7,$8,%r0 /* put exponent back in result */
1:
ret
2:
clrd r0
clrd %r0
jbr 1f
3:
movd huge,r0 /* largest possible floating magnitude */
jbc $15,4(ap),1f /* jump if argument was positive */
mnegd r0,r0 /* if arg < 0, make result negative */
movd huge,%r0 /* largest possible floating magnitude */
jbc $15,4(%ap),1f /* jump if argument was positive */
mnegd %r0,%r0 /* if arg < 0, make result negative */
1:
#ifdef _REENTRANT
pushl r0
pushl %r0
calls $0,_C_LABEL(__errno)
movl $ ERANGE,(r0)
movl (sp)+,r0
movl $ ERANGE,(%r0)
movl (%sp)+,%r0
#else
movl $ ERANGE,_C_LABEL(errno)
#endif

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)modf.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: modf.S,v 1.1 1995/04/17 12:23:45 ragge Exp $"
.asciz "$NetBSD: modf.S,v 1.2 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -47,10 +47,10 @@
#include "DEFS.h"
ENTRY(modf, 0)
emodd 4(ap),$0,$0f1.0,r2,r0
emodd 4(%ap),$0,$0f1.0,%r2,%r0
jvs 1f # integer overflow
cvtld r2,*12(ap)
cvtld %r2,*12(%ap)
ret
1:
subd3 r0,4(ap),*12(ap)
subd3 %r0,4(%ap),*12(%ap)
ret

View File

@ -33,7 +33,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)setjmp.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: setjmp.S,v 1.4 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: setjmp.S,v 1.5 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -50,46 +50,46 @@
#include "DEFS.h"
ENTRY(setjmp, R6)
movl 4(ap),r6 # construct sigcontext
subl2 $12,sp # space for current struct sigstack
pushl sp # get current values
movl 4(%ap),%r6 # construct sigcontext
subl2 $12,%sp # space for current struct sigstack
pushl %sp # get current values
pushl $0 # no new values
calls $4,_C_LABEL(__sigaltstack14) # pop args plus signal stack value
movl (sp)+,(r6)+ # save onsigstack status of caller
movl (%sp)+,(%r6)+ # save onsigstack status of caller
pushl $0
calls $1,_C_LABEL(sigblock) # get signal mask
movl r0,(r6)+ # save signal mask of caller
movl (ap),r0
moval 4(ap)[r0],(r6)+ # save sp of caller
movl 12(fp),(r6)+ # save frame pointer of caller
movl 8(fp),(r6)+ # save argument pointer of caller
movl 16(fp),(r6)+ # save pc of caller
movpsl (r6) # save psl of caller
movw 4(fp),(r6)
clrl r0
movl %r0,(%r6)+ # save signal mask of caller
movl (%ap),%r0
moval 4(%ap)[%r0],(%r6)+ # save sp of caller
movl 12(%fp),(%r6)+ # save frame pointer of caller
movl 8(%fp),(%r6)+ # save argument pointer of caller
movl 16(%fp),(%r6)+ # save pc of caller
movpsl (%r6) # save psl of caller
movw 4(%fp),(%r6)
clrl %r0
ret
ENTRY(longjmp, 0)
movl 8(ap),r0 # return(v)
movl 4(ap),r1 # fetch buffer
tstl 12(r1)
movl 8(%ap),%r0 # return(v)
movl 4(%ap),%r1 # fetch buffer
tstl 12(%r1)
beql botch
loop:
cmpl 12(r1),fp # are we there yet?
cmpl 12(%r1),%fp # are we there yet?
beql done
blssu botch
moval 20(fp),r2
blbc 6(fp),1f # was r0 saved?
movl r0,(r2)+
moval 20(%fp),%r2
blbc 6(%fp),1f # was %r0 saved?
movl %r0,(%r2)+
1:
bbc $1,6(fp),2f # was r1 saved?
movl r1,(r2)
bbc $1,6(%fp),2f # was %r1 saved?
movl %r1,(%r2)
2:
movab loop,16(fp)
movab loop,16(%fp)
ret # pop another frame
done:
pushl r1 # pointer to sigcontext
pushl %r1 # pointer to sigcontext
calls $1,_C_LABEL(sigreturn) # restore previous context
# we should never return
botch:

View File

@ -32,7 +32,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "$NetBSD: sigsetjmp.S,v 1.2 2000/06/28 19:20:17 matt Exp $"
.asciz "$NetBSD: sigsetjmp.S,v 1.3 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -51,16 +51,16 @@
#include <machine/setjmp.h>
ENTRY(sigsetjmp, R6)
movl 4(ap),r0 # get env pointer
movl 8(ap),(_JBLEN*4)(r0) # save "savemask"
tstl 8(ap) # do saving of signal mask?
movl 4(%ap),%r0 # get env pointer
movl 8(%ap),(_JBLEN*4)(%r0) # save "savemask"
tstl 8(%ap) # do saving of signal mask?
beql L1
jmp _C_LABEL(setjmp)+2 # yep, do full setjmp
L1: jmp _C_LABEL(_setjmp)+2 # nope, skip to _setjmp
ENTRY(siglongjmp, 0)
movl 4(ap),r0 # get env pointer
tstl (_JBLEN*4)(r0) # test if "savemask" was set
movl 4(%ap),%r0 # get env pointer
tstl (_JBLEN*4)(%r0) # test if "savemask" was set
beql L2
jmp _C_LABEL(longjmp)+2 # yep, do full longjmp
L2: jmp _C_LABEL(_longjmp)+2 # nope, skip to _longjmp

View File

@ -36,7 +36,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)udiv.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: udiv.S,v 1.2 2000/08/07 03:18:04 matt Exp $"
.asciz "$NetBSD: udiv.S,v 1.3 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
@ -47,31 +47,31 @@
#include "DEFS.h"
#define DIVIDEND 4(ap)
#define DIVISOR 8(ap)
#define DIVIDEND 4(%ap)
#define DIVISOR 8(%ap)
#ifdef __ELF__
ASENTRY(__udiv,0)
#else
ASENTRY(udiv,0)
#endif
movl DIVISOR,r2
movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
movl DIVIDEND,r0
movl DIVIDEND,%r0
jlss Lhard # big dividend: extended division
divl2 r2,r0 # small divisor and dividend: signed division
divl2 %r2,%r0 # small divisor and dividend: signed division
ret
Lhard:
clrl r1
ediv r2,r0,r0,r1
clrl %r1
ediv %r2,%r0,%r0,%r1
ret
Leasy:
cmpl DIVIDEND,r2
cmpl DIVIDEND,%r2
jgequ Lone # if dividend is as big or bigger, return 1
clrl r0 # else return 0
clrl %r0 # else return 0
ret
Lone:
movl $1,r0
movl $1,%r0
ret
#ifdef __ELF__
@ -79,26 +79,26 @@ ASENTRY(__audiv,0)
#else
ASENTRY(audiv,0)
#endif
movl DIVIDEND,r3
movl DIVISOR,r2
movl DIVIDEND,%r3
movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
movl (r3),r0
movl (%r3),%r0
jlss La_hard # big dividend: extended division
divl2 r2,r0 # small divisor and dividend: signed division
movl r0,(r3) # leave the value of the assignment in r0
divl2 %r2,%r0 # small divisor and dividend: signed division
movl %r0,(%r3) # leave the value of the assignment in %r0
ret
La_hard:
clrl r1
ediv r2,r0,r0,r1
movl r0,(r3)
clrl %r1
ediv %r2,%r0,%r0,%r1
movl %r0,(%r3)
ret
La_easy:
cmpl (r3),r2
cmpl (%r3),%r2
jgequ La_one # if dividend is as big or bigger, return 1
clrl r0 # else return 0
clrl (r3)
clrl %r0 # else return 0
clrl (%r3)
ret
La_one:
movl $1,r0
movl r0,(r3)
movl $1,%r0
movl %r0,(%r3)
ret

View File

@ -36,7 +36,7 @@
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)urem.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: urem.S,v 1.2 2000/08/07 03:18:04 matt Exp $"
.asciz "$NetBSD: urem.S,v 1.3 2002/02/24 01:06:19 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include "DEFS.h"
@ -47,30 +47,30 @@
* aurem() takes a pointer to a dividend and an ordinary divisor.
*/
#define DIVIDEND 4(ap)
#define DIVISOR 8(ap)
#define DIVIDEND 4(%ap)
#define DIVISOR 8(%ap)
#ifdef __ELF__
ASENTRY(__urem,0)
#else
ASENTRY(urem,0)
#endif
movl DIVISOR,r2
movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
movl DIVIDEND,r0
movl DIVIDEND,%r0
jlss Lhard # big dividend: need extended division
divl3 r2,r0,r1 # small divisor and dividend: signed modulus
mull2 r2,r1
subl2 r1,r0
divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
mull2 %r2,%r1
subl2 %r1,%r0
ret
Lhard:
clrl r1
ediv r2,r0,r1,r0
clrl %r1
ediv %r2,%r0,%r1,%r0
ret
Leasy:
subl3 r2,DIVIDEND,r0
subl3 %r2,DIVIDEND,%r0
jcc Ldifference # if divisor goes in once, return difference
movl DIVIDEND,r0 # if divisor is bigger, return dividend
movl DIVIDEND,%r0 # if divisor is bigger, return dividend
Ldifference:
ret
@ -79,26 +79,26 @@ ASENTRY(__aurem,0)
#else
ASENTRY(aurem,0)
#endif
movl DIVIDEND,r3
movl DIVISOR,r2
movl DIVIDEND,%r3
movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
movl (r3),r0
movl (%r3),%r0
jlss La_hard # big dividend: need extended division
divl3 r2,r0,r1 # small divisor and dividend: signed modulus
mull2 r2,r1
subl2 r1,r0
movl r0,(r3) # leave the value of the assignment in r0
divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
mull2 %r2,%r1
subl2 %r1,%r0
movl %r0,(%r3) # leave the value of the assignment in %r0
ret
La_hard:
clrl r1
ediv r2,r0,r1,r0
movl r0,(r3)
clrl %r1
ediv %r2,%r0,%r1,%r0
movl %r0,(%r3)
ret
La_easy:
subl3 r2,(r3),r0
subl3 %r2,(%r3),%r0
jcs La_dividend # if divisor is bigger, leave dividend alone
movl r0,(r3) # if divisor goes in once, store difference
movl %r0,(%r3) # if divisor goes in once, store difference
ret
La_dividend:
movl (r3),r0
movl (%r3),%r0
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: bcmp.S,v 1.1 1996/05/19 15:57:38 ragge Exp $ */
/* $NetBSD: bcmp.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@ -40,23 +40,23 @@
/* still, this is four times faster than the generic C version on a uvax2 */
ENTRY(bcmp, 0)
movl 12(ap),r0 # r0 = n
movl 12(%ap),%r0 # %r0 = n
jeql 9f
movq 4(ap),r1 # r1 = s1, r2 = s2
ashl $-2,r0,r3 # convert len to # of long words
movq 4(%ap),%r1 # %r1 = s1, %r2 = s2
ashl $-2,%r0,%r3 # convert len to # of long words
jeql 2f
1:
cmpl (r1)+,(r2)+ # no "cmpq" alas, so four bytes at a time
cmpl (%r1)+,(%r2)+ # no "cmpq" alas, so four bytes at a time
jneq 9f
sobgtr r3,1b
sobgtr %r3,1b
2:
bicl3 $-4,r0,r3 # handle at most 3 extra bytes
bicl3 $-4,%r0,%r3 # handle at most 3 extra bytes
jeql 8f
3:
cmpb (r1)+,(r2)+
cmpb (%r1)+,(%r2)+
jneq 9f
sobgtr r3,3b
sobgtr %r3,3b
8:
clrl r0 # we have a match!
clrl %r0 # we have a match!
9:
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: bcopy.S,v 1.1 1996/05/19 15:57:39 ragge Exp $ */
/* $NetBSD: bcopy.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@ -41,39 +41,39 @@
#include "DEFS.h"
ENTRY(bcopy, R6)
movl 4(ap),r1
movl 8(ap),r3
movl 12(ap),r6
cmpl r1,r3
movl 4(%ap),%r1
movl 8(%ap),%r3
movl 12(%ap),%r6
cmpl %r1,%r3
bgtr 2f # normal forward case
blss 3f # overlapping, must do backwards
ret # equal, nothing to do
1:
subl2 r0,r6
movc3 r0,(r1),(r3)
subl2 %r0,%r6
movc3 %r0,(%r1),(%r3)
2:
movzwl $65535,r0
cmpl r6,r0
movzwl $65535,%r0
cmpl %r6,%r0
jgtr 1b
movc3 r6,(r1),(r3)
movc3 %r6,(%r1),(%r3)
ret
3:
addl2 r6,r1
addl2 r6,r3
movzwl $65535,r0
addl2 %r6,%r1
addl2 %r6,%r3
movzwl $65535,%r0
jbr 5f
4:
subl2 r0,r6
subl2 r0,r1
subl2 r0,r3
movc3 r0,(r1),(r3)
movzwl $65535,r0
subl2 r0,r1
subl2 r0,r3
subl2 %r0,%r6
subl2 %r0,%r1
subl2 %r0,%r3
movc3 %r0,(%r1),(%r3)
movzwl $65535,%r0
subl2 %r0,%r1
subl2 %r0,%r3
5:
cmpl r6,r0
cmpl %r6,%r0
jgtr 4b
subl2 r6,r1
subl2 r6,r3
movc3 r6,(r1),(r3)
subl2 %r6,%r1
subl2 %r6,%r3
movc3 %r6,(%r1),(%r3)
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: bzero.S,v 1.1 1996/05/19 15:57:40 ragge Exp $ */
/* $NetBSD: bzero.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@ -41,14 +41,14 @@
#include "DEFS.h"
ENTRY(bzero, 0)
movl 4(ap),r3
movl 4(%ap),%r3
jbr 2f
1:
subl2 r0,8(ap)
movc5 $0,(r3),$0,r0,(r3)
subl2 %r0,8(%ap)
movc5 $0,(%r3),$0,%r0,(%r3)
2:
movzwl $65535,r0
cmpl 8(ap),r0
movzwl $65535,%r0
cmpl 8(%ap),%r0
jgtr 1b
movc5 $0,(r3),$0,8(ap),(r3)
movc5 $0,(%r3),$0,8(%ap),(%r3)
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: ffs.S,v 1.1 1996/05/19 15:57:41 ragge Exp $ */
/* $NetBSD: ffs.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@ -41,9 +41,9 @@
#include "DEFS.h"
ENTRY(ffs, 0)
ffs $0,$32,4(ap),r0
ffs $0,$32,4(%ap),%r0
bneq 1f
mnegl $1,r0
mnegl $1,%r0
1:
incl r0
incl %r0
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: index.S,v 1.1 1996/05/19 15:57:43 ragge Exp $ */
/* $NetBSD: index.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 1980, 1993
* The Regents of the University of California. All rights reserved.
@ -45,19 +45,19 @@
/* Alas not quite twice as fast as the generic C version on a uvax2 */
ENTRY(index, 0)
movq 4(ap),r0 # r0 = cp; r1 = c
tstb r1 # special case, looking for '\0'
movq 4(%ap),%r0 # %r0 = cp; %r1 = c
tstb %r1 # special case, looking for '\0'
jeql 3f
1:
cmpb (r0),r1
cmpb (%r0),%r1
jeql 2f
tstb (r0)+
tstb (%r0)+
jneq 1b
clrl r0 # return NULL if no match
clrl %r0 # return NULL if no match
2:
ret
3:
tstb (r0)+
tstb (%r0)+
jneq 3b
decl r0
decl %r0
jbr 2b

View File

@ -1,4 +1,4 @@
/* $NetBSD: memcmp.S,v 1.1 1996/05/19 15:57:44 ragge Exp $ */
/* $NetBSD: memcmp.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@ -37,33 +37,33 @@
#include "DEFS.h"
ENTRY(memcmp, 0)
movl 12(ap),r0
movl 12(%ap),%r0
jeql 9f
movq 4(ap),r1
ashl $-2,r0,r3 # convert len to long words
movq 4(%ap),%r1
ashl $-2,%r0,%r3 # convert len to long words
jeql 2f
1:
cmpl (r1)+,(r2)+ # no "cmpq" alas
cmpl (%r1)+,(%r2)+ # no "cmpq" alas
jneq 7f
sobgtr r3,1b
sobgtr %r3,1b
2:
bicl3 $-4,r0,r3 # handle at most 3 extra bytes
bicl3 $-4,%r0,%r3 # handle at most 3 extra bytes
jeql 4f
3:
cmpb (r1)+,(r2)+
cmpb (%r1)+,(%r2)+
jneq 8f
sobgtr r3,3b
sobgtr %r3,3b
4:
clrl r0 # we had a match
clrl %r0 # we had a match
ret
7: # backup, and do a byte compare
tstl -(r1)
tstl -(r2)
movl $4,r3
tstl -(%r1)
tstl -(%r2)
movl $4,%r3
jbr 3b
8:
movzbl -(r1),r3
movzbl -(r2),r4
subl3 r4,r3,r0
movzbl -(%r1),%r3
movzbl -(%r2),%r4
subl3 %r4,%r3,%r0
9:
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: memcpy.S,v 1.1 1996/05/19 15:57:45 ragge Exp $ */
/* $NetBSD: memcpy.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@ -47,48 +47,48 @@
#include "DEFS.h"
ENTRY(memcpy, 0)
movzwl $65535,r0 /* r0 = 64K (needed below) */
movq 8(ap),r1 /* r1 = src, r2 = length */
movl 4(ap),r3 /* r3 = dst */
cmpl r1,r3
movzwl $65535,%r0 /* %r0 = 64K (needed below) */
movq 8(%ap),%r1 /* %r1 = src, %r2 = length */
movl 4(%ap),%r3 /* %r3 = dst */
cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
addl2 r2,r1 /* overlaps iff src<dst but src+len>dst */
cmpl r1,r3
addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
subl2 r2,r1
subl2 %r2,%r1
1: /* move forward */
cmpl r2,r0
cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
movc3 r2,(r1),(r3) /* move it all */
movc3 %r2,(%r1),(%r3) /* move it all */
2:
movl 4(ap),r0 /* return original dst */
movl 4(%ap),%r0 /* return original dst */
ret
3:
subl2 r0,12(ap) /* adjust length by 64K */
movc3 r0,(r1),(r3) /* move 64K */
movl 12(ap),r2
decw r0 /* from 0 to 65535 */
subl2 %r0,12(%ap) /* adjust length by 64K */
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
addl2 r2,r3
addl2 %r2,%r3
5:
cmpl r2,r0
cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
subl2 r2,r1
subl2 r2,r3
movc3 r2,(r1),(r3) /* move it all */
movl 4(ap),r0 /* return original dst */
subl2 %r2,%r1
subl2 %r2,%r3
movc3 %r2,(%r1),(%r3) /* move it all */
movl 4(%ap),%r0 /* return original dst */
ret
6:
subl2 r0,12(ap) /* adjust length by 64K */
subl2 r0,r1
subl2 r0,r3
movc3 r0,(r1),(r3) /* move 64K */
movl 12(ap),r2
decw r0
subl2 r0,r1
subl2 r0,r3
subl2 %r0,12(%ap) /* adjust length by 64K */
subl2 %r0,%r1
subl2 %r0,%r3
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0
subl2 %r0,%r1
subl2 %r0,%r3
brb 5b

View File

@ -1,4 +1,4 @@
/* $NetBSD: memmove.S,v 1.1 1996/05/19 15:57:47 ragge Exp $ */
/* $NetBSD: memmove.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@ -47,48 +47,48 @@
#include "DEFS.h"
ENTRY(memmove, 0)
movzwl $65535,r0 /* r0 = 64K (needed below) */
movq 8(ap),r1 /* r1 = src, r2 = length */
movl 4(ap),r3 /* r3 = dst */
cmpl r1,r3
movzwl $65535,%r0 /* %r0 = 64K (needed below) */
movq 8(%ap),%r1 /* %r1 = src, %r2 = length */
movl 4(%ap),%r3 /* %r3 = dst */
cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
addl2 r2,r1 /* overlaps iff src<dst but src+len>dst */
cmpl r1,r3
addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
subl2 r2,r1
subl2 %r2,%r1
1: /* move forward */
cmpl r2,r0
cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
movc3 r2,(r1),(r3) /* move it all */
movc3 %r2,(%r1),(%r3) /* move it all */
2:
movl 4(ap),r0 /* return original dst */
movl 4(%ap),%r0 /* return original dst */
ret
3:
subl2 r0,12(ap) /* adjust length by 64K */
movc3 r0,(r1),(r3) /* move 64K */
movl 12(ap),r2
decw r0 /* from 0 to 65535 */
subl2 %r0,12(%ap) /* adjust length by 64K */
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
addl2 r2,r3
addl2 %r2,%r3
5:
cmpl r2,r0
cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
subl2 r2,r1
subl2 r2,r3
movc3 r2,(r1),(r3) /* move it all */
movl 4(ap),r0 /* return original dst */
subl2 %r2,%r1
subl2 %r2,%r3
movc3 %r2,(%r1),(%r3) /* move it all */
movl 4(%ap),%r0 /* return original dst */
ret
6:
subl2 r0,12(ap) /* adjust length by 64K */
subl2 r0,r1
subl2 r0,r3
movc3 r0,(r1),(r3) /* move 64K */
movl 12(ap),r2
decw r0
subl2 r0,r1
subl2 r0,r3
subl2 %r0,12(%ap) /* adjust length by 64K */
subl2 %r0,%r1
subl2 %r0,%r3
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0
subl2 %r0,%r1
subl2 %r0,%r3
brb 5b

View File

@ -1,4 +1,4 @@
/* $NetBSD: memset.S,v 1.1 1996/05/19 15:57:49 ragge Exp $ */
/* $NetBSD: memset.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@ -41,16 +41,16 @@
#include "DEFS.h"
ENTRY(memset, 0)
movl 4(ap),r3
movl 4(%ap),%r3
1:
movzwl $65535,r0
movq 8(ap),r1
cmpl r2,r0
movzwl $65535,%r0
movq 8(%ap),%r1
cmpl %r2,%r0
jgtru 2f
movc5 $0,(r3),r1,r2,(r3)
movl r1,r0
movc5 $0,(%r3),%r1,%r2,(%r3)
movl %r1,%r0
ret
2:
subl2 r0,12(ap)
movc5 $0,(r3),r1,r0,(r3)
subl2 %r0,12(%ap)
movc5 $0,(%r3),%r1,%r0,(%r3)
jbr 1b

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)Ovfork.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: Ovfork.S,v 1.5 2000/06/28 19:20:18 matt Exp $"
.asciz "$NetBSD: Ovfork.S,v 1.6 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
/*
@ -49,8 +49,8 @@ WARN_REFERENCES(vfork, \
/*
* pid = vfork();
*
* r1 == 0 in parent process, r1 == 1 in child process.
* r0 == pid of child in parent, r0 == pid of parent in child.
* %r1 == 0 in parent process, %r1 == 1 in child process.
* %r0 == pid of child in parent, %r0 == pid of parent in child.
*
* trickery here, due to keith sklower, uses ret to clear the stack,
* and then returns with a jump indirect, since only one person can return
@ -58,26 +58,26 @@ WARN_REFERENCES(vfork, \
*/
ENTRY(vfork, 0)
movl 16(fp),r2 # save return address before we smash it
movab here,16(fp)
movl 16(%fp),%r2 # save return address before we smash it
movab here,16(%fp)
ret
here:
chmk $ SYS_vfork
bcs err # if failed, set errno and return -1
/* this next trick is Chris Torek's fault */
mnegl r1,r1 # r1 = 0xffffffff if child, 0 if parent
bicl2 r1,r0 # r0 &= ~r1, i.e., 0 if child, else unchanged
jmp (r2)
mnegl %r1,%r1 # %r1 = 0xffffffff if child, 0 if parent
bicl2 %r1,%r0 # %r0 &= ~%r1, i.e., 0 if child, else unchanged
jmp (%r2)
err:
#ifdef _REENTRANT
pushr $0x5
calls $0,_C_LABEL(__errno)
movl (sp)+,(r0)
mnegl $1,r0
movl (%sp)+,(%r0)
mnegl $1,%r0
rsb
#else
movl r0,_C_LABEL(errno)
mnegl $1,r0
jmp (r2)
movl %r0,_C_LABEL(errno)
mnegl $1,%r0
jmp (%r2)
#endif
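For clarity, a traced-through sketch of the return-value trick above, using the register convention stated in the comment (%r1 == 0 in the parent, 1 in the child); this is illustrative only, not an additional change in this commit:

	mnegl	%r1,%r1		# parent: 0 -> 0	child: 1 -> 0xffffffff
	bicl2	%r1,%r0		# %r0 &= ~%r1: parent keeps the child pid, child gets 0
	jmp	(%r2)		# both return through the address saved in %r2 before the ret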

View File

@ -1,4 +1,4 @@
/* $NetBSD: __clone.S,v 1.1 2001/07/20 06:10:12 matt Exp $ */
/* $NetBSD: __clone.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -52,32 +52,32 @@ ENTRY(__clone, 0)
/*
* Sanity checks: func and stack may not be NULL.
*/
movl 4(ap),r2 /* check and save function */
movl 4(%ap),%r2 /* check and save function */
beql 9f
tstl 8(ap) /* check stack */
tstl 8(%ap) /* check stack */
beql 9f
/*
* The system call expects (flags, stack).
*/
movl 12(ap),4(ap) /* XXX this doesn't work for
movl 12(%ap),4(%ap) /* XXX this doesn't work for
callg with a RO arglist */
movl $2,(ap)
SYSTRAP(__clone) /* only r0/r1 munged */
movl $2,(%ap)
SYSTRAP(__clone) /* only %r0/%r1 munged */
blbc r1,8f /* r1<0>: 0=parent 1=child */
blbc %r1,8f /* %r1<0>: 0=parent 1=child */
/* Call the clone's entry point. */
pushl 16(ap)
calls $1,(r2)
pushl 16(%ap)
calls $1,(%r2)
/* Pass return value to _exit(). */
pushl r0
pushl %r0
calls $1,_C_LABEL(_exit)
/* NOTREACHED */
8: ret
9: movl $EINVAL,r0
9: movl $EINVAL,%r0
jmp CERROR+2

View File

@ -38,16 +38,16 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)syscall.s 8.2 (Berkeley) 1/21/94" */
.asciz "$NetBSD: __syscall.S,v 1.1 2000/12/13 07:34:54 matt Exp $"
.asciz "$NetBSD: __syscall.S,v 1.2 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
ENTRY(__syscall, 0)
movl 4(ap),r0 # syscall number
addl2 $8,ap # skip the first argument
subl3 $2,-8(ap),(ap) # two fewer arguments
chmk r0
movl 4(%ap),%r0 # syscall number
addl2 $8,%ap # skip the first argument
subl3 $2,-8(%ap),(%ap) # two fewer arguments
chmk %r0
jcs 1f
ret
1:

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)Ovfork.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: __vfork14.S,v 1.3 2000/06/28 19:20:18 matt Exp $"
.asciz "$NetBSD: __vfork14.S,v 1.4 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
/*
@ -46,8 +46,8 @@
/*
* pid = vfork();
*
* r1 == 0 in parent process, r1 == 1 in child process.
* r0 == pid of child in parent, r0 == pid of parent in child.
* %r1 == 0 in parent process, %r1 == 1 in child process.
* %r0 == pid of child in parent, %r0 == pid of parent in child.
*
* trickery here, due to keith sklower, uses ret to clear the stack,
* and then returns with a jump indirect, since only one person can return
@ -55,26 +55,26 @@
*/
ENTRY(__vfork14, 0)
movl 16(fp),r2 # save return address before we smash it
movab here,16(fp)
movl 16(%fp),%r2 # save return address before we smash it
movab here,16(%fp)
ret
here:
chmk $ SYS___vfork14
bcs err # if failed, set errno and return -1
/* this next trick is Chris Torek's fault */
mnegl r1,r1 # r1 = 0xffffffff if child, 0 if parent
bicl2 r1,r0 # r0 &= ~r1, i.e., 0 if child, else unchanged
jmp (r2)
mnegl %r1,%r1 # %r1 = 0xffffffff if child, 0 if parent
bicl2 %r1,%r0 # %r0 &= ~%r1, i.e., 0 if child, else unchanged
jmp (%r2)
err:
#ifdef _REENTRANT
pushr $0x5
calls $0,_C_LABEL(__errno)
movl (sp)+,(r0)
mnegl $1,r0
movl (%sp)+,(%r0)
mnegl $1,%r0
rsb
#else
movl r0,_C_LABEL(errno)
mnegl $1,r0
jmp (r2)
movl %r0,_C_LABEL(errno)
mnegl $1,%r0
jmp (%r2)
#endif

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)brk.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: brk.S,v 1.9 2000/07/01 05:21:12 matt Exp $"
.asciz "$NetBSD: brk.S,v 1.10 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -46,13 +46,13 @@ WEAK_ALIAS(brk, _brk)
#endif
ENTRY(_brk, 0)
cmpl 4(ap),_C_LABEL(__minbrk) # gtr > _end
cmpl 4(%ap),_C_LABEL(__minbrk) # gtr > _end
bgeq 1f # is fine
movl _C_LABEL(__minbrk),4(ap) # shrink back to _end
movl _C_LABEL(__minbrk),4(%ap) # shrink back to _end
1: chmk $ SYS_break # do it
jcs err
movl 4(ap),CURBRK
clrl r0
movl 4(%ap),CURBRK
clrl %r0
ret
err:
jmp CERROR+2

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)cerror.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: cerror.S,v 1.6 2000/08/07 03:18:05 matt Exp $"
.asciz "$NetBSD: cerror.S,v 1.7 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -41,12 +41,12 @@
.globl _C_LABEL(errno)
CERROR: .word 0
#ifdef _REENTRANT
pushl r0
pushl %r0
calls $0,_C_LABEL(__errno)
movl (sp)+,(r0)
movl (%sp)+,(%r0)
#else
movl r0,_C_LABEL(errno)
movl %r0,_C_LABEL(errno)
#endif
mnegl $1,r0
movl r0,r1
mnegl $1,%r0
movl %r0,%r1
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: execl.S,v 1.1 2001/07/01 13:31:18 ragge Exp $ */
/* $NetBSD: execl.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 2001 Ludd, University of Luleå, Sweden. All rights reserved.
*
@ -32,7 +32,7 @@
#include "SYS.h"
ENTRY(execl, 0)
pushal 8(ap) # Push pointer to argv vector
pushl 4(ap) # Push path
pushal 8(%ap) # Push pointer to argv vector
pushl 4(%ap) # Push path
calls $2,_C_LABEL(execv)
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: execle.S,v 1.2 2001/09/06 18:42:58 chuck Exp $ */
/* $NetBSD: execle.S,v 1.3 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 2001 Ludd, University of Luleå, Sweden. All rights reserved.
*
@ -32,9 +32,9 @@
#include "SYS.h"
ENTRY(execle, 0)
movl (ap),r0 # Get number of args
pushl (ap)[r0] # Push last arg (envp)
pushal 8(ap) # Push pointer to argv vector
pushl 4(ap) # Push path
movl (%ap),%r0 # Get number of args
pushl (%ap)[%r0] # Push last arg (envp)
pushal 8(%ap) # Push pointer to argv vector
pushl 4(%ap) # Push path
calls $3,_C_LABEL(execve)
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: execlp.S,v 1.1 2001/07/01 13:31:18 ragge Exp $ */
/* $NetBSD: execlp.S,v 1.2 2002/02/24 01:06:20 matt Exp $ */
/*
* Copyright (c) 2001 Ludd, University of Luleå, Sweden. All rights reserved.
*
@ -32,7 +32,7 @@
#include "SYS.h"
ENTRY(execlp, 0)
pushal 8(ap) # Push pointer to argv vector
pushl 4(ap) # Push path
pushal 8(%ap) # Push pointer to argv vector
pushl 4(%ap) # Push path
calls $2,_C_LABEL(execvp)
ret

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)fork.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: fork.S,v 1.2 2000/06/26 06:33:04 kleink Exp $"
.asciz "$NetBSD: fork.S,v 1.3 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -43,7 +43,7 @@ WEAK_ALIAS(fork, _fork)
#endif
_SYSCALL(_fork,fork)
jlbc r1,1f # parent, since r1 == 0 in parent, 1 in child
clrl r0
jlbc %r1,1f # parent, since %r1 == 0 in parent, 1 in child
clrl %r0
1:
ret # pid = fork()

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)pipe.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: pipe.S,v 1.2 2000/09/28 08:38:55 kleink Exp $"
.asciz "$NetBSD: pipe.S,v 1.3 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -43,8 +43,8 @@ WEAK_ALIAS(pipe, _pipe)
#endif
_SYSCALL(_pipe,pipe)
movl 4(ap),r2
movl r0,(r2)+
movl r1,(r2)
clrl r0
movl 4(%ap),%r2
movl %r0,(%r2)+
movl %r1,(%r2)
clrl %r0
ret

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)ptrace.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: ptrace.S,v 1.5 2000/06/30 23:46:36 matt Exp $"
.asciz "$NetBSD: ptrace.S,v 1.6 2002/02/24 01:06:20 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -41,7 +41,7 @@
ENTRY(ptrace, 0)
#ifdef _REENTRANT
calls $0,_C_LABEL(__errno)
clrl (r0)
clrl (%r0)
#else
clrl _C_LABEL(errno)
#endif

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)sbrk.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: sbrk.S,v 1.8 2001/05/05 17:56:58 kleink Exp $"
.asciz "$NetBSD: sbrk.S,v 1.9 2002/02/24 01:06:21 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -54,14 +54,14 @@ CURBRK:
.text
ENTRY(_sbrk, 0)
addl3 CURBRK,4(ap),-(sp)
addl3 CURBRK,4(%ap),-(%sp)
pushl $1
movl ap,r3
movl sp,ap
movl %ap,%r3
movl %sp,%ap
chmk $ SYS_break
jcs err
movl CURBRK,r0
addl2 4(r3),CURBRK
movl CURBRK,%r0
addl2 4(%r3),CURBRK
ret
err:
jmp CERROR+2

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)sigpending.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: sigpending.S,v 1.3 1998/12/02 01:01:05 thorpej Exp $"
.asciz "$NetBSD: sigpending.S,v 1.4 2002/02/24 01:06:21 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -42,6 +42,6 @@ WARN_REFERENCES(sigpending, \
"warning: reference to compatibility sigpending(); include <signal.h> for correct reference")
_SYSCALL(sigpending,compat_13_sigpending13)
movl r0,*4(ap) # store old mask
clrl r0
movl %r0,*4(%ap) # store old mask
clrl %r0
ret

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)sigprocmask.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: sigprocmask.S,v 1.6 2000/06/30 23:46:36 matt Exp $"
.asciz "$NetBSD: sigprocmask.S,v 1.7 2002/02/24 01:06:21 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -42,19 +42,19 @@ WARN_REFERENCES(sigprocmask, \
"warning: reference to compatibility sigprocmask(); include <signal.h> for correct reference")
ENTRY(sigprocmask, 0)
tstl 8(ap) # check new sigset pointer
tstl 8(%ap) # check new sigset pointer
bneq 1f # if not null, indirect
/* movl $0,8(ap) # null mask pointer: block empty set */
movl $1,4(ap) # SIG_BLOCK
/* movl $0,8(%ap) # null mask pointer: block empty set */
movl $1,4(%ap) # SIG_BLOCK
jbr 2f
1: movl *8(ap),8(ap) # indirect to new mask arg
1: movl *8(%ap),8(%ap) # indirect to new mask arg
2: chmk $ SYS_compat_13_sigprocmask13
jcc 3f
jmp CERROR+2
3: tstl 12(ap) # test if old mask requested
3: tstl 12(%ap) # test if old mask requested
beql out
movl r0,*12(ap) # store old mask
movl %r0,*12(%ap) # store old mask
out:
clrl r0
clrl %r0
ret

View File

@ -33,7 +33,7 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)sigsuspend.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: sigsuspend.S,v 1.6 2000/06/30 23:46:37 matt Exp $"
.asciz "$NetBSD: sigsuspend.S,v 1.7 2002/02/24 01:06:21 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
@ -42,9 +42,9 @@ WARN_REFERENCES(sigsuspend, \
"warning: reference to compatibility sigsuspend(); include <signal.h> for correct reference")
ENTRY(sigsuspend, 0)
movl *4(ap),4(ap) # indirect to mask arg
movl *4(%ap),4(%ap) # indirect to mask arg
chmk $ SYS_compat_13_sigsuspend13
jcc 1f
jmp CERROR+2
1: clrl r0 # shouldnt happen
1: clrl %r0 # shouldnt happen
ret

View File

@ -38,15 +38,15 @@
#if defined(SYSLIBC_SCCS) && !defined(lint)
/* .asciz "@(#)syscall.s 8.2 (Berkeley) 1/21/94" */
.asciz "$NetBSD: syscall.S,v 1.4 2000/06/30 23:46:37 matt Exp $"
.asciz "$NetBSD: syscall.S,v 1.5 2002/02/24 01:06:21 matt Exp $"
#endif /* SYSLIBC_SCCS and not lint */
#include "SYS.h"
ENTRY(syscall, 0)
movl 4(ap),r0 # syscall number
subl3 $1,(ap)+,(ap) # one fewer arguments
chmk r0
movl 4(%ap),%r0 # syscall number
subl3 $1,(%ap)+,(%ap) # one fewer arguments
chmk %r0
jcs 1f
ret
1:

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_argred.S,v 1.6 2000/07/14 22:26:15 matt Exp $ */
/* $NetBSD: n_argred.S,v 1.7 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -50,13 +50,13 @@
ENTRY(__libm_argred, 0)
/*
* Compare the argument with the largest possible that can
* be reduced by table lookup. r3 := |x| will be used in table_lookup .
* be reduced by table lookup. %r3 := |x| will be used in table_lookup .
*/
movd r0,r3
movd %r0,%r3
bgeq abs1
mnegd r3,r3
mnegd %r3,%r3
abs1:
cmpd r3,$0d+4.55530934770520019583e+01
cmpd %r3,$0d+4.55530934770520019583e+01
blss small_arg
jsb trigred
rsb
@ -65,61 +65,61 @@ small_arg:
rsb
/*
* At this point,
* r0 contains the quadrant number, 0, 1, 2, or 3;
* r2/r1 contains the reduced argument as a D-format number;
* r3 contains a F-format extension to the reduced argument;
* r4 contains a 0 or 1 corresponding to a sin or cos entry.
* %r0 contains the quadrant number, 0, 1, 2, or 3;
* %r2/%r1 contains the reduced argument as a D-format number;
* %r3 contains a F-format extension to the reduced argument;
* %r4 contains a 0 or 1 corresponding to a sin or cos entry.
*/
ENTRY(__libm_sincos, 0)
/*
* Compensate for a cosine entry by adding one to the quadrant number.
*/
addl2 r4,r0
addl2 %r4,%r0
/*
* Polyd clobbers r5-r0 ; save X in r7/r6 .
* Polyd clobbers %r5-%r0 ; save X in %r7/%r6 .
* This can be avoided by rewriting trigred .
*/
movd r1,r6
movd %r1,%r6
/*
* Likewise, save alpha in r8 .
* Likewise, save alpha in %r8 .
* This can be avoided by rewriting trigred .
*/
movf r3,r8
movf %r3,%r8
/*
* Odd or even quadrant? cosine if odd, sine otherwise.
* Save floor(quadrant/2) in r9 ; it determines the final sign.
* Save floor(quadrant/2) in %r9 ; it determines the final sign.
*/
rotl $-1,r0,r9
rotl $-1,%r0,%r9
blss cosine
sine:
muld2 r1,r1 # Xsq = X * X
cmpw $0x2480,r1 # [zl] Xsq > 2^-56?
muld2 %r1,%r1 # Xsq = X * X
cmpw $0x2480,%r1 # [zl] Xsq > 2^-56?
blss 1f # [zl] yes, go ahead and do polyd
clrq r1 # [zl] work around 11/780 FPA polyd bug
clrq %r1 # [zl] work around 11/780 FPA polyd bug
1:
polyd r1,$7,sin_coef # Q = P(Xsq) , of deg 7
mulf3 $0f3.0,r8,r4 # beta = 3 * alpha
mulf2 r0,r4 # beta = Q * beta
addf2 r8,r4 # beta = alpha + beta
muld2 r6,r0 # S(X) = X * Q
/* cvtfd r4,r4 ... r5 = 0 after a polyd. */
addd2 r4,r0 # S(X) = beta + S(X)
addd2 r6,r0 # S(X) = X + S(X)
polyd %r1,$7,sin_coef # Q = P(Xsq) , of deg 7
mulf3 $0f3.0,%r8,%r4 # beta = 3 * alpha
mulf2 %r0,%r4 # beta = Q * beta
addf2 %r8,%r4 # beta = alpha + beta
muld2 %r6,%r0 # S(X) = X * Q
/* cvtfd %r4,%r4 ... %r5 = 0 after a polyd. */
addd2 %r4,%r0 # S(X) = beta + S(X)
addd2 %r6,%r0 # S(X) = X + S(X)
jbr done
cosine:
muld2 r6,r6 # Xsq = X * X
muld2 %r6,%r6 # Xsq = X * X
beql zero_arg
mulf2 r1,r8 # beta = X * alpha
polyd r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
subd3 r0,r8,r0 # beta = beta - Q
subw2 $0x80,r6 # Xsq = Xsq / 2
addd2 r0,r6 # Xsq = Xsq + beta
mulf2 %r1,%r8 # beta = X * alpha
polyd %r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
subd3 %r0,%r8,%r0 # beta = beta - Q
subw2 $0x80,%r6 # Xsq = Xsq / 2
addd2 %r0,%r6 # Xsq = Xsq + beta
zero_arg:
subd3 r6,$0d1.0,r0 # C(X) = 1 - Xsq
subd3 %r6,$0d1.0,%r0 # C(X) = 1 - Xsq
done:
blbc r9,even
mnegd r0,r0
blbc %r9,even
mnegd %r0,%r0
even:
rsb
@ -270,30 +270,30 @@ twoOverPi:
_ALIGN_TEXT
table_lookup:
muld3 r3,twoOverPi,r0
cvtrdl r0,r0 # n = nearest int to ((2/pi)*|x|) rnded
subd2 leading[r0],r3 # p = (|x| - leading n*pi/2) exactly
subd3 middle[r0],r3,r1 # q = (p - middle n*pi/2) rounded
subd2 r1,r3 # r = (p - q)
subd2 middle[r0],r3 # r = r - middle n*pi/2
subd2 trailing[r0],r3 # r = r - trailing n*pi/2 rounded
muld3 %r3,twoOverPi,%r0
cvtrdl %r0,%r0 # n = nearest int to ((2/pi)*|x|) rnded
subd2 leading[%r0],%r3 # p = (|x| - leading n*pi/2) exactly
subd3 middle[%r0],%r3,%r1 # q = (p - middle n*pi/2) rounded
subd2 %r1,%r3 # r = (p - q)
subd2 middle[%r0],%r3 # r = r - middle n*pi/2
subd2 trailing[%r0],%r3 # r = r - trailing n*pi/2 rounded
/*
* If the original argument was negative,
* negate the reduce argument and
* adjust the octant/quadrant number.
*/
tstw 4(ap)
tstw 4(%ap)
bgeq abs2
mnegf r1,r1
mnegf r3,r3
/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
subb3 r0,$4,r0
mnegf %r1,%r1
mnegf %r3,%r3
/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
subb3 %r0,$4,%r0
abs2:
/*
* Clear all unneeded octant/quadrant bits.
*/
/* bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
bicb2 $0xfc,r0
/* bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
bicb2 $0xfc,%r0
rsb
/*
* p.0
@ -335,102 +335,102 @@ bits2opi:
* Trigred preforms argument reduction
* for the trigonometric functions. It
* takes one input argument, a D-format
* number in r1/r0 . The magnitude of
* number in %r1/%r0 . The magnitude of
* the input argument must be greater
* than or equal to 1/2 . Trigred produces
* three results: the number of the octant
* occupied by the argument, the reduced
* argument, and an extension of the
* reduced argument. The octant number is
* returned in r0 . The reduced argument
* returned in %r0 . The reduced argument
* is returned as a D-format number in
* r2/r1 . An 8 bit extension of the
* %r2/%r1 . An 8 bit extension of the
* reduced argument is returned as an
* F-format number in r3.
* F-format number in %r3.
* p.2
*/
trigred:
/*
* Save the sign of the input argument.
*/
movw r0,-(sp)
movw %r0,-(%sp)
/*
* Extract the exponent field.
*/
extzv $7,$7,r0,r2
extzv $7,$7,%r0,%r2
/*
* Convert the fraction part of the input
* argument into a quadword integer.
*/
bicw2 $0xff80,r0
bisb2 $0x80,r0 # -S.McD
rotl $16,r0,r0
rotl $16,r1,r1
bicw2 $0xff80,%r0
bisb2 $0x80,%r0 # -S.McD
rotl $16,%r0,%r0
rotl $16,%r1,%r1
/*
* If r1 is negative, add 1 to r0 . This
* If %r1 is negative, add 1 to %r0 . This
* adjustment is made so that the two's
* complement multiplications done later
* will produce unsigned results.
*/
bgeq posmid
incl r0
incl %r0
posmid:
/* p.3
*
* Set r3 to the address of the first quadword
* Set %r3 to the address of the first quadword
* used to obtain the needed portion of 2/pi .
* The address is longword aligned to ensure
* efficient access.
*/
ashl $-3,r2,r3
bicb2 $3,r3
mnegl r3,r3
movab bits2opi[r3],r3
ashl $-3,%r2,%r3
bicb2 $3,%r3
mnegl %r3,%r3
movab bits2opi[%r3],%r3
/*
* Set r2 to the size of the shift needed to
* Set %r2 to the size of the shift needed to
* obtain the correct portion of 2/pi .
*/
bicb2 $0xe0,r2
bicb2 $0xe0,%r2
/* p.4
*
* Move the needed 128 bits of 2/pi into
* r11 - r8 . Adjust the numbers to allow
* %r11 - %r8 . Adjust the numbers to allow
* for unsigned multiplication.
*/
ashq r2,(r3),r10
ashq %r2,(%r3),%r10
subl2 $4,r3
ashq r2,(r3),r9
subl2 $4,%r3
ashq %r2,(%r3),%r9
bgeq signoff1
incl r11
incl %r11
signoff1:
subl2 $4,r3
ashq r2,(r3),r8
subl2 $4,%r3
ashq %r2,(%r3),%r8
bgeq signoff2
incl r10
incl %r10
signoff2:
subl2 $4,r3
ashq r2,(r3),r7
subl2 $4,%r3
ashq %r2,(%r3),%r7
bgeq signoff3
incl r9
incl %r9
signoff3:
/* p.5
*
* Multiply the contents of r0/r1 by the
* slice of 2/pi in r11 - r8 .
* Multiply the contents of %r0/%r1 by the
* slice of 2/pi in %r11 - %r8 .
*/
emul r0,r8,$0,r4
emul r0,r9,r5,r5
emul r0,r10,r6,r6
emul %r0,%r8,$0,%r4
emul %r0,%r9,%r5,%r5
emul %r0,%r10,%r6,%r6
emul r1,r8,$0,r7
emul r1,r9,r8,r8
emul r1,r10,r9,r9
emul r1,r11,r10,r10
emul %r1,%r8,$0,%r7
emul %r1,%r9,%r8,%r8
emul %r1,%r10,%r9,%r9
emul %r1,%r11,%r10,%r10
addl2 r4,r8
adwc r5,r9
adwc r6,r10
addl2 %r4,%r8
adwc %r5,%r9
adwc %r6,%r10
/* p.6
*
* If there are more than five leading zeros
@ -439,42 +439,42 @@ signoff3:
* two quotient bits, generate more fraction bits.
* Otherwise, branch to code to produce the result.
*/
bicl3 $0xc1ffffff,r10,r4
bicl3 $0xc1ffffff,%r10,%r4
beql more1
cmpl $0x3e000000,r4
cmpl $0x3e000000,%r4
bneq result
more1:
/* p.7
*
* generate another 32 result bits.
*/
subl2 $4,r3
ashq r2,(r3),r5
subl2 $4,%r3
ashq %r2,(%r3),%r5
bgeq signoff4
emul r1,r6,$0,r4
addl2 r1,r5
emul r0,r6,r5,r5
addl2 r0,r6
emul %r1,%r6,$0,%r4
addl2 %r1,%r5
emul %r0,%r6,%r5,%r5
addl2 %r0,%r6
jbr addbits1
signoff4:
emul r1,r6,$0,r4
emul r0,r6,r5,r5
emul %r1,%r6,$0,%r4
emul %r0,%r6,%r5,%r5
addbits1:
addl2 r5,r7
adwc r6,r8
adwc $0,r9
adwc $0,r10
addl2 %r5,%r7
adwc %r6,%r8
adwc $0,%r9
adwc $0,%r10
/* p.8
*
* Check for massive cancellation.
*/
bicl3 $0xc0000000,r10,r6
bicl3 $0xc0000000,%r10,%r6
/* bneq more2 -S.McD Test was backwards */
beql more2
cmpl $0x3fffffff,r6
cmpl $0x3fffffff,%r6
bneq result
more2:
/* p.9
@ -484,43 +484,43 @@ more2:
* Testing has shown there will always be
* enough bits after this point.
*/
subl2 $4,r3
ashq r2,(r3),r5
subl2 $4,%r3
ashq %r2,(%r3),%r5
bgeq signoff5
emul r0,r6,r4,r5
addl2 r0,r6
emul %r0,%r6,%r4,%r5
addl2 %r0,%r6
jbr addbits2
signoff5:
emul r0,r6,r4,r5
emul %r0,%r6,%r4,%r5
addbits2:
addl2 r6,r7
adwc $0,r8
adwc $0,r9
adwc $0,r10
addl2 %r6,%r7
adwc $0,%r8
adwc $0,%r9
adwc $0,%r10
/* p.10
*
* The following code produces the reduced
* argument from the product bits contained
* in r10 - r7 .
* in %r10 - %r7 .
*/
result:
/*
* Extract the octant number from r10 .
* Extract the octant number from %r10 .
*/
/* extzv $29,$3,r10,r0 ...used for pi/4 reduction -S.McD */
extzv $30,$2,r10,r0
/* extzv $29,$3,%r10,%r0 ...used for pi/4 reduction -S.McD */
extzv $30,$2,%r10,%r0
/*
* Clear the octant bits in r10 .
* Clear the octant bits in %r10 .
*/
/* bicl2 $0xe0000000,r10 ...used for pi/4 reduction -S.McD */
bicl2 $0xc0000000,r10
/* bicl2 $0xe0000000,%r10 ...used for pi/4 reduction -S.McD */
bicl2 $0xc0000000,%r10
/*
* Zero the sign flag.
*/
clrl r5
clrl %r5
/* p.11
*
* Check to see if the fraction is greater than
@ -529,16 +529,16 @@ result:
* on, and replace the fraction with 1 minus
* the fraction.
*/
/* bitl $0x10000000,r10 ...used for pi/4 reduction -S.McD */
bitl $0x20000000,r10
/* bitl $0x10000000,%r10 ...used for pi/4 reduction -S.McD */
bitl $0x20000000,%r10
beql small
incl r0
incl r5
/* subl3 r10,$0x1fffffff,r10 ...used for pi/4 reduction -S.McD */
subl3 r10,$0x3fffffff,r10
mcoml r9,r9
mcoml r8,r8
mcoml r7,r7
incl %r0
incl %r5
/* subl3 %r10,$0x1fffffff,%r10 ...used for pi/4 reduction -S.McD */
subl3 %r10,$0x3fffffff,%r10
mcoml %r9,%r9
mcoml %r8,%r8
mcoml %r7,%r7
small:
/* p.12
*
@ -546,63 +546,63 @@ small:
* Test whether the first 30 bits of the
* fraction are zero.
*/
tstl r10
tstl %r10
beql tiny
/*
* Find the position of the first one bit in r10 .
* Find the position of the first one bit in %r10 .
*/
cvtld r10,r1
extzv $7,$7,r1,r1
cvtld %r10,%r1
extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
subl3 r1,$32,r6
subl3 %r1,$32,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
ashq r6,r9,r10
ashq r6,r8,r9
ashq %r6,%r9,%r10
ashq %r6,%r8,%r9
jbr mult
/* p.13
*
* Test to see if the sign bit of r9 is on.
* Test to see if the sign bit of %r9 is on.
*/
tiny:
tstl r9
tstl %r9
bgeq tinier
/*
* If it is, shift the product bits up 32 bits.
*/
movl $32,r6
movq r8,r10
tstl r10
movl $32,%r6
movq %r8,%r10
tstl %r10
jbr mult
/* p.14
*
* Test whether r9 is zero. It is probably
* impossible for both r10 and r9 to be
* Test whether %r9 is zero. It is probably
* impossible for both %r10 and %r9 to be
* zero, but until proven to be so, the test
* must be made.
*/
tinier:
beql zero
/*
* Find the position of the first one bit in r9 .
* Find the position of the first one bit in %r9 .
*/
cvtld r9,r1
extzv $7,$7,r1,r1
cvtld %r9,%r1
extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
subl3 r1,$32,r1
addl3 $32,r1,r6
subl3 %r1,$32,%r1
addl3 $32,%r1,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
ashq r1,r8,r10
ashq r1,r7,r9
ashq %r1,%r8,%r10
ashq %r1,%r7,%r9
jbr mult
/* p.15
*
@ -610,156 +610,156 @@ tinier:
* argument to zero.
*/
zero:
clrl r1
clrl r2
clrl r3
clrl %r1
clrl %r2
clrl %r3
jbr return
/* p.16
*
* At this point, r0 contains the octant number,
* r6 indicates the number of bits the fraction
* has been shifted, r5 indicates the sign of
* the fraction, r11/r10 contain the high order
* At this point, %r0 contains the octant number,
* %r6 indicates the number of bits the fraction
* has been shifted, %r5 indicates the sign of
* the fraction, %r11/%r10 contain the high order
* 64 bits of the fraction, and the condition
* codes indicate where the sign bit of r10
* codes indicate where the sign bit of %r10
* is on. The following code multiplies the
* fraction by pi/2 .
*/
mult:
/*
* Save r11/r10 in r4/r1 . -S.McD
* Save %r11/%r10 in %r4/%r1 . -S.McD
*/
movl r11,r4
movl r10,r1
movl %r11,%r4
movl %r10,%r1
/*
* If the sign bit of r10 is on, add 1 to r11 .
* If the sign bit of %r10 is on, add 1 to %r11 .
*/
bgeq signoff6
incl r11
incl %r11
signoff6:
/* p.17
*
* Move pi/2 into r3/r2 .
* Move pi/2 into %r3/%r2 .
*/
movq $0xc90fdaa22168c235,r2
movq $0xc90fdaa22168c235,%r2
/*
* Multiply the fraction by the portion of pi/2
* in r2 .
* in %r2 .
*/
emul r2,r10,$0,r7
emul r2,r11,r8,r7
emul %r2,%r10,$0,%r7
emul %r2,%r11,%r8,%r7
/*
* Multiply the fraction by the portion of pi/2
* in r3 .
* in %r3 .
*/
emul r3,r10,$0,r9
emul r3,r11,r10,r10
emul %r3,%r10,$0,%r9
emul %r3,%r11,%r10,%r10
/*
* Add the product bits together.
*/
addl2 r7,r9
adwc r8,r10
adwc $0,r11
addl2 %r7,%r9
adwc %r8,%r10
adwc $0,%r11
/*
* Compensate for not sign extending r8 above.-S.McD
* Compensate for not sign extending %r8 above.-S.McD
*/
tstl r8
tstl %r8
bgeq signoff6a
decl r11
decl %r11
signoff6a:
/*
* Compensate for r11/r10 being unsigned. -S.McD
* Compensate for %r11/%r10 being unsigned. -S.McD
*/
addl2 r2,r10
adwc r3,r11
addl2 %r2,%r10
adwc %r3,%r11
/*
* Compensate for r3/r2 being unsigned. -S.McD
* Compensate for %r3/%r2 being unsigned. -S.McD
*/
addl2 r1,r10
adwc r4,r11
addl2 %r1,%r10
adwc %r4,%r11
/* p.18
*
* If the sign bit of r11 is zero, shift the
* product bits up one bit and increment r6 .
* If the sign bit of %r11 is zero, shift the
* product bits up one bit and increment %r6 .
*/
blss signon
incl r6
ashq $1,r10,r10
tstl r9
incl %r6
ashq $1,%r10,%r10
tstl %r9
bgeq signoff7
incl r10
incl %r10
signoff7:
signon:
/* p.19
*
* Shift the 56 most significant product
* bits into r9/r8 . The sign extension
* bits into %r9/%r8 . The sign extension
* will be handled later.
*/
ashq $-8,r10,r8
ashq $-8,%r10,%r8
/*
* Convert the low order 8 bits of r10
* Convert the low order 8 bits of %r10
* into an F-format number.
*/
cvtbf r10,r3
cvtbf %r10,%r3
/*
* If the result of the conversion was
* negative, add 1 to r9/r8 .
* negative, add 1 to %r9/%r8 .
*/
bgeq chop
incl r8
adwc $0,r9
incl %r8
adwc $0,%r9
/*
* If r9 is now zero, branch to special
* If %r9 is now zero, branch to special
* code to handle that possibility.
*/
beql carryout
chop:
/* p.20
*
* Convert the number in r9/r8 into
* D-format number in r2/r1 .
* Convert the number in %r9/%r8 into
* D-format number in %r2/%r1 .
*/
rotl $16,r8,r2
rotl $16,r9,r1
rotl $16,%r8,%r2
rotl $16,%r9,%r1
/*
* Set the exponent field to the appropriate
* value. Note that the extra bits created by
* sign extension are now eliminated.
*/
subw3 r6,$131,r6
insv r6,$7,$9,r1
subw3 %r6,$131,%r6
insv %r6,$7,$9,%r1
/*
* Set the exponent field of the F-format
* number in r3 to the appropriate value.
* number in %r3 to the appropriate value.
*/
tstf r3
tstf %r3
beql return
/* extzv $7,$8,r3,r4 -S.McD */
extzv $7,$7,r3,r4
addw2 r4,r6
/* subw2 $217,r6 -S.McD */
subw2 $64,r6
insv r6,$7,$8,r3
/* extzv $7,$8,%r3,%r4 -S.McD */
extzv $7,$7,%r3,%r4
addw2 %r4,%r6
/* subw2 $217,%r6 -S.McD */
subw2 $64,%r6
insv %r6,$7,$8,%r3
jbr return
/* p.21
*
* The following code generates the appropriate
* result for the unlikely possibility that
* rounding the number in r9/r8 resulted in
* rounding the number in %r9/%r8 resulted in
* a carry out.
*/
carryout:
clrl r1
clrl r2
subw3 r6,$132,r6
insv r6,$7,$9,r1
tstf r3
clrl %r1
clrl %r2
subw3 %r6,$132,%r6
insv %r6,$7,$9,%r1
tstf %r3
beql return
extzv $7,$8,r3,r4
addw2 r4,r6
subw2 $218,r6
insv r6,$7,$8,r3
extzv $7,$8,%r3,%r4
addw2 %r4,%r6
subw2 $218,%r6
insv %r6,$7,$8,%r3
/* p.22
*
 * The following code makes a needed
@ -773,9 +773,9 @@ return:
* equal to 1/2 . If so, negate the reduced
* argument.
*/
blbc r5,signoff8
mnegf r1,r1
mnegf r3,r3
blbc %r5,signoff8
mnegf %r1,%r1
mnegf %r3,%r3
signoff8:
/* p.23
*
@ -783,18 +783,18 @@ signoff8:
 * negate the reduced argument and
* adjust the octant number.
*/
tstw (sp)+
tstw (%sp)+
bgeq signoff9
mnegf r1,r1
mnegf r3,r3
/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
subb3 r0,$4,r0
mnegf %r1,%r1
mnegf %r3,%r3
/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
subb3 %r0,$4,%r0
signoff9:
/*
* Clear all unneeded octant bits.
*
* bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
bicb2 $0xfc,r0
* bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
bicb2 $0xfc,%r0
/*
* Return.
*/
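
The emul sequence above builds the 128-bit product of the 64-bit fraction in %r11/%r10 and the 64 high-order bits of pi/2, keeping only the upper half (plus compensation for emul's signed operands). A minimal C sketch of that partial-product accumulation, unsigned case only; mulhi64 is an illustrative name, not a routine from this source:

#include <stdint.h>

static uint64_t mulhi64(uint64_t frac, uint64_t pio2bits)
{
        uint64_t a_lo = (uint32_t)frac,      a_hi = frac >> 32;
        uint64_t b_lo = (uint32_t)pio2bits,  b_hi = pio2bits >> 32;

        uint64_t lo_lo = a_lo * b_lo;           /* bits   0..63 of the product */
        uint64_t hi_lo = a_hi * b_lo;           /* bits  32..95                */
        uint64_t lo_hi = a_lo * b_hi;           /* bits  32..95                */
        uint64_t hi_hi = a_hi * b_hi;           /* bits  64..127               */

        /* add the three middle contributions, letting carries ripple up */
        uint64_t mid = (lo_lo >> 32) + (uint32_t)hi_lo + (uint32_t)lo_hi;

        return hi_hi + (hi_lo >> 32) + (lo_hi >> 32) + (mid >> 32);
}

Calling mulhi64(frac, 0xc90fdaa22168c235ULL) corresponds to the high-order product words the assembly leaves in %r11/%r10 before the result is renormalized and converted to D-format.
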

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_atan2.S,v 1.4 2000/07/14 04:50:58 matt Exp $ */
/* $NetBSD: n_atan2.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -77,45 +77,45 @@
*/
ENTRY(atan2, 0x0fc0)
movq 4(ap),r2 # r2 = y
movq 12(ap),r4 # r4 = x
bicw3 $0x7f,r2,r0
bicw3 $0x7f,r4,r1
cmpw r0,$0x8000 # y is the reserved operand
movq 4(%ap),%r2 # %r2 = y
movq 12(%ap),%r4 # %r4 = x
bicw3 $0x7f,%r2,%r0
bicw3 $0x7f,%r4,%r1
cmpw %r0,$0x8000 # y is the reserved operand
jeql resop
cmpw r1,$0x8000 # x is the reserved operand
cmpw %r1,$0x8000 # x is the reserved operand
jeql resop
subl2 $8,sp
bicw3 $0x7fff,r2,-4(fp) # copy y sign bit to -4(fp)
bicw3 $0x7fff,r4,-8(fp) # copy x sign bit to -8(fp)
cmpd r4,$0x4080 # x = 1.0 ?
subl2 $8,%sp
bicw3 $0x7fff,%r2,-4(%fp) # copy y sign bit to -4(%fp)
bicw3 $0x7fff,%r4,-8(%fp) # copy x sign bit to -8(%fp)
cmpd %r4,$0x4080 # x = 1.0 ?
bneq xnot1
movq r2,r0
bicw2 $0x8000,r0 # t = |y|
movq r0,r2 # y = |y|
movq %r2,%r0
bicw2 $0x8000,%r0 # t = |y|
movq %r0,%r2 # y = |y|
jbr begin
xnot1:
bicw3 $0x807f,r2,r11 # yexp
bicw3 $0x807f,%r2,%r11 # yexp
jeql yeq0 # if y=0 goto yeq0
bicw3 $0x807f,r4,r10 # xexp
bicw3 $0x807f,%r4,%r10 # xexp
jeql pio2 # if x=0 goto pio2
subw2 r10,r11 # k = yexp - xexp
cmpw r11,$0x2000 # k >= 64 (exp) ?
subw2 %r10,%r11 # k = yexp - xexp
cmpw %r11,$0x2000 # k >= 64 (exp) ?
jgeq pio2 # atan2 = +-pi/2
divd3 r4,r2,r0 # t = y/x never overflow
bicw2 $0x8000,r0 # t > 0
bicw2 $0xff80,r2 # clear the exponent of y
bicw2 $0xff80,r4 # clear the exponent of x
bisw2 $0x4080,r2 # normalize y to [1,2)
bisw2 $0x4080,r4 # normalize x to [1,2)
subw2 r11,r4 # scale x so that yexp-xexp=k
divd3 %r4,%r2,%r0 # t = y/x never overflow
bicw2 $0x8000,%r0 # t > 0
bicw2 $0xff80,%r2 # clear the exponent of y
bicw2 $0xff80,%r4 # clear the exponent of x
bisw2 $0x4080,%r2 # normalize y to [1,2)
bisw2 $0x4080,%r4 # normalize x to [1,2)
subw2 %r11,%r4 # scale x so that yexp-xexp=k
begin:
cmpw r0,$0x411c # t : 39/16
cmpw %r0,$0x411c # t : 39/16
jgeq L50
addl3 $0x180,r0,r10 # 8*t
cvtrfl r10,r10 # [8*t] rounded to int
ashl $-1,r10,r10 # [8*t]/2
casel r10,$0,$4
addl3 $0x180,%r0,%r10 # 8*t
cvtrfl %r10,%r10 # [8*t] rounded to int
ashl $-1,%r10,%r10 # [8*t]/2
casel %r10,$0,$4
L1:
.word L20-L1
.word L20-L1
@ -123,82 +123,82 @@ L1:
.word L40-L1
.word L40-L1
L10:
movq $0xb4d9940f985e407b,r6 # Hi=.98279372324732906796d0
movq $0x21b1879a3bc2a2fc,r8 # Lo=-.17092002525602665777d-17
subd3 r4,r2,r0 # y-x
addw2 $0x80,r0 # 2(y-x)
subd2 r4,r0 # 2(y-x)-x
addw2 $0x80,r4 # 2x
movq r2,r10
addw2 $0x80,r10 # 2y
addd2 r10,r2 # 3y
addd2 r4,r2 # 3y+2x
divd2 r2,r0 # (2y-3x)/(2x+3y)
movq $0xb4d9940f985e407b,%r6 # Hi=.98279372324732906796d0
movq $0x21b1879a3bc2a2fc,%r8 # Lo=-.17092002525602665777d-17
subd3 %r4,%r2,%r0 # y-x
addw2 $0x80,%r0 # 2(y-x)
subd2 %r4,%r0 # 2(y-x)-x
addw2 $0x80,%r4 # 2x
movq %r2,%r10
addw2 $0x80,%r10 # 2y
addd2 %r10,%r2 # 3y
addd2 %r4,%r2 # 3y+2x
divd2 %r2,%r0 # (2y-3x)/(2x+3y)
jbr L60
L20:
cmpw r0,$0x3280 # t : 2**(-28)
cmpw %r0,$0x3280 # t : 2**(-28)
jlss L80
clrq r6 # Hi=r6=0, Lo=r8=0
clrq r8
clrq %r6 # Hi=%r6=0, Lo=%r8=0
clrq %r8
jbr L60
L30:
movq $0xda7b2b0d63383fed,r6 # Hi=.46364760900080611433d0
movq $0xf0ea17b2bf912295,r8 # Lo=.10147340032515978826d-17
movq r2,r0
addw2 $0x80,r0 # 2y
subd2 r4,r0 # 2y-x
addw2 $0x80,r4 # 2x
addd2 r2,r4 # 2x+y
divd2 r4,r0 # (2y-x)/(2x+y)
movq $0xda7b2b0d63383fed,%r6 # Hi=.46364760900080611433d0
movq $0xf0ea17b2bf912295,%r8 # Lo=.10147340032515978826d-17
movq %r2,%r0
addw2 $0x80,%r0 # 2y
subd2 %r4,%r0 # 2y-x
addw2 $0x80,%r4 # 2x
addd2 %r2,%r4 # 2x+y
divd2 %r4,%r0 # (2y-x)/(2x+y)
jbr L60
L50:
        movq $0x68c2a2210fda40c9,r6 # Hi=1.5707963267948966135d0
movq $0x06e0145c26332326,r8 # Lo=.22517417741562176079d-17
cmpw r0,$0x5100 # y : 2**57
        movq $0x68c2a2210fda40c9,%r6 # Hi=1.5707963267948966135d0
movq $0x06e0145c26332326,%r8 # Lo=.22517417741562176079d-17
cmpw %r0,$0x5100 # y : 2**57
bgeq L90
divd3 r2,r4,r0
bisw2 $0x8000,r0 # -x/y
divd3 %r2,%r4,%r0
bisw2 $0x8000,%r0 # -x/y
jbr L60
L40:
movq $0x68c2a2210fda4049,r6 # Hi=.78539816339744830676d0
movq $0x06e0145c263322a6,r8 # Lo=.11258708870781088040d-17
subd3 r4,r2,r0 # y-x
addd2 r4,r2 # y+x
divd2 r2,r0 # (y-x)/(y+x)
movq $0x68c2a2210fda4049,%r6 # Hi=.78539816339744830676d0
movq $0x06e0145c263322a6,%r8 # Lo=.11258708870781088040d-17
subd3 %r4,%r2,%r0 # y-x
addd2 %r4,%r2 # y+x
divd2 %r2,%r0 # (y-x)/(y+x)
L60:
movq r0,r10
muld2 r0,r0
polyd r0,$12,ptable
muld2 r10,r0
subd2 r0,r8
addd3 r8,r10,r0
addd2 r6,r0
movq %r0,%r10
muld2 %r0,%r0
polyd %r0,$12,ptable
muld2 %r10,%r0
subd2 %r0,%r8
addd3 %r8,%r10,%r0
addd2 %r6,%r0
L80:
movw -8(fp),r2
movw -8(%fp),%r2
bneq pim
bisw2 -4(fp),r0 # return sign(y)*r0
bisw2 -4(%fp),%r0 # return sign(y)*%r0
ret
L90: # x >= 2**25
movq r6,r0
movq %r6,%r0
jbr L80
pim:
subd3 r0,$0x68c2a2210fda4149,r0 # pi-t
bisw2 -4(fp),r0
subd3 %r0,$0x68c2a2210fda4149,%r0 # pi-t
bisw2 -4(%fp),%r0
ret
yeq0:
movw -8(fp),r2
movw -8(%fp),%r2
beql zero # if sign(x)=1 return pi
        movq $0x68c2a2210fda4149,r0 # pi=3.1415926535897932270d0
        movq $0x68c2a2210fda4149,%r0 # pi=3.1415926535897932270d0
ret
zero:
clrq r0 # return 0
clrq %r0 # return 0
ret
pio2:
        movq $0x68c2a2210fda40c9,r0 # pi/2=1.5707963267948966135d0
bisw2 -4(fp),r0 # return sign(y)*pi/2
        movq $0x68c2a2210fda40c9,%r0 # pi/2=1.5707963267948966135d0
bisw2 -4(%fp),%r0 # return sign(y)*pi/2
ret
resop:
movq $0x8000,r0 # propagate the reserved operand
movq $0x8000,%r0 # propagate the reserved operand
ret
_ALIGN_TEXT
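
For reference, the reduction that the casel dispatch above implements is atan(t) = atan(c) + atan((t - c)/(1 + c*t)), with c picked from {0, 1/2, 1, 3/2} and pi/2 - atan(1/t) used once t >= 39/16. A hedged C sketch of just that selection, with atan_poly standing in for the polyd evaluation and the Hi/Lo split ignored; the function names and the exact breakpoints are illustrative:

#include <math.h>

static double atan_poly(double s) { return atan(s); }   /* stand-in for the polyd kernel */

static double atan_reduced(double t)                    /* t = |y/x|, t >= 0 */
{
        static const double pio2 = 1.57079632679489661923;
        static const struct { double c, atanc; } tbl[6] = {
                { 0.0, 0.0 },                           /* t tiny: atan(t) ~ t */
                { 0.0, 0.0 },
                { 0.5, 0.46364760900080611621 },        /* atan(1/2) */
                { 1.0, 0.78539816339744830962 },        /* atan(1)   */
                { 1.0, 0.78539816339744830962 },
                { 1.5, 0.98279372324732906799 },        /* atan(3/2) */
        };

        if (t >= 39.0 / 16.0)                           /* large t */
                return pio2 - atan_poly(1.0 / t);

        int i = (int)(8.0 * t + 0.5) / 2;               /* the casel selector */
        if (i > 5)
                i = 5;                                  /* out-of-range selectors fall through */
        double c = tbl[i].c, a = tbl[i].atanc;
        return a + atan_poly((t - c) / (1.0 + c * t));
}

atan2 itself then applies the sign and quadrant fixups done at the end of the routine above from the saved sign bits of x and y.
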

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_cabs.S,v 1.3 2000/07/14 04:50:58 matt Exp $ */
/* $NetBSD: n_cabs.S,v 1.4 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -41,88 +41,88 @@
* double precision complex absolute value
* CABS by W. Kahan, 9/7/80.
* Revised for reserved operands by E. LeBlanc, 8/18/82
* argument for complex absolute value by reference, *4(ap)
* argument for cabs and hypot (C fcns) by value, 4(ap)
* output is in r0:r1 (error less than 0.86 ulps)
* argument for complex absolute value by reference, *4(%ap)
* argument for cabs and hypot (C fcns) by value, 4(%ap)
* output is in %r0:%r1 (error less than 0.86 ulps)
*/
/* entry for c functions cabs and hypot */
ALTENTRY(cabs)
ENTRY(hypot, 0x8040) # save r6, enable floating overflow
movq 4(ap),r0 # r0:1 = x
movq 12(ap),r2 # r2:3 = y
ENTRY(hypot, 0x8040) # save %r6, enable floating overflow
movq 4(%ap),%r0 # %r0:1 = x
movq 12(%ap),%r2 # %r2:3 = y
jbr cabs2
/* entry for Fortran use, call by: d = abs(z) */
ENTRY(z_abs, 0x8040) # save r6, enable floating overflow
movl 4(ap),r2 # indirect addressing is necessary here
movq (r2)+,r0 # r0:1 = x
movq (r2),r2 # r2:3 = y
ENTRY(z_abs, 0x8040) # save %r6, enable floating overflow
movl 4(%ap),%r2 # indirect addressing is necessary here
movq (%r2)+,%r0 # %r0:1 = x
movq (%r2),%r2 # %r2:3 = y
cabs2:
bicw3 $0x7f,r0,r4 # r4 has signed biased exp of x
cmpw $0x8000,r4
bicw3 $0x7f,%r0,%r4 # %r4 has signed biased exp of x
cmpw $0x8000,%r4
jeql return # x is a reserved operand, so return it
bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
cmpw $0x8000,r5
bicw3 $0x7f,%r2,%r5 # %r5 has signed biased exp of y
cmpw $0x8000,%r5
jneq cont /* y isn't a reserved operand */
movq r2,r0 /* return y if it's reserved */
movq %r2,%r0 /* return y if it's reserved */
ret
cont:
bsbb regs_set # r0:1 = dsqrt(x^2+y^2)/2^r6
addw2 r6,r0 # unscaled cdabs in r0:1
bsbb regs_set # %r0:1 = dsqrt(x^2+y^2)/2^%r6
addw2 %r6,%r0 # unscaled cdabs in %r0:1
jvc return # unless it overflows
subw2 $0x80,r0 # halve r0 to get meaningful overflow
addd2 r0,r0 # overflow; r0 is half of true abs value
subw2 $0x80,%r0 # halve %r0 to get meaningful overflow
addd2 %r0,%r0 # overflow; %r0 is half of true abs value
return:
ret
ENTRY(__libm_cdabs_r6,0) # ENTRY POINT for cdsqrt
# calculates a scaled (factor in r6)
# calculates a scaled (factor in %r6)
# complex absolute value
movq (r4)+,r0 # r0:r1 = x via indirect addressing
movq (r4),r2 # r2:r3 = y via indirect addressing
movq (%r4)+,%r0 # %r0:%r1 = x via indirect addressing
movq (%r4),%r2 # %r2:%r3 = y via indirect addressing
bicw3 $0x7f,r0,r5 # r5 has signed biased exp of x
cmpw $0x8000,r5
bicw3 $0x7f,%r0,%r5 # %r5 has signed biased exp of x
cmpw $0x8000,%r5
jeql cdreserved # x is a reserved operand
bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
cmpw $0x8000,r5
bicw3 $0x7f,%r2,%r5 # %r5 has signed biased exp of y
cmpw $0x8000,%r5
jneq regs_set /* y isn't a reserved operand either? */
cdreserved:
movl *4(ap),r4 # r4 -> (u,v), if x or y is reserved
movq r0,(r4)+ # copy u and v as is and return
movq r2,(r4) # (again addressing is indirect)
movl *4(%ap),%r4 # %r4 -> (u,v), if x or y is reserved
movq %r0,(%r4)+ # copy u and v as is and return
movq %r2,(%r4) # (again addressing is indirect)
ret
regs_set:
bicw2 $0x8000,r0 # r0:r1 = dabs(x)
bicw2 $0x8000,r2 # r2:r3 = dabs(y)
cmpw r0,r2
bicw2 $0x8000,%r0 # %r0:%r1 = dabs(x)
bicw2 $0x8000,%r2 # %r2:%r3 = dabs(y)
cmpw %r0,%r2
jgeq ordered
movq r0,r4
movq r2,r0
movq r4,r2 # force y's exp <= x's exp
movq %r0,%r4
movq %r2,%r0
movq %r4,%r2 # force y's exp <= x's exp
ordered:
bicw3 $0x7f,r0,r6 # r6 = exponent(x) + bias(129)
bicw3 $0x7f,%r0,%r6 # %r6 = exponent(x) + bias(129)
jeql retsb # if x = y = 0 then cdabs(x,y) = 0
subw2 $0x4780,r6 # r6 = exponent(x) - 14
subw2 r6,r0 # 2^14 <= scaled x < 2^15
bitw $0xff80,r2
subw2 $0x4780,%r6 # %r6 = exponent(x) - 14
subw2 %r6,%r0 # 2^14 <= scaled x < 2^15
bitw $0xff80,%r2
jeql retsb # if y = 0 return dabs(x)
subw2 r6,r2
cmpw $0x3780,r2 # if scaled y < 2^-18
subw2 %r6,%r2
cmpw $0x3780,%r2 # if scaled y < 2^-18
jgtr retsb # return dabs(x)
emodd r0,$0,r0,r4,r0 # r4 + r0:1 = scaled x^2
emodd r2,$0,r2,r5,r2 # r5 + r2:3 = scaled y^2
addd2 r2,r0
addl2 r5,r4
cvtld r4,r2
addd2 r2,r0 # r0:1 = scaled x^2 + y^2
emodd %r0,$0,%r0,%r4,%r0 # %r4 + %r0:1 = scaled x^2
emodd %r2,$0,%r2,%r5,%r2 # %r5 + %r2:3 = scaled y^2
addd2 %r2,%r0
addl2 %r5,%r4
cvtld %r4,%r2
addd2 %r2,%r0 # %r0:1 = scaled x^2 + y^2
jmp _C_LABEL(__libm_dsqrt_r5)+2
# r0:1 = dsqrt(x^2+y^2)/2^r6
# %r0:1 = dsqrt(x^2+y^2)/2^%r6
retsb:
rsb # error < 0.86 ulp
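
The scaling above exists so that x*x + y*y can be formed without overflow or underflow: both operands are divided by a power of two taken from the larger one, the square root is computed, and the factor is put back. A small C sketch of the same idea for IEEE doubles, using frexp/ldexp in place of the exponent-field arithmetic; the 2^-30 cutoff is illustrative, not the assembly's threshold:

#include <math.h>

static double hypot_sketch(double x, double y)
{
        x = fabs(x);
        y = fabs(y);
        if (x < y) {                    /* force y <= x, as the exponent compare does */
                double t = x; x = y; y = t;
        }
        if (x == 0.0)
                return 0.0;

        int k;
        x = frexp(x, &k);               /* x = m * 2^k, 0.5 <= m < 1        */
        y = ldexp(y, -k);               /* scale y the same way             */
        if (y < 0x1p-30)                /* y negligible beside x            */
                return ldexp(x, k);
        return ldexp(sqrt(x * x + y * y), k);   /* the sum can no longer over/underflow */
}
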

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_cbrt.S,v 1.4 2000/07/14 04:50:58 matt Exp $ */
/* $NetBSD: n_cbrt.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -45,43 +45,43 @@
*/
ALTENTRY(cbrt)
ENTRY(d_cbrt, 0x00c0) # save r6 & r7
movq 4(ap),r0 # r0 = argument x
ENTRY(d_cbrt, 0x00c0) # save %r6 & %r7
movq 4(%ap),%r0 # %r0 = argument x
jbr dcbrt2
ENTRY(dcbrt_, 0x00c0) # save r6 & r7
movq *4(ap),r0 # r0 = argument x
ENTRY(dcbrt_, 0x00c0) # save %r6 & %r7
movq *4(%ap),%r0 # %r0 = argument x
dcbrt2: bicw3 $0x807f,r0,r2 # biased exponent of x
dcbrt2: bicw3 $0x807f,%r0,%r2 # biased exponent of x
jeql return # dcbrt(0)=0 dcbrt(res)=res. operand
bicw3 $0x7fff,r0,ap # ap has sign(x)
xorw2 ap,r0 # r0 is abs(x)
movl r0,r2 # r2 has abs(x)
rotl $16,r2,r2 # r2 = |x| with bits unscrambled
divl2 $3,r2 # rough dcbrt with bias/3
addl2 B,r2 # restore bias, diminish fraction
rotl $16,r2,r2 # r2=|q|=|dcbrt| to 5 bits
mulf3 r2,r2,r3 # r3 =qq
divf2 r0,r3 # r3 = qq/x
mulf2 r2,r3
addf2 C,r3 # r3 = s = C + qqq/x
divf3 r3,D,r4 # r4 = D/s
addf2 E,r4
addf2 r4,r3 # r3 = s + E + D/s
divf3 r3,F,r3 # r3 = F / (s + E + D/s)
addf2 G,r3 # r3 = G + F / (s + E + D/s)
mulf2 r3,r2 # r2 = qr3 = new q to 23 bits
clrl r3 # r2:r3 = q as double float
muld3 r2,r2,r4 # r4:r5 = qq exactly
divd2 r4,r0 # r0:r1 = x/(q*q) rounded
subd3 r2,r0,r6 # r6:r7 = x/(q*q) - q exactly
movq r2,r4 # r4:r5 = q
addw2 $0x80,r4 # r4:r5 = 2 * q
addd2 r0,r4 # r4:r5 = 2*q + x/(q*q)
divd2 r4,r6 # r6:r7 = (x/(q*q)-q)/(2*q+x/(q*q))
muld2 r2,r6 # r6:r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
addd3 r6,r2,r0 # r0:r1 = q + r6:r7
bisw2 ap,r0 # restore the sign bit
bicw3 $0x7fff,%r0,%ap # ap has sign(x)
xorw2 %ap,%r0 # %r0 is abs(x)
movl %r0,%r2 # %r2 has abs(x)
rotl $16,%r2,%r2 # %r2 = |x| with bits unscrambled
divl2 $3,%r2 # rough dcbrt with bias/3
addl2 B,%r2 # restore bias, diminish fraction
rotl $16,%r2,%r2 # %r2=|q|=|dcbrt| to 5 bits
mulf3 %r2,%r2,%r3 # %r3 =qq
divf2 %r0,%r3 # %r3 = qq/x
mulf2 %r2,%r3
addf2 C,%r3 # %r3 = s = C + qqq/x
divf3 %r3,D,%r4 # %r4 = D/s
addf2 E,%r4
addf2 %r4,%r3 # %r3 = s + E + D/s
divf3 %r3,F,%r3 # %r3 = F / (s + E + D/s)
addf2 G,%r3 # %r3 = G + F / (s + E + D/s)
mulf2 %r3,%r2 # %r2 = q%r3 = new q to 23 bits
clrl %r3 # %r2:%r3 = q as double float
muld3 %r2,%r2,%r4 # %r4:%r5 = qq exactly
divd2 %r4,%r0 # %r0:%r1 = x/(q*q) rounded
subd3 %r2,%r0,%r6 # %r6:%r7 = x/(q*q) - q exactly
movq %r2,%r4 # %r4:%r5 = q
addw2 $0x80,%r4 # %r4:%r5 = 2 * q
addd2 %r0,%r4 # %r4:%r5 = 2*q + x/(q*q)
divd2 %r4,%r6 # %r6:%r7 = (x/(q*q)-q)/(2*q+x/(q*q))
muld2 %r2,%r6 # %r6:%r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
addd3 %r6,%r2,%r0 # %r0:%r1 = q + %r6:%r7
bisw2 %ap,%r0 # restore the sign bit
return:
ret # error less than 0.667 ulps
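
The structure above is a rough cube root obtained by integer arithmetic on the exponent field, refined by a single-precision rational correction and one double-precision step. A C sketch of the same two-stage idea under IEEE double assumptions; the bias constant below is a crude re-bias playing the role of B, and plain Halley steps replace the F-float polynomial, so everything here is illustrative:

#include <stdint.h>
#include <string.h>

static double cbrt_sketch(double x)
{
        if (x == 0.0)
                return x;               /* Inf/NaN are not handled in this sketch */
        double ax = x < 0.0 ? -x : x;

        uint64_t bits;
        memcpy(&bits, &ax, sizeof bits);
        bits = bits / 3 + 0x2AA0000000000000ULL;        /* exponent/3 plus a crude re-bias */
        double q;
        memcpy(&q, &bits, sizeof q);                    /* a few good bits, like the integer seed above */

        for (int i = 0; i < 3; i++) {                   /* same form as the D-float step above */
                double r = ax / (q * q);                /* x/(q*q) */
                q = q + q * (r - q) / (2.0 * q + r);    /* q + q*(x/(q*q)-q)/(2*q+x/(q*q)) */
        }
        return x < 0.0 ? -q : q;
}
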

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_infnan.S,v 1.4 2000/07/14 04:50:58 matt Exp $ */
/* $NetBSD: n_infnan.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -51,10 +51,10 @@ _sccsid:
.set ERANGE,34
ENTRY(infnan, 0)
cmpl 4(ap),$ERANGE
cmpl 4(%ap),$ERANGE
bneq 1f
movl $ERANGE,_C_LABEL(errno)
brb 2f
1: movl $EDOM,_C_LABEL(errno)
2: emodd $0,$0,$0x8000,r0,r0 # generates the reserved operand fault
2: emodd $0,$0,$0x8000,%r0,%r0 # generates the reserved operand fault
ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_sincos.S,v 1.4 2000/07/14 04:50:58 matt Exp $ */
/* $NetBSD: n_sincos.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -51,25 +51,25 @@
#include <machine/asm.h>
ENTRY(sin, 0xfc0)
movq 4(ap),r0
bicw3 $0x807f,r0,r2
movq 4(%ap),%r0
bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
movpsl r2
bicw3 $0xff9f,r2,-(sp)
movpsl %r2
bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
bicpsw $0x0060
/*
* Entered by sine ; save 0 in r4 .
* Entered by sine ; save 0 in %r4 .
*/
jsb _C_LABEL(__libm_argred)+2
movl $0,r4
movl $0,%r4
jsb _C_LABEL(__libm_sincos)+2
bispsw (sp)+
bispsw (%sp)+
1: ret
/*
@ -80,24 +80,24 @@ ENTRY(sin, 0xfc0)
*/
ENTRY(cos, 0x0fc0)
movq 4(ap),r0
bicw3 $0x7f,r0,r2
cmpw $0x8000,r2
movq 4(%ap),%r0
bicw3 $0x7f,%r0,%r2
cmpw $0x8000,%r2
beql 1f # if x is reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
movpsl r2
bicw3 $0xff9f,r2,-(sp)
movpsl %r2
bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
bicpsw $0x0060
/*
* Entered by cosine ; save 1 in r4 .
* Entered by cosine ; save 1 in %r4 .
*/
jsb _C_LABEL(__libm_argred)+2
movl $1,r4
movl $1,%r4
jsb _C_LABEL(__libm_sincos)+2
bispsw (sp)+
bispsw (%sp)+
1: ret
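
Both entry points above share one argument-reduction routine and one sin/cos kernel; the only difference is the 0 or 1 passed in %r4, which offsets the quadrant. A C sketch of that dispatch, with a deliberately crude reduction and the libm sin/cos standing in for the __libm_sincos polynomials; all names here are illustrative:

#include <math.h>

static double kernel_sin(double r) { return sin(r); }  /* stand-ins for __libm_sincos */
static double kernel_cos(double r) { return cos(r); }

static double sincos_sketch(double x, int which)       /* which: 0 = sine, 1 = cosine */
{
        const double pio2 = 1.57079632679489661923;
        double k = floor(x / pio2 + 0.5);               /* crude reduction; __libm_argred is careful */
        double r = x - k * pio2;                        /* r roughly in [-pi/4, pi/4] */
        int q = ((int)fmod(k, 4.0) + 4 + which) % 4;    /* quadrant, offset by `which' */

        switch (q) {
        case 0:  return  kernel_sin(r);
        case 1:  return  kernel_cos(r);
        case 2:  return -kernel_sin(r);
        default: return -kernel_cos(r);
        }
}

sin(x) corresponds to sincos_sketch(x, 0) and cos(x) to sincos_sketch(x, 1), matching the 0/1 saved in %r4 above.
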

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_sqrt.S,v 1.4 2002/02/21 07:49:55 matt Exp $ */
/* $NetBSD: n_sqrt.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -49,14 +49,14 @@
*/
.set EDOM,33
ENTRY(d_sqrt, 0x003c) # save r5,r4,r3,r2
movq *4(ap),r0
ENTRY(d_sqrt, 0x003c) # save %r5,%r4,%r3,%r2
movq *4(%ap),%r0
jbr dsqrt2
ENTRY(sqrt, 0x003c) # save r5,r4,r3,r2
movq 4(ap),r0
ENTRY(sqrt, 0x003c) # save %r5,%r4,%r3,%r2
movq 4(%ap),%r0
dsqrt2: bicw3 $0x807f,r0,r2 # check exponent of input
dsqrt2: bicw3 $0x807f,%r0,%r2 # check exponent of input
jeql noexp # biased exponent is zero -> 0.0 or reserved
bsbb __libm_dsqrt_r5_lcl+2
noexp: ret
@ -69,32 +69,32 @@ ALTENTRY(__libm_dsqrt_r5)
nop
/* ENTRY POINT FOR cdabs and cdsqrt */
/* returns double square root scaled by */
/* 2^r6 */
/* 2^%r6 */
movd r0,r4
movd %r0,%r4
jleq nonpos # argument is not positive
movzwl r4,r2
ashl $-1,r2,r0
addw2 $0x203c,r0 # r0 has magic initial approximation
movzwl %r4,%r2
ashl $-1,%r2,%r0
addw2 $0x203c,%r0 # %r0 has magic initial approximation
/*
* Do two steps of Heron's rule
* ((arg/guess) + guess) / 2 = better guess
*/
divf3 r0,r4,r2
addf2 r2,r0
subw2 $0x80,r0 # divide by two
divf3 %r0,%r4,%r2
addf2 %r2,%r0
subw2 $0x80,%r0 # divide by two
divf3 r0,r4,r2
addf2 r2,r0
subw2 $0x80,r0 # divide by two
divf3 %r0,%r4,%r2
addf2 %r2,%r0
subw2 $0x80,%r0 # divide by two
/* Scale argument and approximation to prevent over/underflow */
bicw3 $0x807f,r4,r1
subw2 $0x4080,r1 # r1 contains scaling factor
subw2 r1,r4
movl r0,r2
subw2 r1,r2
bicw3 $0x807f,%r4,%r1
subw2 $0x4080,%r1 # %r1 contains scaling factor
subw2 %r1,%r4
movl %r0,%r2
subw2 %r1,%r2
/* Cubic step
*
@ -102,16 +102,16 @@ ALTENTRY(__libm_dsqrt_r5)
* a is approximation, and n is the original argument.
* (let s be scale factor in the following comments)
*/
clrl r1
clrl r3
muld2 r0,r2 # r2:r3 = a*a/s
subd2 r2,r4 # r4:r5 = n/s - a*a/s
addw2 $0x100,r2 # r2:r3 = 4*a*a/s
addd2 r4,r2 # r2:r3 = n/s + 3*a*a/s
muld2 r0,r4 # r4:r5 = a*n/s - a*a*a/s
divd2 r2,r4 # r4:r5 = a*(n-a*a)/(n+3*a*a)
addw2 $0x80,r4 # r4:r5 = 2*a*(n-a*a)/(n+3*a*a)
addd2 r4,r0 # r0:r1 = a + 2*a*(n-a*a)/(n+3*a*a)
clrl %r1
clrl %r3
muld2 %r0,%r2 # %r2:%r3 = a*a/s
subd2 %r2,%r4 # %r4:%r5 = n/s - a*a/s
addw2 $0x100,%r2 # %r2:%r3 = 4*a*a/s
addd2 %r4,%r2 # %r2:%r3 = n/s + 3*a*a/s
muld2 %r0,%r4 # %r4:%r5 = a*n/s - a*a*a/s
divd2 %r2,%r4 # %r4:%r5 = a*(n-a*a)/(n+3*a*a)
addw2 $0x80,%r4 # %r4:%r5 = 2*a*(n-a*a)/(n+3*a*a)
addd2 %r4,%r0 # %r0:%r1 = a + 2*a*(n-a*a)/(n+3*a*a)
rsb # DONE!
nonpos:
jneq negarg
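
The routine above is three stages: a magic initial approximation made from the exponent bits, two Heron steps in F-float, and the cubic step a + 2*a*(n-a*a)/(n+3*a*a) in D-float. A C sketch of the same stages for IEEE doubles; the magic constant differs from the VAX $0x203c, and no reserved-operand handling is attempted:

#include <math.h>
#include <stdint.h>
#include <string.h>

static double sqrt_sketch(double n)
{
        if (n <= 0.0)
                return n == 0.0 ? n : NAN;      /* the assembly faults on negative input instead */

        uint64_t bits;
        memcpy(&bits, &n, sizeof bits);
        bits = (bits >> 1) + 0x1FF8000000000000ULL;     /* halve the exponent, restore half the bias */
        double a;
        memcpy(&a, &bits, sizeof a);                    /* magic initial approximation */

        a = 0.5 * (n / a + a);                          /* Heron's rule, twice */
        a = 0.5 * (n / a + a);

        return a + 2.0 * a * (n - a * a) / (n + 3.0 * a * a);  /* the cubic step from the comments */
}
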

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_support.S,v 1.3 2000/07/14 04:51:00 matt Exp $ */
/* $NetBSD: n_support.S,v 1.4 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -54,53 +54,53 @@ _sccsid:
*/
ENTRY(copysign, 0)
movq 4(ap),r0 # load x into r0
bicw3 $0x807f,r0,r2 # mask off the exponent of x
movq 4(%ap),%r0 # load x into %r0
bicw3 $0x807f,%r0,%r2 # mask off the exponent of x
beql Lz # if zero or reserved op then return x
bicw3 $0x7fff,12(ap),r2 # copy the sign bit of y into r2
bicw2 $0x8000,r0 # replace x by |x|
bisw2 r2,r0 # copy the sign bit of y to x
bicw3 $0x7fff,12(%ap),%r2 # copy the sign bit of y into %r2
bicw2 $0x8000,%r0 # replace x by |x|
bisw2 %r2,%r0 # copy the sign bit of y to x
Lz: ret
/*
* double logb(double x);
*/
ENTRY(logb, 0)
bicl3 $0xffff807f,4(ap),r0 # mask off the exponent of x
bicl3 $0xffff807f,4(%ap),%r0 # mask off the exponent of x
beql Ln
ashl $-7,r0,r0 # get the bias exponent
subl2 $129,r0 # get the unbias exponent
cvtld r0,r0 # return the answer in double
ashl $-7,%r0,%r0 # get the bias exponent
subl2 $129,%r0 # get the unbias exponent
cvtld %r0,%r0 # return the answer in double
ret
Ln: movq 4(ap),r0 # r0:1 = x (zero or reserved op)
Ln: movq 4(%ap),%r0 # %r0:1 = x (zero or reserved op)
bneq 1f # simply return if reserved op
movq $0x0000fe00ffffcfff,r0 # -2147483647.0
movq $0x0000fe00ffffcfff,%r0 # -2147483647.0
1: ret
/*
* long finite(double x);
*/
ENTRY(finite, 0)
bicw3 $0x7f,4(ap),r0 # mask off the mantissa
cmpw r0,$0x8000 # to see if x is the reserved op
bicw3 $0x7f,4(%ap),%r0 # mask off the mantissa
cmpw %r0,$0x8000 # to see if x is the reserved op
beql 1f # if so, return FALSE (0)
movl $1,r0 # else return TRUE (1)
movl $1,%r0 # else return TRUE (1)
ret
1: clrl r0
1: clrl %r0
ret
/* int isnan(double x);
*/
#if 0
ENTRY(isnan, 0)
clrl r0
clrl %r0
ret
#endif
/* int isnanf(float x);
*/
ENTRY(isnanf, 0)
clrl r0
clrl %r0
ret
/*
@ -110,28 +110,28 @@ ENTRY(isnanf, 0)
.set ERANGE,34
ENTRY(scalb, 0)
movq 4(ap),r0
bicl3 $0xffff807f,r0,r3
movq 4(%ap),%r0
bicl3 $0xffff807f,%r0,%r3
beql ret1 # 0 or reserved operand
movq 12(ap),r4
cvtdl r4, r2
cmpl r2,$0x12c
movq 12(%ap),%r4
cvtdl %r4, %r2
cmpl %r2,$0x12c
bgeq ovfl
cmpl r2,$-0x12c
cmpl %r2,$-0x12c
bleq unfl
ashl $7,r2,r2
addl2 r2,r3
ashl $7,%r2,%r2
addl2 %r2,%r3
bleq unfl
cmpl r3,$0x8000
cmpl %r3,$0x8000
bgeq ovfl
addl2 r2,r0
addl2 %r2,%r0
ret
ovfl: pushl $ERANGE
calls $1,_C_LABEL(infnan) # if it returns
bicw3 $0x7fff,4(ap),r2 # get the sign of input arg
bisw2 r2,r0 # re-attach the sign to r0/1
bicw3 $0x7fff,4(%ap),%r2 # get the sign of input arg
bisw2 %r2,%r0 # re-attach the sign to %r0/1
ret
unfl: movq $0,r0
unfl: movq $0,%r0
ret1: ret
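
copysign, logb and scalb above all work by operating directly on the sign bit and exponent field. A C sketch of the same bit-field manipulation for IEEE doubles; the VAX D-float layout and bias differ, the helper names are illustrative, and the special cases the assembly handles (reserved operands, over/underflow) are omitted:

#include <stdint.h>
#include <string.h>

static uint64_t dbits(double x) { uint64_t b; memcpy(&b, &x, sizeof b); return b; }
static double bitsd(uint64_t b) { double x; memcpy(&x, &b, sizeof x); return x; }

static double copysign_sketch(double x, double y)
{
        /* keep everything of x except the sign bit, which is taken from y */
        return bitsd((dbits(x) & ~(1ULL << 63)) | (dbits(y) & (1ULL << 63)));
}

static double logb_sketch(double x)
{
        /* read the exponent field and remove the bias (normal x only) */
        return (double)((int)((dbits(x) >> 52) & 0x7ff) - 1023);
}

static double scalb_sketch(double x, int n)
{
        /* add n to the exponent field; a real scalb must catch over/underflow */
        return bitsd(dbits(x) + ((uint64_t)(int64_t)n << 52));
}
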
/*
@ -143,83 +143,83 @@ ret1: ret
.set EDOM,33
ENTRY(drem, 0x0fc0)
subl2 $12,sp
movq 4(ap),r0 #r0=x
movq 12(ap),r2 #r2=y
subl2 $12,%sp
movq 4(%ap),%r0 #%r0=x
movq 12(%ap),%r2 #%r2=y
jeql Rop #if y=0 then generate reserved op fault
bicw3 $0x007f,r0,r4 #check if x is Rop
cmpw r4,$0x8000
bicw3 $0x007f,%r0,%r4 #check if x is Rop
cmpw %r4,$0x8000
jeql Ret #if x is Rop then return Rop
bicl3 $0x007f,r2,r4 #check if y is Rop
cmpw r4,$0x8000
bicl3 $0x007f,%r2,%r4 #check if y is Rop
cmpw %r4,$0x8000
jeql Ret #if y is Rop then return Rop
bicw2 $0x8000,r2 #y := |y|
movw $0,-4(fp) #-4(fp) = nx := 0
cmpw r2,$0x1c80 #yexp ? 57
bicw2 $0x8000,%r2 #y := |y|
movw $0,-4(%fp) #-4(%fp) = nx := 0
cmpw %r2,$0x1c80 #yexp ? 57
bgtr C1 #if yexp > 57 goto C1
addw2 $0x1c80,r2 #scale up y by 2**57
movw $0x1c80,-4(fp) #nx := 57 (exponent field)
addw2 $0x1c80,%r2 #scale up y by 2**57
movw $0x1c80,-4(%fp) #nx := 57 (exponent field)
C1:
movw -4(fp),-8(fp) #-8(fp) = nf := nx
bicw3 $0x7fff,r0,-12(fp) #-12(fp) = sign of x
bicw2 $0x8000,r0 #x := |x|
movq r2,r10 #y1 := y
bicl2 $0xffff07ff,r11 #clear the last 27 bits of y1
movw -4(%fp),-8(%fp) #-8(%fp) = nf := nx
bicw3 $0x7fff,%r0,-12(%fp) #-12(%fp) = sign of x
bicw2 $0x8000,%r0 #x := |x|
movq %r2,%r10 #y1 := y
bicl2 $0xffff07ff,%r11 #clear the last 27 bits of y1
loop:
cmpd r0,r2 #x ? y
cmpd %r0,%r2 #x ? y
bleq E1 #if x <= y goto E1
/* begin argument reduction */
movq r2,r4 #t =y
movq r10,r6 #t1=y1
bicw3 $0x807f,r0,r8 #xexp= exponent of x
        bicw3 $0x807f,r2,r9 #yexp= exponent of y
subw2 r9,r8 #xexp-yexp
subw2 $0x0c80,r8 #k=xexp-yexp-25(exponent bit field)
movq %r2,%r4 #t =y
movq %r10,%r6 #t1=y1
bicw3 $0x807f,%r0,%r8 #xexp= exponent of x
        bicw3 $0x807f,%r2,%r9 #yexp= exponent of y
subw2 %r9,%r8 #xexp-yexp
subw2 $0x0c80,%r8 #k=xexp-yexp-25(exponent bit field)
blss C2 #if k<0 goto C2
addw2 r8,r4 #t +=k
addw2 r8,r6 #t1+=k, scale up t and t1
addw2 %r8,%r4 #t +=k
addw2 %r8,%r6 #t1+=k, scale up t and t1
C2:
divd3 r4,r0,r8 #x/t
cvtdl r8,r8 #n=[x/t] truncated
cvtld r8,r8 #float(n)
subd2 r6,r4 #t:=t-t1
muld2 r8,r4 #n*(t-t1)
muld2 r8,r6 #n*t1
subd2 r6,r0 #x-n*t1
subd2 r4,r0 #(x-n*t1)-n*(t-t1)
divd3 %r4,%r0,%r8 #x/t
cvtdl %r8,%r8 #n=[x/t] truncated
cvtld %r8,%r8 #float(n)
subd2 %r6,%r4 #t:=t-t1
muld2 %r8,%r4 #n*(t-t1)
muld2 %r8,%r6 #n*t1
subd2 %r6,%r0 #x-n*t1
subd2 %r4,%r0 #(x-n*t1)-n*(t-t1)
jbr loop
E1:
movw -4(fp),r6 #r6=nx
movw -4(%fp),%r6 #%r6=nx
beql C3 #if nx=0 goto C3
addw2 r6,r0 #x:=x*2**57 scale up x by nx
movw $0,-4(fp) #clear nx
addw2 %r6,%r0 #x:=x*2**57 scale up x by nx
movw $0,-4(%fp) #clear nx
jbr loop
C3:
movq r2,r4 #r4 = y
subw2 $0x80,r4 #r4 = y/2
cmpd r0,r4 #x:y/2
movq %r2,%r4 #%r4 = y
subw2 $0x80,%r4 #%r4 = y/2
cmpd %r0,%r4 #x:y/2
blss E2 #if x < y/2 goto E2
bgtr C4 #if x > y/2 goto C4
cvtdl r8,r8 #ifix(float(n))
blbc r8,E2 #if the last bit is zero, goto E2
cvtdl %r8,%r8 #ifix(float(n))
blbc %r8,E2 #if the last bit is zero, goto E2
C4:
subd2 r2,r0 #x-y
subd2 %r2,%r0 #x-y
E2:
xorw2 -12(fp),r0 #x^sign (exclusive or)
movw -8(fp),r6 #r6=nf
bicw3 $0x807f,r0,r8 #r8=exponent of x
bicw2 $0x7f80,r0 #clear the exponent of x
subw2 r6,r8 #r8=xexp-nf
xorw2 -12(%fp),%r0 #x^sign (exclusive or)
movw -8(%fp),%r6 #%r6=nf
bicw3 $0x807f,%r0,%r8 #%r8=exponent of x
bicw2 $0x7f80,%r0 #clear the exponent of x
subw2 %r6,%r8 #%r8=xexp-nf
bgtr C5 #if xexp-nf is positive goto C5
movw $0,r8 #clear r8
movq $0,r0 #x underflow to zero
movw $0,%r8 #clear %r8
movq $0,%r0 #x underflow to zero
C5:
bisw2 r8,r0 /* put r8 into x's exponent field */
bisw2 %r8,%r0 /* put %r8 into x's exponent field */
ret
Rop: #Reserved operand
pushl $EDOM
calls $1,_C_LABEL(infnan) #generate reserved op fault
ret
Ret:
movq $0x8000,r0 #propagate reserved op
movq $0x8000,%r0 #propagate reserved op
ret
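
The heart of drem above is the split of y into a ~26-bit head y1 and an exact tail, so that n*y1 and the subtractions in the loop do not accumulate rounding error. A C sketch of one pass of that reduction for IEEE doubles; a Dekker-style split stands in for the bicl2 mask, and the scaling of tiny y and the final tie handling are omitted:

#include <math.h>

/* one pass of the reduction loop, assuming x > y > 0 and y not near overflow */
static double drem_pass(double x, double y)
{
        double c  = y * 134217729.0;            /* 2^27 + 1: Dekker-style split    */
        double y1 = c - (c - y);                /* head of y, ~26 significant bits */

        int k = ilogb(x) - ilogb(y) - 25;       /* keep the partial quotient small */
        if (k < 0)
                k = 0;
        double t  = scalbn(y,  k);              /* y  * 2^k, exact                 */
        double t1 = scalbn(y1, k);              /* y1 * 2^k, exact                 */

        double n = trunc(x / t);                /* n = [x/t]                       */
        /* n*t1 and n*(t-t1) both fit in a double, so little rounding error builds up */
        return (x - n * t1) - n * (t - t1);
}

drem itself repeats this until x <= y, then subtracts y once more when the remainder exceeds y/2 (ties going to even), and restores the sign of x, as the assembly does after the loop.
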

View File

@ -1,4 +1,4 @@
/* $NetBSD: n_tan.S,v 1.4 2000/07/14 04:51:00 matt Exp $ */
/* $NetBSD: n_tan.S,v 1.5 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1985, 1993
* The Regents of the University of California. All rights reserved.
@ -48,15 +48,15 @@
* method: true range reduction to [-pi/4,pi/4], P. Tang & B. Corbett
* S. McDonald, April 4, 1985
*/
ENTRY(tan, 0x0fc0) # save r6-r11
movq 4(ap),r0
bicw3 $0x807f,r0,r2
ENTRY(tan, 0x0fc0) # save %r6-%r11
movq 4(%ap),%r0
bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
movpsl r2
bicw3 $0xff9f,r2,-(sp)
movpsl %r2
bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@ -64,30 +64,30 @@ ENTRY(tan, 0x0fc0) # save r6-r11
jsb _C_LABEL(__libm_argred)+2
/*
* At this point,
* r0 contains the quadrant number, 0, 1, 2, or 3;
* r2/r1 contains the reduced argument as a D-format number;
* r3 contains a F-format extension to the reduced argument;
* %r0 contains the quadrant number, 0, 1, 2, or 3;
* %r2/%r1 contains the reduced argument as a D-format number;
* %r3 contains a F-format extension to the reduced argument;
*
* Save r3/r0 so that we can call cosine after calling sine.
* Save %r3/%r0 so that we can call cosine after calling sine.
*/
movq r2,-(sp)
movq r0,-(sp)
movq %r2,-(%sp)
movq %r0,-(%sp)
/*
* Call sine. r4 = 0 implies sine.
* Call sine. %r4 = 0 implies sine.
*/
movl $0,r4
movl $0,%r4
jsb _C_LABEL(__libm_sincos)+2
/*
* Save sin(x) in r11/r10 .
* Save sin(x) in %r11/%r10 .
*/
movd r0,r10
movd %r0,%r10
/*
* Call cosine. r4 = 1 implies cosine.
* Call cosine. %r4 = 1 implies cosine.
*/
movq (sp)+,r0
movq (sp)+,r2
movl $1,r4
movq (%sp)+,%r0
movq (%sp)+,%r2
movl $1,%r4
jsb _C_LABEL(__libm_sincos)+2
divd3 r0,r10,r0
bispsw (sp)+
divd3 %r0,%r10,%r0
bispsw (%sp)+
1: ret

View File

@ -1,4 +1,4 @@
/* $NetBSD: mdprologue.S,v 1.10 1999/06/28 17:28:56 ragge Exp $ */
/* $NetBSD: mdprologue.S,v 1.11 2002/02/24 01:06:21 matt Exp $ */
/*
* Copyright (c) 1998 Matt Thomas <matt@3am-software.com>
@ -43,24 +43,24 @@
.align 2
_rtl: /* crt0 calls us here */
.word 0 /* no registers to save */
movl 8(ap),r0 /* load crtp into r0 */
movl 8(%ap),%r0 /* load crtp into %r0 */
/* setup arguments for rtld() */
/*
* Add the 1st entry in the GOT (e.g. __DYNAMIC) to the base
 * address of ld.so and push that onto the stack.
*/
addl3 __GLOBAL_OFFSET_TABLE_,(r0),-(sp)
pushl r0 /* 2nd arg == crtp */
pushl 4(ap) /* 1st arg == version */
addl3 __GLOBAL_OFFSET_TABLE_,(%r0),-(%sp)
pushl %r0 /* 2nd arg == crtp */
pushl 4(%ap) /* 1st arg == version */
calls $3,_rtld /* _rtld(version, crtp, DYNAMIC) */
movpsl -(sp) /* flush the instruction cache */
movpsl -(%sp) /* flush the instruction cache */
pushab 1f /* by issuing an */
rei /* rei. */
1: ret
/*
* First call to a procedure generally comes through here for binding.
* We got here via JSB so now (sp) is inside our jmpslot_t. So we
* We got here via JSB so now (%sp) is inside our jmpslot_t. So we
* simply preserve our registers, push the address of jmpslot_t for
 * _binder. Save the address we are supposed to call (which was
* returned in R0) in the stack location that the JSB used to store
@ -78,26 +78,26 @@ _rtl: /* crt0 calls us here */
.align 1
.type _binder_entry,@label
_binder_entry:
pushr $0x3f /* save r0 to r5 */
pushr $0x3f /* save %r0 to %r5 */
#ifdef DEBUG
pushl $29
pushab LC1
pushl $2
calls $3,_write
#endif
subl3 $8, 24(sp), -(sp) /* point to beginning of jmpslot */
bicl2 $3, (sp)
subl3 $8, 24(%sp), -(%sp) /* point to beginning of jmpslot */
bicl2 $3, (%sp)
calls $1, _binder /* _binder(jsp) */
movpsl -(sp) /* flush the instruction cache */
movpsl -(%sp) /* flush the instruction cache */
pushab 1f /* by issuing an */
rei /* rei. */
1: movl r0, 24(sp) /* save return address onto stack */
bicw3 6(fp),(r0),r0 /* does the entry mask save any additional regs */
popr $0x3f /* restore r0 to r5 (cond flags aren't modified) */
1: movl %r0, 24(%sp) /* save return address onto stack */
bicw3 6(%fp),(%r0),%r0 /* does the entry mask save any additional regs */
popr $0x3f /* restore %r0 to %r5 (cond flags aren't modified) */
bneq 2f /* yes? do it the hard way */
addl2 $2,(sp) /* no? skip past the mask */
addl2 $2,(%sp) /* no? skip past the mask */
rsb /* and jump to it */
2: callg (ap), *(sp)+ /* return value from _binder() == actual */
2: callg (%ap), *(%sp)+ /* return value from _binder() == actual */
ret
#ifdef DEBUG
LC1: .asciz "ld.so: entered _binder_entry\n"
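
Conceptually, the trampoline above saves the caller's registers, hands the jmpslot address to _binder, and jumps to whatever address comes back, after which later calls no longer take this path. A C sketch of the resolver side only, with made-up types in place of the real ld.so structures:

typedef void (*fn_t)(void);

struct fake_obj {                       /* stand-in for the real ld.so object data */
        fn_t    *plt_slots;             /* where the stubs indirect through        */
        fn_t    *targets;               /* "symbol lookup": the resolved addresses */
};

static fn_t bind_sketch(struct fake_obj *obj, unsigned reloc_index)
{
        fn_t target = obj->targets[reloc_index];        /* the lookup _binder/_rtld_bind perform */
        obj->plt_slots[reloc_index] = target;           /* patch so later calls go direct        */
        return target;                                  /* the stub jumps here this one time     */
}
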

View File

@ -1,4 +1,4 @@
/* $NetBSD: rtld_start.S,v 1.6 2000/08/07 01:47:07 matt Exp $ */
/* $NetBSD: rtld_start.S,v 1.7 2002/02/24 01:06:22 matt Exp $ */
/*
* Copyright 1996 Matt Thomas <matt@3am-software.com>
@ -39,34 +39,34 @@ ENTRY(_rtld_start, 0)
/* Allocate space on the stack for the cleanup and obj_main
* entries that _rtld() will provide for us.
*/
clrl fp
subl2 $8,sp
pushl sp
clrl %fp
subl2 $8,%sp
pushl %sp
calls $1,_rtld
movq (sp)+,r7 /* grab cleanup and obj_main into r7/r8 */
jmp 2(r0) /* jump to entry point + 2 */
movq (%sp)+,%r7 /* grab cleanup and obj_main into %r7/%r8 */
jmp 2(%r0) /* jump to entry point + 2 */
/*
* Lazy binding entry point, called via PLT.
*/
ALTENTRY(_rtld_bind_start)
pushr $0xff /* save R0-R7 */
movq 32(sp),r0 /* get addresses of plt.got & reloc index */
pushl (r1) /* push relocation index */
pushl r0 /* push address of obj entry */
movq 32(%sp),%r0 /* get addresses of plt.got & reloc index */
pushl (%r1) /* push relocation index */
pushl %r0 /* push address of obj entry */
calls $2,_rtld_bind
movpsl -(sp) /* flush the instruction cache */
movpsl -(%sp) /* flush the instruction cache */
pushab 1f /* by issuing an */
rei /* rei. */
1: movl r0,36(sp) /* save return address onto stack */
2: bicw3 6(fp),(r0),r0 /* does the entry mask save any additional regs */
1: movl %r0,36(%sp) /* save return address onto stack */
2: bicw3 6(%fp),(%r0),%r0 /* does the entry mask save any additional regs */
popr $0xff /* restore R0-R7 (cond flags not modified) */
bneq 4f /* yes? do it the hard way */
3: addl2 $4,sp /* no? skip past plt.got on stack */
addl2 $2,(sp) /* skip past the mask */
3: addl2 $4,%sp /* no? skip past plt.got on stack */
addl2 $2,(%sp) /* skip past the mask */
rsb /* and jump to it */
4: addl2 $4,sp
callg (ap),*(sp)+ /* return value from _rtld_bind() == actual */
4: addl2 $4,%sp
callg (%ap),*(%sp)+ /* return value from _rtld_bind() == actual */
ret