diff --git a/regress/lib/libc/ieeefp/testfloat/arch/i386/systfloat.S b/regress/lib/libc/ieeefp/testfloat/arch/i386/systfloat.S
index 01e8983e4945..ad0efd33dc61 100644
--- a/regress/lib/libc/ieeefp/testfloat/arch/i386/systfloat.S
+++ b/regress/lib/libc/ieeefp/testfloat/arch/i386/systfloat.S
@@ -1,3 +1,42 @@
+/* $NetBSD: systfloat.S,v 1.2 2001/03/13 07:43:19 ross Exp $ */
+
+/* This is a derivative work. */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Ross Harvey.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
 /*
 ===============================================================================
 
@@ -29,9 +68,10 @@ this code that are retained.
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_int32_to_floatx80
-_syst_int32_to_floatx80:
+
+#include <machine/asm.h>
+
+ENTRY(syst_int32_to_floatx80)
 	fildl 8(%esp)
 	movl 4(%esp),%eax
 	fstpt (%eax)
@@ -41,9 +81,8 @@ _syst_int32_to_floatx80:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_int64_to_floatx80
-_syst_int64_to_floatx80:
+
+ENTRY(syst_int64_to_floatx80)
 	fildq 8(%esp)
 	movl 4(%esp),%eax
 	fstpt (%eax)
@@ -53,9 +92,7 @@ _syst_int64_to_floatx80:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_float32_to_floatx80
-_syst_float32_to_floatx80:
+ENTRY(syst_float32_to_floatx80)
 	flds 8(%esp)
 	movl 4(%esp),%eax
 	fstpt (%eax)
@@ -65,9 +102,7 @@ _syst_float32_to_floatx80:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_float64_to_floatx80
-_syst_float64_to_floatx80:
+ENTRY(syst_float64_to_floatx80)
 	fldl 8(%esp)
 	movl 4(%esp),%eax
 	fstpt (%eax)
@@ -77,9 +112,7 @@ _syst_float64_to_floatx80:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_to_int32
-_syst_floatx80_to_int32:
+ENTRY(syst_floatx80_to_int32)
 	fldt 4(%esp)
 	subl $4,%esp
 	fistpl (%esp)
@@ -91,9 +124,7 @@ _syst_floatx80_to_int32:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_to_int64
-_syst_floatx80_to_int64:
+ENTRY(syst_floatx80_to_int64)
 	fldt 4(%esp)
 	subl $8,%esp
 	fistpq (%esp)
@@ -106,9 +137,7 @@ _syst_floatx80_to_int64:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_to_float32
-_syst_floatx80_to_float32:
+ENTRY(syst_floatx80_to_float32)
 	fldt 4(%esp)
 	subl $4,%esp
 	fstps (%esp)
@@ -120,9 +149,7 @@ _syst_floatx80_to_float32:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_to_float64
-_syst_floatx80_to_float64:
+ENTRY(syst_floatx80_to_float64)
 	fldt 4(%esp)
 	subl $8,%esp
 	fstpl (%esp)
@@ -135,9 +162,7 @@ _syst_floatx80_to_float64:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_round_to_int
-_syst_floatx80_round_to_int:
+ENTRY(syst_floatx80_round_to_int)
 	fldt 8(%esp)
 	frndint
 	movl 4(%esp),%eax
@@ -148,9 +173,7 @@ _syst_floatx80_round_to_int:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_add
-_syst_floatx80_add:
+ENTRY(syst_floatx80_add)
 	fldt 8(%esp)
 	fldt 20(%esp)
 	faddp
@@ -162,9 +185,7 @@ _syst_floatx80_add:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_sub
-_syst_floatx80_sub:
+ENTRY(syst_floatx80_sub)
 	fldt 8(%esp)
 	fldt 20(%esp)
 	fsubrp
@@ -176,9 +197,7 @@ _syst_floatx80_sub:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_mul
-_syst_floatx80_mul:
+ENTRY(syst_floatx80_mul)
 	fldt 8(%esp)
 	fldt 20(%esp)
 	fmulp
@@ -190,9 +209,7 @@ _syst_floatx80_mul:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_div
-_syst_floatx80_div:
+ENTRY(syst_floatx80_div)
 	fldt 8(%esp)
 	fldt 20(%esp)
 	fdivrp
@@ -204,9 +221,7 @@ _syst_floatx80_div:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_rem
-_syst_floatx80_rem:
+ENTRY(syst_floatx80_rem)
 	fldt 20(%esp)
 	fldt 8(%esp)
 floatx80_rem_loop:
@@ -223,9 +238,7 @@ floatx80_rem_loop:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_sqrt
-_syst_floatx80_sqrt:
+ENTRY(syst_floatx80_sqrt)
 	fldt 8(%esp)
 	fsqrt
 	movl 4(%esp),%eax
@@ -236,9 +249,7 @@ _syst_floatx80_sqrt:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_eq
-_syst_floatx80_eq:
+ENTRY(syst_floatx80_eq)
 	fldt 16(%esp)
 	fldt 4(%esp)
 	fucompp
@@ -253,9 +264,7 @@ _syst_floatx80_eq:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_le
-_syst_floatx80_le:
+ENTRY(syst_floatx80_le)
 	fldt 4(%esp)
 	fldt 16(%esp)
 	fcompp
@@ -269,9 +278,7 @@ _syst_floatx80_le:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_lt
-_syst_floatx80_lt:
+ENTRY(syst_floatx80_lt)
 	fldt 4(%esp)
 	fldt 16(%esp)
 	fcompp
@@ -285,9 +292,7 @@ _syst_floatx80_lt:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_eq_signaling
-_syst_floatx80_eq_signaling:
+ENTRY(syst_floatx80_eq_signaling)
 	fldt 16(%esp)
 	fldt 4(%esp)
 	fcompp
@@ -302,9 +307,7 @@ _syst_floatx80_eq_signaling:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_le_quiet
-_syst_floatx80_le_quiet:
+ENTRY(syst_floatx80_le_quiet)
 	fldt 4(%esp)
 	fldt 16(%esp)
 	fucompp
@@ -318,9 +321,8 @@ _syst_floatx80_le_quiet:
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
 */
-	.align 2
-.globl _syst_floatx80_lt_quiet
-_syst_floatx80_lt_quiet:
+
+ENTRY(syst_floatx80_lt_quiet)
 	fldt 4(%esp)
 	fldt 16(%esp)
 	fucompp
@@ -330,3 +332,45 @@ _syst_floatx80_lt_quiet:
 	movzb %al,%eax
 	ret
 
+/*
+-------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
+*/
+
+ENTRY(syst_floatx80_to_int32_round_to_zero)
+	pushl %ebp
+	movl %esp,%ebp
+	subl $12,%esp
+	fldt 8(%ebp)
+	fnstcw -4(%ebp)
+	movl -4(%ebp),%edx
+	movb $12,%dh
+	movl %edx,-12(%ebp)
+	fldcw -12(%ebp)
+	fistpl -12(%ebp)
+	movl -12(%ebp),%eax
+	fldcw -4(%ebp)
+	leave
+	ret
+
+/*
+-------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
+*/
+
+ENTRY(syst_floatx80_to_int64_round_to_zero)
+	pushl %ebp
+	movl %esp,%ebp
+	subl $12,%esp
+	fldt 8(%ebp)
+	fnstcw -4(%ebp)
+	movl -4(%ebp),%ecx
+	movb $12,%ch
+	movl %ecx,-12(%ebp)
+	fldcw -12(%ebp)
+	fistpq -12(%ebp)
+	movl -12(%ebp),%eax
+	movl -8(%ebp),%edx
+	fldcw -4(%ebp)
+	leave
+	ret
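
Note on the ENTRY() conversion (annotation, not part of the patch): most of the diff replaces the hand-written ".align 2" / ".globl _syst_*" / "_syst_*:" prologues with the ENTRY() macro from <machine/asm.h>. The hard-coded leading underscore is the a.out symbol-naming convention, so those labels would not match the unprefixed symbols a C caller references under ELF; routing the label through ENTRY() lets one macro supply the correct symbol decoration and alignment for whichever object format the tree is built for. The sketch below shows the general shape of such a macro; it is an illustration only (CNAME is an invented name here), not NetBSD's actual <machine/asm.h> definition:

	/* Illustrative only -- NOT the real <machine/asm.h>. */
	#ifdef __ELF__
	#define CNAME(x)	x		/* ELF: C name == assembler name */
	#else
	#define CNAME(x)	_ ## x		/* a.out: prepend an underscore  */
	#endif

	#define ENTRY(x)			\
		.text;				\
		.align	2;			\
		.globl	CNAME(x);		\
		CNAME(x):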
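
Note on the two new round-to-zero routines (annotation, not part of the patch): fistpl/fistpq convert according to whatever rounding mode is currently programmed in the x87 control word, so to get round-toward-zero semantics regardless of that mode each routine saves the control word with fnstcw, forces its high byte to 0x0C (movb $12,%dh / movb $12,%ch), which sets the rounding-control field in bits 10-11 to 11 = truncate (clearing the precision-control bits as a side effect, which is harmless for an integer store), reloads it with fldcw, performs the conversion, and finally restores the caller's control word. A rough C model of the same save/force/convert/restore sequence is sketched below for reference; the _ref name and the use of long double as a stand-in for floatx80 are illustrative, not part of the patch:

	#include <fenv.h>
	#include <math.h>
	#include <stdint.h>

	#pragma STDC FENV_ACCESS ON		/* rounding mode is changed at run time */

	int64_t
	syst_floatx80_to_int64_round_to_zero_ref(long double a)
	{
		int old = fegetround();		/* fnstcw: remember the caller's mode    */
		fesetround(FE_TOWARDZERO);	/* fldcw with RC = 11: truncate          */
		long long r = llrintl(a);	/* fistpq: honours the current mode      */
		fesetround(old);		/* fldcw: restore the saved control word */
		return (int64_t)r;
	}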