Pull changes from gcc3 back to properly emit calls for the _Qp* routines

correctly.

Enable -msoft-quad-float as the default moving forward. A complete build with
this compiler produces no errors and passes known regression tests (awk,
etc.).

XXX - -mhard-quad-float will no longer generate code correctly (internal
compiler errors). This is the case all the way to gcc-current, so it's not
an easy fix at the moment, but no one should be using hard quad math anyway.
This commit is contained in:
jmc 2002-03-28 08:14:55 +00:00
parent c670c2a754
commit d3debc5e93
4 changed files with 865 additions and 73 deletions

View File

@ -15,6 +15,11 @@
#include <sparc/sp64-elf.h>
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
(MASK_V9 + MASK_PTR64 + MASK_64BIT + /* MASK_HARD_QUAD */ \
+ MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
#undef SPARC_DEFAULT_CMODEL
#define SPARC_DEFAULT_CMODEL CM_MEDANY

View File

@ -4578,6 +4578,152 @@ output_cbranch (op, label, reversed, annul, noop, insn)
return string;
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
   values as arguments instead of the TFmode registers themselves,
   that's why we cannot call emit_float_lib_cmp.  */
void
sparc_emit_float_lib_cmp (x, y, comparison)
     rtx x, y;
     enum rtx_code comparison;
{
  /* Points at string literals only, so it must be const-qualified.  */
  const char *qpfunc;
  rtx slot0, slot1, result, tem, tem2;
  enum machine_mode mode;

  /* Select the comparison routine: pointer-argument _Qp_* for the
     64-bit ABI, value-argument _Q_* for the 32-bit ABI.  */
  switch (comparison)
    {
    case EQ:
      qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
      break;

    case NE:
      qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
      break;

    case GT:
      qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
      break;

    case GE:
      qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
      break;

    case LT:
      qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
      break;

    case LE:
      qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
      break;

      /* Unordered comparisons would all go through the generic _Qp_cmp
	 / _Q_cmp routine, which returns the full relation.  Disabled
	 until the branch expanders that would generate them (see the
	 commented-out bunordered etc. patterns in sparc.md) are enabled.
    case UNORDERED:
    case UNGT:
    case UNLT:
    case UNEQ:
    case UNGE:
    case UNLE:
    case LTGT:
      qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
      break;
      */

    default:
      /* No other rtx codes reach here from the comparison expanders.  */
      abort ();
      break;
    }

  if (TARGET_ARCH64)
    {
      /* The _Qp_* routines take pointers to TFmode values, so spill
	 any operand that is not already in memory to a stack slot
	 and pass the slot addresses.  */
      if (GET_CODE (x) != MEM)
	{
	  slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
	  emit_insn (gen_rtx_SET (VOIDmode, slot0, x));
	}
      else
	slot0 = x;

      if (GET_CODE (y) != MEM)
	{
	  slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
	  emit_insn (gen_rtx_SET (VOIDmode, slot1, y));
	}
      else
	slot1 = y;

      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
			 DImode, 2,
			 XEXP (slot0, 0), Pmode,
			 XEXP (slot1, 0), Pmode);

      mode = DImode;
    }
  else
    {
      /* The 32-bit _Q_* routines take the TFmode values directly.  */
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
			 SImode, 2,
			 x, TFmode, y, TFmode);

      mode = SImode;
    }


  /* Immediately move the result of the libcall into a pseudo
     register so reload doesn't clobber the value if it needs
     the return register for a spill reg.  */
  result = gen_reg_rtx (mode);
  emit_move_insn (result, hard_libcall_value (mode));

  switch (comparison)
    {
    default:
      /* The f{eq,ne,gt,ge,lt,le} routines return nonzero iff the
	 relation holds, so the branch tests the result against zero.  */
      emit_cmp_insn (result, const0_rtx, NE,
		     NULL_RTX, mode, 0, 0);
      break;
      /* The cases below decode the 0..3 relation code returned by the
	 generic _Qp_cmp / _Q_cmp routine; disabled together with the
	 unordered cases in the first switch above.
    case ORDERED:
    case UNORDERED:
      emit_cmp_insn (result, GEN_INT(3),
		     (comparison == UNORDERED) ? EQ : NE,
		     NULL_RTX, mode, 0, 0);
      break;
    case UNGT:
    case UNGE:
      emit_cmp_insn (result, const1_rtx,
		     (comparison == UNGT) ? GT : NE,
		     NULL_RTX, mode, 0, 0);
      break;
    case UNLE:
      emit_cmp_insn (result, const2_rtx, NE,
		     NULL_RTX, mode, 0, 0);
      break;
    case UNLT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_andsi3 (tem, result, const1_rtx));
      else
	emit_insn (gen_anddi3 (tem, result, const1_rtx));
      emit_cmp_insn (tem, const0_rtx, NE,
		     NULL_RTX, mode, 0, 0);
      break;
    case UNEQ:
    case LTGT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_addsi3 (tem, result, const1_rtx));
      else
	emit_insn (gen_adddi3 (tem, result, const1_rtx));
      tem2 = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
      else
	emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
      emit_cmp_insn (tem2, const0_rtx,
		     (comparison == UNEQ) ? EQ : NE,
		     NULL_RTX, mode, 0, 0);
      break;  */
    }
}
/* Return the string to output a conditional branch to LABEL, testing
register REG. LABEL is the operand number of the label; REG is the
operand number of the reg. OP is the conditional expression. The mode

View File

@ -2625,26 +2625,25 @@ do { \
#define MULSI3_LIBCALL "*.umul"
/* Define library calls for quad FP operations. These are all part of the
SPARC ABI.
??? ARCH64 still does not work as the _Qp_* routines take pointers. */
#define ADDTF3_LIBCALL (TARGET_ARCH64 ? "_Qp_add" : "_Q_add")
#define SUBTF3_LIBCALL (TARGET_ARCH64 ? "_Qp_sub" : "_Q_sub")
#define NEGTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_neg" : "_Q_neg")
#define MULTF3_LIBCALL (TARGET_ARCH64 ? "_Qp_mul" : "_Q_mul")
#define DIVTF3_LIBCALL (TARGET_ARCH64 ? "_Qp_div" : "_Q_div")
#define FLOATSITF2_LIBCALL (TARGET_ARCH64 ? "_Qp_itoq" : "_Q_itoq")
#define FIX_TRUNCTFSI2_LIBCALL (TARGET_ARCH64 ? "_Qp_qtoi" : "_Q_qtoi")
#define FIXUNS_TRUNCTFSI2_LIBCALL (TARGET_ARCH64 ? "_Qp_qtoui" : "_Q_qtou")
#define EXTENDSFTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_stoq" : "_Q_stoq")
#define TRUNCTFSF2_LIBCALL (TARGET_ARCH64 ? "_Qp_qtos" : "_Q_qtos")
#define EXTENDDFTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_dtoq" : "_Q_dtoq")
#define TRUNCTFDF2_LIBCALL (TARGET_ARCH64 ? "_Qp_qtod" : "_Q_qtod")
#define EQTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq")
#define NETF2_LIBCALL (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne")
#define GTTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt")
#define GETF2_LIBCALL (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge")
#define LTTF2_LIBCALL (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt")
#define LETF2_LIBCALL (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle")
SPARC 32bit ABI. */
#define ADDTF3_LIBCALL "_Q_add"
#define SUBTF3_LIBCALL "_Q_sub"
#define NEGTF2_LIBCALL "_Q_neg"
#define MULTF3_LIBCALL "_Q_mul"
#define DIVTF3_LIBCALL "_Q_div"
#define FLOATSITF2_LIBCALL "_Q_itoq"
#define FIX_TRUNCTFSI2_LIBCALL "_Q_qtoi"
#define FIXUNS_TRUNCTFSI2_LIBCALL "_Q_qtou"
#define EXTENDSFTF2_LIBCALL "_Q_stoq"
#define TRUNCTFSF2_LIBCALL "_Q_qtos"
#define EXTENDDFTF2_LIBCALL "_Q_dtoq"
#define TRUNCTFDF2_LIBCALL "_Q_qtod"
#define EQTF2_LIBCALL "_Q_feq"
#define NETF2_LIBCALL "_Q_fne"
#define GTTF2_LIBCALL "_Q_fgt"
#define GETF2_LIBCALL "_Q_fge"
#define LTTF2_LIBCALL "_Q_flt"
#define LETF2_LIBCALL "_Q_fle"
/* We can define the TFmode sqrt optab only if TARGET_FPU. This is because
with soft-float, the SFmode and DFmode sqrt instructions will be absent,
@ -2652,34 +2651,37 @@ do { \
for calls to the builtin function sqrt, but this fails. */
#define INIT_TARGET_OPTABS \
do { \
add_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, ADDTF3_LIBCALL); \
sub_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, SUBTF3_LIBCALL); \
neg_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, NEGTF2_LIBCALL); \
smul_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, MULTF3_LIBCALL); \
flodiv_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, DIVTF3_LIBCALL); \
eqtf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EQTF2_LIBCALL); \
netf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, NETF2_LIBCALL); \
gttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GTTF2_LIBCALL); \
getf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GETF2_LIBCALL); \
lttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LTTF2_LIBCALL); \
letf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LETF2_LIBCALL); \
trunctfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFSF2_LIBCALL); \
trunctfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFDF2_LIBCALL); \
extendsftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDSFTF2_LIBCALL); \
extenddftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDDFTF2_LIBCALL); \
floatsitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, FLOATSITF2_LIBCALL); \
fixtfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, FIX_TRUNCTFSI2_LIBCALL); \
fixunstfsi_libfunc \
= gen_rtx_SYMBOL_REF (Pmode, FIXUNS_TRUNCTFSI2_LIBCALL); \
if (TARGET_FPU) \
sqrt_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, "_Q_sqrt"); \
INIT_SUBTARGET_OPTABS; \
if (TARGET_ARCH32) \
{ \
add_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, ADDTF3_LIBCALL); \
sub_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, SUBTF3_LIBCALL); \
neg_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, NEGTF2_LIBCALL); \
smul_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, MULTF3_LIBCALL); \
flodiv_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, DIVTF3_LIBCALL); \
eqtf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EQTF2_LIBCALL); \
netf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, NETF2_LIBCALL); \
gttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GTTF2_LIBCALL); \
getf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GETF2_LIBCALL); \
lttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LTTF2_LIBCALL); \
letf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LETF2_LIBCALL); \
trunctfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFSF2_LIBCALL); \
trunctfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFDF2_LIBCALL); \
extendsftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDSFTF2_LIBCALL);\
extenddftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDDFTF2_LIBCALL);\
floatsitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, FLOATSITF2_LIBCALL); \
fixtfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, FIX_TRUNCTFSI2_LIBCALL); \
fixunstfsi_libfunc \
= gen_rtx_SYMBOL_REF (Pmode, FIXUNS_TRUNCTFSI2_LIBCALL); \
if (TARGET_FPU) \
sqrt_optab->handlers[(int) TFmode].libfunc \
= gen_rtx_SYMBOL_REF (Pmode, "_Q_sqrt"); \
} \
INIT_SUBTARGET_OPTABS; \
} while (0)
/* This is meant to be redefined in the host dependent files */

View File

@ -837,7 +837,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -890,7 +890,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -911,7 +911,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -932,7 +932,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -953,7 +953,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -974,7 +974,7 @@
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
emit_insn (gen_sne (operands[0]));
DONE;
}
@ -1608,7 +1608,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1632,7 +1632,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1656,7 +1656,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1690,7 +1690,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1724,7 +1724,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1758,7 +1758,7 @@
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
}
@ -1774,6 +1774,145 @@
"
{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
}")
;;(define_expand "bunordered"
;; [(set (pc)
;; (if_then_else (unordered (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1,
;; UNORDERED);
;; emit_jump_insn (gen_beq (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNORDERED, sparc_compare_op0,
;; sparc_compare_op1);
;;}")
;;(define_expand "bordered"
;; [(set (pc)
;; (if_then_else (ordered (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, ORDERED);
;; emit_jump_insn (gen_bne (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (ORDERED, sparc_compare_op0,
;; sparc_compare_op1);
;;}")
;;
;;(define_expand "bungt"
;; [(set (pc)
;; (if_then_else (ungt (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGT);
;; emit_jump_insn (gen_bgt (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNGT, sparc_compare_op0, sparc_compare_op1);
;;}")
;;
;;(define_expand "bunlt"
;; [(set (pc)
;; (if_then_else (unlt (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLT);
;; emit_jump_insn (gen_bne (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNLT, sparc_compare_op0, sparc_compare_op1);
;;}")
;;
;;(define_expand "buneq"
;; [(set (pc)
;; (if_then_else (uneq (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNEQ);
;; emit_jump_insn (gen_beq (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNEQ, sparc_compare_op0, sparc_compare_op1);
;;}")
;;
;;(define_expand "bunge"
;; [(set (pc)
;; (if_then_else (unge (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGE);
;; emit_jump_insn (gen_bne (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNGE, sparc_compare_op0, sparc_compare_op1);
;;}")
;;
;;(define_expand "bunle"
;; [(set (pc)
;; (if_then_else (unle (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLE);
;; emit_jump_insn (gen_bne (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (UNLE, sparc_compare_op0, sparc_compare_op1);
;;}")
;;
;;(define_expand "bltgt"
;; [(set (pc)
;; (if_then_else (ltgt (match_dup 1) (const_int 0))
;; (label_ref (match_operand 0 "" ""))
;; (pc)))]
;; ""
;; "
;;{
;; if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
;; {
;; sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LTGT);
;; emit_jump_insn (gen_bne (operands[0]));
;; DONE;
;; }
;; operands[1] = gen_compare_reg (LTGT, sparc_compare_op0, sparc_compare_op1);
;;}")
;; Now match both normal and inverted jump.
@ -4518,16 +4657,70 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "extendsftf2"
(define_expand "extendsftf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_stoq\"), 0,
VOIDmode, 2,
XEXP (slot0, 0), Pmode,
operands[1], SFmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*extendsftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
"fstoq\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "extenddftf2"
(define_expand "extenddftf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_dtoq\"), 0,
VOIDmode, 2,
XEXP (slot0, 0), Pmode,
operands[1], DFmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*extenddftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
@ -4545,7 +4738,33 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "trunctfsf2"
(define_expand "trunctfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtos\"),
operands[0], 0, SFmode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}
}")
(define_insn "*trunctfsf2_hq"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
@ -4554,7 +4773,33 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "trunctfdf2"
(define_expand "trunctfdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtod\"),
operands[0], 0, DFmode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}
}")
(define_insn "*trunctfdf2_hq"
[(set (match_operand:DF 0 "register_operand" "=e")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
@ -4581,7 +4826,33 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "floatsitf2"
(define_expand "floatsitf2"
  [(set (match_operand:TF 0 "register_operand" "=e")
	(float:TF (match_operand:SI 1 "register_operand" "f")))]
  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
  "
{
  if (! TARGET_HARD_QUAD)
    {
      rtx slot0;

      /* slot0 is the TFmode result slot _Qp_itoq writes through, so it
	 must be derived from the destination, operands[0] -- not from
	 the SImode source operand.  */
      if (GET_CODE (operands[0]) != MEM)
	slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
      else
	slot0 = operands[0];

      emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_itoq\"), 0,
			 VOIDmode, 2,
			 XEXP (slot0, 0), Pmode,
			 operands[1], SImode);

      if (GET_CODE (operands[0]) != MEM)
	emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
      DONE;
    }
}")
(define_insn "*floatsitf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
@ -4589,6 +4860,29 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_expand "floatunssitf2"
  [(set (match_operand:TF 0 "register_operand" "=e")
	(unsigned_float:TF (match_operand:SI 1 "register_operand" "e")))]
  "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
  "
{
  rtx slot0;

  /* slot0 is the TFmode result slot _Qp_uitoq writes through, so it
     must be derived from the destination, operands[0] -- not from the
     SImode source operand.  */
  if (GET_CODE (operands[0]) != MEM)
    slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
  else
    slot0 = operands[0];

  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uitoq\"), 0,
		     VOIDmode, 2,
		     XEXP (slot0, 0), Pmode,
		     operands[1], SImode);

  if (GET_CODE (operands[0]) != MEM)
    emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
  DONE;
}")
;; Now the same for 64 bit sources.
(define_insn "floatdisf2"
@ -4607,7 +4901,33 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "floatditf2"
(define_expand "floatditf2"
  [(set (match_operand:TF 0 "register_operand" "=e")
	(float:TF (match_operand:DI 1 "register_operand" "e")))]
  "TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
  "
{
  if (! TARGET_HARD_QUAD)
    {
      rtx slot0;

      /* slot0 is the TFmode result slot _Qp_xtoq writes through, so it
	 must be derived from the destination, operands[0] -- not from
	 the DImode source operand.  */
      if (GET_CODE (operands[0]) != MEM)
	slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
      else
	slot0 = operands[0];

      emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_xtoq\"), 0,
			 VOIDmode, 2,
			 XEXP (slot0, 0), Pmode,
			 operands[1], DImode);

      if (GET_CODE (operands[0]) != MEM)
	emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
      DONE;
    }
}")
(define_insn "*floatditf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:DI 1 "register_operand" "e")))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
@ -4615,6 +4935,29 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_expand "floatunsditf2"
  [(set (match_operand:TF 0 "register_operand" "=e")
	(unsigned_float:TF (match_operand:DI 1 "register_operand" "e")))]
  "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
  "
{
  rtx slot0;

  /* slot0 is the TFmode result slot _Qp_uxtoq writes through, so it
     must be derived from the destination, operands[0] -- not from the
     DImode source operand.  */
  if (GET_CODE (operands[0]) != MEM)
    slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
  else
    slot0 = operands[0];

  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uxtoq\"), 0,
		     VOIDmode, 2,
		     XEXP (slot0, 0), Pmode,
		     operands[1], DImode);

  if (GET_CODE (operands[0]) != MEM)
    emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
  DONE;
}")
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
@ -4634,7 +4977,32 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "fix_trunctfsi2"
(define_expand "fix_trunctfsi2"
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoi\"),
operands[0], 0, SImode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}
}")
(define_insn "*fix_trunctfsi2_hq"
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_HARD_QUAD"
@ -4642,6 +5010,28 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_expand "fixuns_trunctfsi2"
[(set (match_operand:SI 0 "register_operand" "=f")
(unsigned_fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
"
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoui\"),
operands[0], 0, SImode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}")
;; Now the same, for V9 targets
(define_insn "fix_truncsfdi2"
@ -4660,13 +5050,61 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "fix_trunctfdi2"
(define_expand "fix_trunctfdi2"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtox\"),
operands[0], 0, DImode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}
}")
(define_insn "*fix_trunctfdi2_hq"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fqtox\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_expand "fixuns_trunctfdi2"
[(set (match_operand:DI 0 "register_operand" "=f")
(unsigned_fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
"
{
rtx slot0;
if (GET_CODE (operands[1]) != MEM)
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
}
else
slot0 = operands[1];
emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoux\"),
operands[0], 0, DImode, 1,
XEXP (slot0, 0), Pmode);
DONE;
}")
;;- arithmetic instructions
@ -6592,7 +7030,49 @@
;; Floating point arithmetic instructions.
(define_insn "addtf3"
(define_expand "addtf3"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(plus:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0, slot1, slot2;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
if (GET_CODE (operands[1]) != MEM)
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
}
else
slot1 = operands[1];
if (GET_CODE (operands[2]) != MEM)
{
slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
}
else
slot2 = operands[2];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_add\"), 0,
VOIDmode, 3,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode,
XEXP (slot2, 0), Pmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*addtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(plus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@ -6619,7 +7099,49 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "subtf3"
(define_expand "subtf3"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(minus:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0, slot1, slot2;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
if (GET_CODE (operands[1]) != MEM)
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
}
else
slot1 = operands[1];
if (GET_CODE (operands[2]) != MEM)
{
slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
}
else
slot2 = operands[2];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sub\"), 0,
VOIDmode, 3,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode,
XEXP (slot2, 0), Pmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*subtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(minus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@ -6646,7 +7168,49 @@
[(set_attr "type" "fp")
(set_attr "length" "1")])
(define_insn "multf3"
(define_expand "multf3"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(mult:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0, slot1, slot2;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
if (GET_CODE (operands[1]) != MEM)
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
}
else
slot1 = operands[1];
if (GET_CODE (operands[2]) != MEM)
{
slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
}
else
slot2 = operands[2];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_mul\"), 0,
VOIDmode, 3,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode,
XEXP (slot2, 0), Pmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*multf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@ -6691,8 +7255,50 @@
[(set_attr "type" "fpmul")
(set_attr "length" "1")])
(define_expand "divtf3"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(div:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0, slot1, slot2;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
if (GET_CODE (operands[1]) != MEM)
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
}
else
slot1 = operands[1];
if (GET_CODE (operands[2]) != MEM)
{
slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
}
else
slot2 = operands[2];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_div\"), 0,
VOIDmode, 3,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode,
XEXP (slot2, 0), Pmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
;; don't have timing for quad-prec. divide.
(define_insn "divtf3"
(define_insn "*divtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(div:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
@ -6963,7 +7569,40 @@
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
(define_insn "sqrttf2"
(define_expand "sqrttf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
"
{
if (! TARGET_HARD_QUAD)
{
rtx slot0, slot1;
if (GET_CODE (operands[0]) != MEM)
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
else
slot0 = operands[0];
if (GET_CODE (operands[1]) != MEM)
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
}
else
slot1 = operands[1];
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sqrt\"), 0,
VOIDmode, 2,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode);
if (GET_CODE (operands[0]) != MEM)
emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
DONE;
}
}")
(define_insn "*sqrttf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"