import GCC 4.1 branch from 20061021.

Includes fixes for these 109 GCC PRs:

 4520 13685 13983 17519 19505 20256 22209 22313 23454 24367 25211
25468 25519 25636 25636 26435 26504 26570 26719 26764 26881 26969
26983 26991 26991 26993 27075 27184 27226 27287 27287 27291 27334
27363 27428 27489 27490 27537 27558 27565 27566 27616 27639 27681
27697 27721 27724 27768 27793 27793 27795 27827 27878 27889 27893
28029 28075 28136 28148 28150 28162 28170 28187 28207 28207 28218
28221 28238 28243 28247 28257 28259 28267 28283 28286 28299 28386
28402 28403 28418 28473 28490 28493 28621 28634 28636 28649 28651
28677 28683 28726 28814 28825 28862 28900 28924 28946 28952 28960
28980 29006 29091 29119 29132 29154 29198 29230 29290 29323
mrg 2006-10-21 22:40:12 +00:00
parent 622e071e10
commit 3849e25709
31 changed files with 686 additions and 332 deletions

View File

@ -4827,6 +4827,16 @@ handle_weakref_attribute (tree *node, tree ARG_UNUSED (name), tree args,
{
tree attr = NULL_TREE;
/* We must ignore the attribute when it is associated with
local-scoped decls, since attribute alias is ignored and many
such symbols do not even have a DECL_WEAK field. */
if (decl_function_context (*node) || current_function_decl)
{
warning (OPT_Wattributes, "%qE attribute ignored", name);
*no_add_attrs = true;
return NULL_TREE;
}
/* The idea here is that `weakref("name")' mutates into `weakref,
alias("name")', and weakref without arguments, in turn,
implicitly adds weak. */
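As a hedged illustration of the comment above (the names stub and real_impl are made up), the attribute is used on a file-scope declaration like this; the hunk above warns and ignores it for declarations local to a function:

/* weakref("name") behaves like weakref, alias("name"), and weakref
   without arguments implicitly adds weak, so the reference to
   real_impl may stay unresolved at link time.  */
static void stub (void) __attribute__ ((weakref ("real_impl")));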

View File

@ -187,7 +187,7 @@ gimplify_compound_literal_expr (tree *expr_p, tree *pre_p)
/* This decl isn't mentioned in the enclosing block, so add it to the
list of temps. FIXME it seems a bit of a kludge to say that
anonymous artificial vars aren't pushed, but everything else is. */
if (DECL_NAME (decl) == NULL_TREE)
if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
gimple_add_tmp_var (decl);
gimplify_and_add (decl_s, pre_p);

View File

@ -3314,6 +3314,8 @@ c_parser_compound_statement_nostart (c_parser *parser)
last_stmt = true;
c_parser_statement_after_labels (parser);
}
parser->error = false;
}
if (last_label)
error ("label at end of compound statement");
@ -5201,7 +5203,7 @@ c_parser_postfix_expression_after_paren_type (c_parser *parser,
struct c_expr expr;
start_init (NULL_TREE, NULL, 0);
type = groktypename (type_name);
if (C_TYPE_VARIABLE_SIZE (type))
if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
{
error ("compound literal has variable size");
type = error_mark_node;
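A minimal, intentionally invalid sketch of what the C_TYPE_VARIABLE_SIZE check rejects (the added error_mark_node test additionally keeps an already-erroneous type name from being inspected further):

void f (int n)
{
  /* Rejected with: "compound literal has variable size".  */
  int *p = (int [n]){ 0 };
  (void) p;
}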

View File

@ -158,6 +158,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy))
}
else if (token == CPP_NUMBER)
{
if (TREE_CODE (x) != INTEGER_CST)
GCC_BAD ("invalid constant in %<#pragma pack%> - ignored");
align = TREE_INT_CST_LOW (x);
action = set;
if (c_lex (&x) != CPP_CLOSE_PAREN)
@ -188,6 +190,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy))
}
else if (token == CPP_NUMBER && action == push && align == -1)
{
if (TREE_CODE (x) != INTEGER_CST)
GCC_BAD ("invalid constant in %<#pragma pack%> - ignored");
align = TREE_INT_CST_LOW (x);
if (align == -1)
action = set;
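A hedged example of the input the new INTEGER_CST checks guard against: a numeric token that is not an integer constant, such as a floating-point literal, is now diagnosed and the pragma ignored instead of having TREE_INT_CST_LOW applied to it.

/* Diagnosed: invalid constant in '#pragma pack' - ignored.  */
#pragma pack(0.5)
struct s { char c; int i; };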

View File

@ -1734,11 +1734,17 @@ build_component_ref (tree datum, tree component)
do
{
tree subdatum = TREE_VALUE (field);
int quals;
tree subtype;
if (TREE_TYPE (subdatum) == error_mark_node)
return error_mark_node;
ref = build3 (COMPONENT_REF, TREE_TYPE (subdatum), datum, subdatum,
quals = TYPE_QUALS (strip_array_types (TREE_TYPE (subdatum)));
quals |= TYPE_QUALS (TREE_TYPE (datum));
subtype = c_build_qualified_type (TREE_TYPE (subdatum), quals);
ref = build3 (COMPONENT_REF, subtype, datum, subdatum,
NULL_TREE);
if (TREE_READONLY (datum) || TREE_READONLY (subdatum))
TREE_READONLY (ref) = 1;
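A short sketch of the visible effect, assuming the usual C rule that a member of a const-qualified object is itself const-qualified: since the COMPONENT_REF now carries the merged qualifiers in its type, constructs such as __typeof__ see them.

struct pt { int x; };
extern const struct pt origin;
/* With the qualifiers propagated, __typeof__ (origin.x) is
   'const int' rather than plain 'int'.  */
__typeof__ (origin.x) frozen_x;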
@ -3299,6 +3305,9 @@ build_compound_expr (tree expr1, tree expr2)
else if (warn_unused_value)
warn_if_unused_value (expr1, input_location);
if (expr2 == error_mark_node)
return error_mark_node;
return build2 (COMPOUND_EXPR, TREE_TYPE (expr2), expr1, expr2);
}
@ -3540,6 +3549,9 @@ build_modify_expr (tree lhs, enum tree_code modifycode, tree rhs)
if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
return error_mark_node;
if (!lvalue_or_else (lhs, lv_assign))
return error_mark_node;
STRIP_TYPE_NOPS (rhs);
newrhs = rhs;
@ -3553,9 +3565,6 @@ build_modify_expr (tree lhs, enum tree_code modifycode, tree rhs)
newrhs = build_binary_op (modifycode, lhs, rhs, 1);
}
if (!lvalue_or_else (lhs, lv_assign))
return error_mark_node;
/* Give an error for storing in something that is 'const'. */
if (TREE_READONLY (lhs) || TYPE_READONLY (lhstype)
@ -4204,16 +4213,18 @@ store_init_value (tree decl, tree init)
if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
{
tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
tree cldecl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
if (TYPE_DOMAIN (TREE_TYPE (decl)))
if (TYPE_DOMAIN (TREE_TYPE (cldecl)))
{
/* For int foo[] = (int [3]){1}; we need to set array size
now since later on array initializer will be just the
brace enclosed list of the compound literal. */
TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (decl));
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TREE_TYPE (decl) = type;
TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (cldecl));
layout_type (type);
layout_decl (decl, 0);
layout_decl (cldecl, 0);
}
}
}
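Spelled out as a translation-unit-level example (a GNU extension, taken from the comment in the hunk), the declared array gets its size from the compound literal's type before the initializer is reduced to its brace-enclosed list:

int foo[] = (int [3]){ 1 };   /* foo ends up with three elements */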

View File

@ -58,7 +58,9 @@ add_reg_br_prob_note (FILE *dump_file, rtx last, int probability)
if (!any_condjump_p (last)
|| !JUMP_P (NEXT_INSN (last))
|| !simplejump_p (NEXT_INSN (last))
|| !NEXT_INSN (NEXT_INSN (last))
|| !BARRIER_P (NEXT_INSN (NEXT_INSN (last)))
|| !NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))
|| !LABEL_P (NEXT_INSN (NEXT_INSN (NEXT_INSN (last))))
|| NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))))
goto failed;

View File

@ -7022,7 +7022,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
nonzero = nonzero_bits (x, mode);
/* If none of the bits in X are needed, return a zero. */
if (! just_select && (nonzero & mask) == 0)
if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
x = const0_rtx;
/* If X is a CONST_INT, return a new one. Do this here since the
@ -8802,14 +8802,14 @@ simplify_shift_const (rtx x, enum rtx_code code,
== 0))
code = LSHIFTRT;
if (code == LSHIFTRT
&& GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
&& !(nonzero_bits (varop, shift_mode) >> count))
varop = const0_rtx;
if (code == ASHIFT
&& GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
&& !((nonzero_bits (varop, shift_mode) << count)
& GET_MODE_MASK (shift_mode)))
if (((code == LSHIFTRT
&& GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
&& !(nonzero_bits (varop, shift_mode) >> count))
|| (code == ASHIFT
&& GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
&& !((nonzero_bits (varop, shift_mode) << count)
& GET_MODE_MASK (shift_mode))))
&& !side_effects_p (varop))
varop = const0_rtx;
switch (GET_CODE (varop))
@ -9443,9 +9443,12 @@ simplify_shift_const (rtx x, enum rtx_code code,
if (outer_op == AND)
x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
else if (outer_op == SET)
/* This means that we have determined that the result is
equivalent to a constant. This should be rare. */
x = GEN_INT (outer_const);
{
/* This means that we have determined that the result is
equivalent to a constant. This should be rare. */
if (!side_effects_p (x))
x = GEN_INT (outer_const);
}
else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
else
@ -12260,12 +12263,14 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
continue;
}
/* If the register is being set at TEM, see if that is all
TEM is doing. If so, delete TEM. Otherwise, make this
into a REG_UNUSED note instead. Don't delete sets to
global register vars. */
if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
|| !global_regs[REGNO (XEXP (note, 0))])
/* If TEM is a (reaching) definition of the use to which the
note was attached, see if that is all TEM is doing. If so,
delete TEM. Otherwise, make this into a REG_UNUSED note
instead. Don't delete sets to global register vars. */
if ((!from_insn
|| INSN_CUID (tem) < INSN_CUID (from_insn))
&& (REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
|| !global_regs[REGNO (XEXP (note, 0))])
&& reg_set_p (XEXP (note, 0), PATTERN (tem)))
{
rtx set = single_set (tem);
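A hedged C-level illustration of what the new side_effects_p guards protect, assuming the side effect in question is a volatile access: even when the arithmetic result is provably zero, the access itself must survive.

extern volatile unsigned status_reg;

unsigned poll_low_bits (void)
{
  /* The result is always 0, but the volatile load must remain.  */
  return status_reg & 0;
}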

View File

@ -1,6 +1,6 @@
/* Utility routines for data type conversion for GCC.
Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
@ -355,27 +355,36 @@ convert_to_integer (tree type, tree expr)
/* Only convert in ISO C99 mode. */
if (!TARGET_C99_FUNCTIONS)
break;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node))
fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
else
if (outprec < TYPE_PRECISION (long_integer_type_node)
|| (outprec == TYPE_PRECISION (long_integer_type_node)
&& !TYPE_UNSIGNED (type)))
fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
&& !TYPE_UNSIGNED (type))
fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
break;
case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: case BUILT_IN_FLOORL:
/* Only convert in ISO C99 mode. */
if (!TARGET_C99_FUNCTIONS)
break;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node))
fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
else
if (outprec < TYPE_PRECISION (long_integer_type_node)
|| (outprec == TYPE_PRECISION (long_integer_type_node)
&& !TYPE_UNSIGNED (type)))
fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
&& !TYPE_UNSIGNED (type))
fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
break;
case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL:
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node))
fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
else
if (outprec < TYPE_PRECISION (long_integer_type_node)
|| (outprec == TYPE_PRECISION (long_integer_type_node)
&& !TYPE_UNSIGNED (type)))
fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
&& !TYPE_UNSIGNED (type))
fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
break;
case BUILT_IN_RINT: case BUILT_IN_RINTF: case BUILT_IN_RINTL:
@ -383,11 +392,16 @@ convert_to_integer (tree type, tree expr)
if (flag_trapping_math)
break;
/* ... Fall through ... */
case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL:
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node))
fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
else
fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
case BUILT_IN_NEARBYINT:
case BUILT_IN_NEARBYINTF:
case BUILT_IN_NEARBYINTL:
if (outprec < TYPE_PRECISION (long_integer_type_node)
|| (outprec == TYPE_PRECISION (long_integer_type_node)
&& !TYPE_UNSIGNED (type)))
fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
&& !TYPE_UNSIGNED (type))
fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
break;
case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: case BUILT_IN_TRUNCL:
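A hedged sketch of the calls this affects, assuming a target that provides the C99 functions and typical ILP32/LP64 type widths; the builtin chosen now depends on the precision and signedness of the destination type rather than only on whether it is long long:

#include <math.h>

long to_long (double d)
{
  return (long) floor (d);        /* may become __builtin_lfloor */
}

long long to_llong (double d)
{
  return (long long) floor (d);   /* may become __builtin_llfloor */
}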

View File

@ -630,14 +630,28 @@ cselib_hash_rtx (rtx x, int create)
/* Assume there is only one rtx object for any given label. */
case LABEL_REF:
hash
+= ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
/* We don't hash on the address of the CODE_LABEL to avoid bootstrap
differences and differences between each stage's debugging dumps. */
hash += (((unsigned int) LABEL_REF << 7)
+ CODE_LABEL_NUMBER (XEXP (x, 0)));
return hash ? hash : (unsigned int) LABEL_REF;
case SYMBOL_REF:
hash
+= ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
return hash ? hash : (unsigned int) SYMBOL_REF;
{
/* Don't hash on the symbol's address to avoid bootstrap differences.
Different hash values may cause expressions to be recorded in
different orders and thus different registers to be used in the
final assembler. This also avoids differences in the dump files
between various stages. */
unsigned int h = 0;
const unsigned char *p = (const unsigned char *) XSTR (x, 0);
while (*p)
h += (h << 7) + *p++; /* ??? revisit */
hash += ((unsigned int) SYMBOL_REF << 7) + h;
return hash ? hash : (unsigned int) SYMBOL_REF;
}
case PRE_DEC:
case PRE_INC:

View File

@ -6676,16 +6676,175 @@ Generates the @code{movhps} machine instruction as a store to memory.
Generates the @code{movlps} machine instruction as a store to memory.
@end table
The following built-in functions are available when @option{-msse2} is used.
All of them generate the machine instruction that is part of the name.
@smallexample
int __builtin_ia32_comisdeq (v2df, v2df)
int __builtin_ia32_comisdlt (v2df, v2df)
int __builtin_ia32_comisdle (v2df, v2df)
int __builtin_ia32_comisdgt (v2df, v2df)
int __builtin_ia32_comisdge (v2df, v2df)
int __builtin_ia32_comisdneq (v2df, v2df)
int __builtin_ia32_ucomisdeq (v2df, v2df)
int __builtin_ia32_ucomisdlt (v2df, v2df)
int __builtin_ia32_ucomisdle (v2df, v2df)
int __builtin_ia32_ucomisdgt (v2df, v2df)
int __builtin_ia32_ucomisdge (v2df, v2df)
int __builtin_ia32_ucomisdneq (v2df, v2df)
v2df __builtin_ia32_cmpeqpd (v2df, v2df)
v2df __builtin_ia32_cmpltpd (v2df, v2df)
v2df __builtin_ia32_cmplepd (v2df, v2df)
v2df __builtin_ia32_cmpgtpd (v2df, v2df)
v2df __builtin_ia32_cmpgepd (v2df, v2df)
v2df __builtin_ia32_cmpunordpd (v2df, v2df)
v2df __builtin_ia32_cmpneqpd (v2df, v2df)
v2df __builtin_ia32_cmpnltpd (v2df, v2df)
v2df __builtin_ia32_cmpnlepd (v2df, v2df)
v2df __builtin_ia32_cmpngtpd (v2df, v2df)
v2df __builtin_ia32_cmpngepd (v2df, v2df)
v2df __builtin_ia32_cmpordpd (v2df, v2df)
v2df __builtin_ia32_cmpeqsd (v2df, v2df)
v2df __builtin_ia32_cmpltsd (v2df, v2df)
v2df __builtin_ia32_cmplesd (v2df, v2df)
v2df __builtin_ia32_cmpunordsd (v2df, v2df)
v2df __builtin_ia32_cmpneqsd (v2df, v2df)
v2df __builtin_ia32_cmpnltsd (v2df, v2df)
v2df __builtin_ia32_cmpnlesd (v2df, v2df)
v2df __builtin_ia32_cmpordsd (v2df, v2df)
v2di __builtin_ia32_paddq (v2di, v2di)
v2di __builtin_ia32_psubq (v2di, v2di)
v2df __builtin_ia32_addpd (v2df, v2df)
v2df __builtin_ia32_subpd (v2df, v2df)
v2df __builtin_ia32_mulpd (v2df, v2df)
v2df __builtin_ia32_divpd (v2df, v2df)
v2df __builtin_ia32_addsd (v2df, v2df)
v2df __builtin_ia32_subsd (v2df, v2df)
v2df __builtin_ia32_mulsd (v2df, v2df)
v2df __builtin_ia32_divsd (v2df, v2df)
v2df __builtin_ia32_minpd (v2df, v2df)
v2df __builtin_ia32_maxpd (v2df, v2df)
v2df __builtin_ia32_minsd (v2df, v2df)
v2df __builtin_ia32_maxsd (v2df, v2df)
v2df __builtin_ia32_andpd (v2df, v2df)
v2df __builtin_ia32_andnpd (v2df, v2df)
v2df __builtin_ia32_orpd (v2df, v2df)
v2df __builtin_ia32_xorpd (v2df, v2df)
v2df __builtin_ia32_movsd (v2df, v2df)
v2df __builtin_ia32_unpckhpd (v2df, v2df)
v2df __builtin_ia32_unpcklpd (v2df, v2df)
v16qi __builtin_ia32_paddb128 (v16qi, v16qi)
v8hi __builtin_ia32_paddw128 (v8hi, v8hi)
v4si __builtin_ia32_paddd128 (v4si, v4si)
v2di __builtin_ia32_paddq128 (v2di, v2di)
v16qi __builtin_ia32_psubb128 (v16qi, v16qi)
v8hi __builtin_ia32_psubw128 (v8hi, v8hi)
v4si __builtin_ia32_psubd128 (v4si, v4si)
v2di __builtin_ia32_psubq128 (v2di, v2di)
v8hi __builtin_ia32_pmullw128 (v8hi, v8hi)
v8hi __builtin_ia32_pmulhw128 (v8hi, v8hi)
v2di __builtin_ia32_pand128 (v2di, v2di)
v2di __builtin_ia32_pandn128 (v2di, v2di)
v2di __builtin_ia32_por128 (v2di, v2di)
v2di __builtin_ia32_pxor128 (v2di, v2di)
v16qi __builtin_ia32_pavgb128 (v16qi, v16qi)
v8hi __builtin_ia32_pavgw128 (v8hi, v8hi)
v16qi __builtin_ia32_pcmpeqb128 (v16qi, v16qi)
v8hi __builtin_ia32_pcmpeqw128 (v8hi, v8hi)
v4si __builtin_ia32_pcmpeqd128 (v4si, v4si)
v16qi __builtin_ia32_pcmpgtb128 (v16qi, v16qi)
v8hi __builtin_ia32_pcmpgtw128 (v8hi, v8hi)
v4si __builtin_ia32_pcmpgtd128 (v4si, v4si)
v16qi __builtin_ia32_pmaxub128 (v16qi, v16qi)
v8hi __builtin_ia32_pmaxsw128 (v8hi, v8hi)
v16qi __builtin_ia32_pminub128 (v16qi, v16qi)
v8hi __builtin_ia32_pminsw128 (v8hi, v8hi)
v16qi __builtin_ia32_punpckhbw128 (v16qi, v16qi)
v8hi __builtin_ia32_punpckhwd128 (v8hi, v8hi)
v4si __builtin_ia32_punpckhdq128 (v4si, v4si)
v2di __builtin_ia32_punpckhqdq128 (v2di, v2di)
v16qi __builtin_ia32_punpcklbw128 (v16qi, v16qi)
v8hi __builtin_ia32_punpcklwd128 (v8hi, v8hi)
v4si __builtin_ia32_punpckldq128 (v4si, v4si)
v2di __builtin_ia32_punpcklqdq128 (v2di, v2di)
v16qi __builtin_ia32_packsswb128 (v16qi, v16qi)
v8hi __builtin_ia32_packssdw128 (v8hi, v8hi)
v16qi __builtin_ia32_packuswb128 (v16qi, v16qi)
v8hi __builtin_ia32_pmulhuw128 (v8hi, v8hi)
void __builtin_ia32_maskmovdqu (v16qi, v16qi)
v2df __builtin_ia32_loadupd (double *)
void __builtin_ia32_storeupd (double *, v2df)
v2df __builtin_ia32_loadhpd (v2df, double *)
v2df __builtin_ia32_loadlpd (v2df, double *)
int __builtin_ia32_movmskpd (v2df)
int __builtin_ia32_pmovmskb128 (v16qi)
void __builtin_ia32_movnti (int *, int)
void __builtin_ia32_movntpd (double *, v2df)
void __builtin_ia32_movntdq (v2df *, v2df)
v4si __builtin_ia32_pshufd (v4si, int)
v8hi __builtin_ia32_pshuflw (v8hi, int)
v8hi __builtin_ia32_pshufhw (v8hi, int)
v2di __builtin_ia32_psadbw128 (v16qi, v16qi)
v2df __builtin_ia32_sqrtpd (v2df)
v2df __builtin_ia32_sqrtsd (v2df)
v2df __builtin_ia32_shufpd (v2df, v2df, int)
v2df __builtin_ia32_cvtdq2pd (v4si)
v4sf __builtin_ia32_cvtdq2ps (v4si)
v4si __builtin_ia32_cvtpd2dq (v2df)
v2si __builtin_ia32_cvtpd2pi (v2df)
v4sf __builtin_ia32_cvtpd2ps (v2df)
v4si __builtin_ia32_cvttpd2dq (v2df)
v2si __builtin_ia32_cvttpd2pi (v2df)
v2df __builtin_ia32_cvtpi2pd (v2si)
int __builtin_ia32_cvtsd2si (v2df)
int __builtin_ia32_cvttsd2si (v2df)
long long __builtin_ia32_cvtsd2si64 (v2df)
long long __builtin_ia32_cvttsd2si64 (v2df)
v4si __builtin_ia32_cvtps2dq (v4sf)
v2df __builtin_ia32_cvtps2pd (v4sf)
v4si __builtin_ia32_cvttps2dq (v4sf)
v2df __builtin_ia32_cvtsi2sd (v2df, int)
v2df __builtin_ia32_cvtsi642sd (v2df, long long)
v4sf __builtin_ia32_cvtsd2ss (v4sf, v2df)
v2df __builtin_ia32_cvtss2sd (v2df, v4sf)
void __builtin_ia32_clflush (const void *)
void __builtin_ia32_lfence (void)
void __builtin_ia32_mfence (void)
v16qi __builtin_ia32_loaddqu (const char *)
void __builtin_ia32_storedqu (char *, v16qi)
unsigned long long __builtin_ia32_pmuludq (v2si, v2si)
v2di __builtin_ia32_pmuludq128 (v4si, v4si)
v8hi __builtin_ia32_psllw128 (v8hi, v2di)
v4si __builtin_ia32_pslld128 (v4si, v2di)
v2di __builtin_ia32_psllq128 (v4si, v2di)
v8hi __builtin_ia32_psrlw128 (v8hi, v2di)
v4si __builtin_ia32_psrld128 (v4si, v2di)
v2di __builtin_ia32_psrlq128 (v2di, v2di)
v8hi __builtin_ia32_psraw128 (v8hi, v2di)
v4si __builtin_ia32_psrad128 (v4si, v2di)
v2di __builtin_ia32_pslldqi128 (v2di, int)
v8hi __builtin_ia32_psllwi128 (v8hi, int)
v4si __builtin_ia32_pslldi128 (v4si, int)
v2di __builtin_ia32_psllqi128 (v2di, int)
v2di __builtin_ia32_psrldqi128 (v2di, int)
v8hi __builtin_ia32_psrlwi128 (v8hi, int)
v4si __builtin_ia32_psrldi128 (v4si, int)
v2di __builtin_ia32_psrlqi128 (v2di, int)
v8hi __builtin_ia32_psrawi128 (v8hi, int)
v4si __builtin_ia32_psradi128 (v4si, int)
v4si __builtin_ia32_pmaddwd128 (v8hi, v8hi)
@end smallexample
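A hedged usage sketch, not part of the manual text: the vector argument types listed above are normally declared with the vector_size attribute, for example:

typedef double v2df __attribute__ ((vector_size (16)));

v2df add_pairs (v2df a, v2df b)
{
  return __builtin_ia32_addpd (a, b);   /* generates addpd */
}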
The following built-in functions are available when @option{-msse3} is used.
All of them generate the machine instruction that is part of the name.
@smallexample
v2df __builtin_ia32_addsubpd (v2df, v2df)
v2df __builtin_ia32_addsubps (v2df, v2df)
v4sf __builtin_ia32_addsubps (v4sf, v4sf)
v2df __builtin_ia32_haddpd (v2df, v2df)
v2df __builtin_ia32_haddps (v2df, v2df)
v4sf __builtin_ia32_haddps (v4sf, v4sf)
v2df __builtin_ia32_hsubpd (v2df, v2df)
v2df __builtin_ia32_hsubps (v2df, v2df)
v4sf __builtin_ia32_hsubps (v4sf, v4sf)
v16qi __builtin_ia32_lddqu (char const *)
void __builtin_ia32_monitor (void *, unsigned int, unsigned int)
v2df __builtin_ia32_movddup (v2df)

View File

@ -428,7 +428,7 @@ message ``never executed'' is printed.
For a call, if it was executed at least once, then a percentage
indicating the number of times the call returned divided by the number
of times the call was executed will be printed. This will usually be
100%, but may be less for functions call @code{exit} or @code{longjmp},
100%, but may be less for functions that call @code{exit} or @code{longjmp},
and thus may not return every time they are called.
The execution counts are cumulative. If the example program were

View File

@ -3463,6 +3463,11 @@ to build despite this, running into an internal error of the native
its maximum of 262144 bytes. If you have root access, you can use the
@command{systune} command to do this.
@code{wchar_t} support in @samp{libstdc++} is not available for old
IRIX 6.5.x releases, @math{x < 19}. The problem cannot be autodetected
and in order to build GCC for such targets you need to configure with
@option{--disable-wchar_t}.
See @uref{http://freeware.sgi.com/} for more
information about using GCC on IRIX platforms.
@ -3725,9 +3730,9 @@ ld: warning: relocation error: R_SPARC_UA32: @dots{}
To work around this problem, compile with @option{-gstabs+} instead of
plain @option{-g}.
When configuring the GNU Multiple Precision Library (GMP) on a Solaris 7
or later system, the canonical target triplet must be specified as the
@command{build} parameter on the configure line:
When configuring the GNU Multiple Precision Library (GMP) version 4.1.x
on a Solaris 7 or later system, the canonical target triplet must be
specified as the @command{build} parameter on the configure line:
@smallexample
./configure --build=sparc-sun-solaris2.7 --prefix=xxx --enable-mpfr

View File

@ -913,7 +913,7 @@ FIXME: discuss non-C testsuites here.
@subsection Directives used within DejaGnu tests
Test directives appear within comments in a test source file and begin
with @code{dg-}. Some of these are defined within DegaGnu and others
with @code{dg-}. Some of these are defined within DejaGnu and others
are local to the GCC testsuite.
The order in which test directives appear in a test can be important:

View File

@ -2701,6 +2701,9 @@ set_nothrow_function_flags (void)
{
rtx insn;
if (!targetm.binds_local_p (current_function_decl))
return;
TREE_NOTHROW (current_function_decl) = 1;
/* Assume cfun->all_throwers_are_sibcalls until we encounter

View File

@ -1067,7 +1067,7 @@ gimplify_decl_expr (tree *stmt_p)
{
tree init = DECL_INITIAL (decl);
if (!TREE_CONSTANT (DECL_SIZE (decl)))
if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
/* This is a variable-sized decl. Simplify its size and mark it
for deferred expansion. Note that mudflap depends on the format
@ -1717,7 +1717,7 @@ gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
bool want_value)
{
enum tree_code code;
tree lhs, lvalue, rhs, t1;
tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p;
bool postfix;
enum tree_code arith_code;
enum gimplify_status ret;
@ -1734,6 +1734,11 @@ gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
else
postfix = false;
/* For postfix, make sure the inner expression's post side effects
are executed after side effects from this expression. */
if (postfix)
post_p = &post;
/* Add or subtract? */
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
arith_code = PLUS_EXPR;
@ -1764,7 +1769,8 @@ gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
if (postfix)
{
gimplify_and_add (t1, post_p);
gimplify_and_add (t1, orig_post_p);
append_to_statement_list (post, orig_post_p);
*expr_p = lhs;
return GS_ALL_DONE;
}
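One hedged shape of expression where this ordering matters: the operand of the outer postfix increment itself queues a post side effect (advancing p), which is now collected on a separate list and appended after the outer increment's own effects.

int *bump_and_advance (int *p)
{
  (*p++)++;   /* increments the int p pointed to on entry, advances p */
  return p;
}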
@ -2517,7 +2523,7 @@ gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p,
/* If this is of variable size, we have no choice but to assume it doesn't
overlap since we can't make a temporary for it. */
if (!TREE_CONSTANT (TYPE_SIZE (TREE_TYPE (*expr_p))))
if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
return;
/* Otherwise, we must search for overlap ... */
@ -3787,9 +3793,9 @@ gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
/* If the operand is a memory input, it should be an lvalue. */
if (!allows_reg && allows_mem)
{
lang_hooks.mark_addressable (TREE_VALUE (link));
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_lvalue, fb_lvalue | fb_mayfail);
lang_hooks.mark_addressable (TREE_VALUE (link));
if (tret == GS_ERROR)
{
error ("memory input %d is not directly addressable", i);

View File

@ -1,6 +1,6 @@
/* Threads compatibility routines for libgcc2 and libobjc. */
/* Compile this one with gcc. */
/* Copyright (C) 1997, 1999, 2000, 2004, 2005
/* Copyright (C) 1997, 1999, 2000, 2004, 2005, 2006
Free Software Foundation, Inc.
This file is part of GCC.
@ -39,6 +39,12 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include <thread.h>
#include <errno.h>
#ifdef __cplusplus
#define UNUSED(x)
#else
#define UNUSED(x) x __attribute__((unused))
#endif
typedef thread_key_t __gthread_key_t;
typedef struct {
mutex_t mutex;
@ -69,16 +75,17 @@ __gthrw(thr_keycreate)
__gthrw(thr_getspecific)
__gthrw(thr_setspecific)
__gthrw(thr_create)
__gthrw(thr_self)
__gthrw(mutex_init)
__gthrw(mutex_destroy)
__gthrw(mutex_lock)
__gthrw(mutex_trylock)
__gthrw(mutex_unlock)
#ifdef _LIBOBJC
__gthrw(thr_exit)
__gthrw(thr_keycreate)
__gthrw(thr_getprio)
__gthrw(thr_self)
__gthrw(thr_setprio)
__gthrw(thr_yield)
@ -88,8 +95,6 @@ __gthrw(cond_wait)
__gthrw(cond_broadcast)
__gthrw(cond_signal)
__gthrw(mutex_init)
__gthrw(mutex_destroy)
#endif
#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
@ -434,7 +439,7 @@ __gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
}
static inline int
__gthread_key_delete (__gthread_key_t key)
__gthread_key_delete (__gthread_key_t UNUSED (key))
{
/* Not possible. */
return -1;
@ -544,4 +549,6 @@ __gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
#endif /* _LIBOBJC */
#undef UNUSED
#endif /* ! GCC_GTHR_SOLARIS_H */

View File

@ -1,5 +1,5 @@
/* Loop transformation code generation
Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Daniel Berlin <dberlin@dberlin.org>
This file is part of GCC.
@ -149,6 +149,7 @@ static lambda_lattice lambda_lattice_new (int, int);
static lambda_lattice lambda_lattice_compute_base (lambda_loopnest);
static tree find_induction_var_from_exit_cond (struct loop *);
static bool can_convert_to_perfect_nest (struct loop *);
/* Create a new lambda body vector. */
@ -1498,14 +1499,13 @@ DEF_VEC_ALLOC_P(lambda_loop,heap);
lambda_loopnest
gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
struct loop * loop_nest,
struct loop *loop_nest,
VEC(tree,heap) **inductionvars,
VEC(tree,heap) **invariants,
bool need_perfect_nest)
VEC(tree,heap) **invariants)
{
lambda_loopnest ret = NULL;
struct loop *temp;
int depth = 0;
struct loop *temp = loop_nest;
int depth = depth_of_nest (loop_nest);
size_t i;
VEC(lambda_loop,heap) *loops = NULL;
VEC(tree,heap) *uboundvars = NULL;
@ -1513,9 +1513,11 @@ gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
VEC(int,heap) *steps = NULL;
lambda_loop newloop;
tree inductionvar = NULL;
depth = depth_of_nest (loop_nest);
temp = loop_nest;
bool perfect_nest = perfect_nest_p (loop_nest);
if (!perfect_nest && !can_convert_to_perfect_nest (loop_nest))
goto fail;
while (temp)
{
newloop = gcc_loop_to_lambda_loop (temp, depth, invariants,
@ -1523,12 +1525,14 @@ gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
&lboundvars, &uboundvars,
&steps);
if (!newloop)
return NULL;
goto fail;
VEC_safe_push (tree, heap, *inductionvars, inductionvar);
VEC_safe_push (lambda_loop, heap, loops, newloop);
temp = temp->inner;
}
if (need_perfect_nest)
if (!perfect_nest)
{
if (!perfect_nestify (currloops, loop_nest,
lboundvars, uboundvars, steps, *inductionvars))
@ -1542,9 +1546,12 @@ gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
fprintf (dump_file,
"Successfully converted loop nest to perfect loop nest.\n");
}
ret = lambda_loopnest_new (depth, 2 * depth);
for (i = 0; VEC_iterate (lambda_loop, loops, i, newloop); i++)
LN_LOOPS (ret)[i] = newloop;
fail:
VEC_free (lambda_loop, heap, loops);
VEC_free (tree, heap, uboundvars);
@ -2156,13 +2163,12 @@ replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x,
{
tree use = USE_FROM_PTR (use_p);
tree step = NULL_TREE;
tree access_fn = NULL_TREE;
access_fn = instantiate_parameters
(loop, analyze_scalar_evolution (loop, use));
if (access_fn != NULL_TREE && access_fn != chrec_dont_know)
step = evolution_part_in_loop_num (access_fn, loop->num);
tree scev = instantiate_parameters (loop,
analyze_scalar_evolution (loop, use));
if (scev != NULL_TREE && scev != chrec_dont_know)
step = evolution_part_in_loop_num (scev, loop->num);
if ((step && step != chrec_dont_know
&& TREE_CODE (step) == INTEGER_CST
&& int_cst_value (step) == xstep)
@ -2171,22 +2177,6 @@ replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x,
}
}
/* Return TRUE if STMT uses tree OP in it's uses. */
static bool
stmt_uses_op (tree stmt, tree op)
{
ssa_op_iter iter;
tree use;
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
if (use == op)
return true;
}
return false;
}
/* Return true if STMT is an exit PHI for LOOP */
static bool
@ -2236,15 +2226,39 @@ can_put_in_inner_loop (struct loop *inner, tree stmt)
}
/* Return TRUE if LOOP is an imperfect nest that we can convert to a perfect
one. LOOPIVS is a vector of induction variables, one per loop.
ATM, we only handle imperfect nests of depth 2, where all of the statements
occur after the inner loop. */
/* Return true if STMT can be put *after* the inner loop of LOOP. */
static bool
can_convert_to_perfect_nest (struct loop *loop,
VEC(tree,heap) *loopivs)
can_put_after_inner_loop (struct loop *loop, tree stmt)
{
imm_use_iterator imm_iter;
use_operand_p use_p;
if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
return false;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, TREE_OPERAND (stmt, 0))
{
if (!exit_phi_for_loop_p (loop, USE_STMT (use_p)))
{
basic_block immbb = bb_for_stmt (USE_STMT (use_p));
if (!dominated_by_p (CDI_DOMINATORS,
immbb,
loop->inner->header)
&& !can_put_in_inner_loop (loop->inner, stmt))
return false;
}
}
return true;
}
/* Return TRUE if LOOP is an imperfect nest that we can convert to a
perfect one. At the moment, we only handle imperfect nests of
depth 2, where all of the statements occur after the inner loop. */
static bool
can_convert_to_perfect_nest (struct loop *loop)
{
basic_block *bbs;
tree exit_condition, phi;
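A hedged sketch of the nest shape the comment above describes, a depth-2 nest that is imperfect only because a statement follows the inner loop; whether a particular nest is actually converted depends on the checks that follow.

void sweep (int n, int m, double a[n][m], double last[n])
{
  int i, j;
  for (i = 0; i < n; i++)
    {
      for (j = 0; j < m; j++)
        a[i][j] *= 2.0;
      last[i] = a[i][m - 1];   /* statement after the inner loop */
    }
}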
@ -2264,19 +2278,13 @@ can_convert_to_perfect_nest (struct loop *loop,
{
for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
{
size_t j;
tree stmt = bsi_stmt (bsi);
tree iv;
if (stmt == exit_condition
|| not_interesting_stmt (stmt)
|| stmt_is_bumper_for_loop (loop, stmt))
continue;
/* If the statement uses inner loop ivs, we == screwed. */
for (j = 1; VEC_iterate (tree, loopivs, j, iv); j++)
if (stmt_uses_op (stmt, iv))
goto fail;
/* If this is a simple operation like a cast that is invariant
in the inner loop, only used there, and we can place it
there, then it's not going to hurt us.
@ -2286,10 +2294,65 @@ can_convert_to_perfect_nest (struct loop *loop,
theory that we are going to gain a lot more by interchanging
the loop than we are by leaving some invariant code there for
some other pass to clean up. */
if (TREE_CODE (stmt) == MODIFY_EXPR
&& is_gimple_cast (TREE_OPERAND (stmt, 1))
&& can_put_in_inner_loop (loop->inner, stmt))
continue;
if (TREE_CODE (stmt) == MODIFY_EXPR)
{
use_operand_p use_a, use_b;
imm_use_iterator imm_iter;
ssa_op_iter op_iter, op_iter1;
tree op0 = TREE_OPERAND (stmt, 0);
tree scev = instantiate_parameters
(loop, analyze_scalar_evolution (loop, op0));
/* If the IV is simple, it can be duplicated. */
if (!automatically_generated_chrec_p (scev))
{
tree step = evolution_part_in_loop_num (scev, loop->num);
if (step && step != chrec_dont_know
&& TREE_CODE (step) == INTEGER_CST)
continue;
}
/* The statement should not define a variable used
in the inner loop. */
if (TREE_CODE (op0) == SSA_NAME)
FOR_EACH_IMM_USE_FAST (use_a, imm_iter, op0)
if (bb_for_stmt (USE_STMT (use_a))->loop_father
== loop->inner)
goto fail;
FOR_EACH_SSA_USE_OPERAND (use_a, stmt, op_iter, SSA_OP_USE)
{
tree node, op = USE_FROM_PTR (use_a);
/* The variables should not be used in both loops. */
FOR_EACH_IMM_USE_FAST (use_b, imm_iter, op)
if (bb_for_stmt (USE_STMT (use_b))->loop_father
== loop->inner)
goto fail;
/* The statement should not use the value of a
scalar that was modified in the loop. */
node = SSA_NAME_DEF_STMT (op);
if (TREE_CODE (node) == PHI_NODE)
FOR_EACH_PHI_ARG (use_b, node, op_iter1, SSA_OP_USE)
{
tree arg = USE_FROM_PTR (use_b);
if (TREE_CODE (arg) == SSA_NAME)
{
tree arg_stmt = SSA_NAME_DEF_STMT (arg);
if (bb_for_stmt (arg_stmt)->loop_father
== loop->inner)
goto fail;
}
}
}
if (can_put_in_inner_loop (loop->inner, stmt)
|| can_put_after_inner_loop (loop, stmt))
continue;
}
/* Otherwise, if the bb of a statement we care about isn't
dominated by the header of the inner loop, then we can't
@ -2379,14 +2442,10 @@ perfect_nestify (struct loops *loops,
tree stmt;
tree oldivvar, ivvar, ivvarinced;
VEC(tree,heap) *phis = NULL;
if (!can_convert_to_perfect_nest (loop, loopivs))
return false;
/* Create the new loop */
/* Create the new loop. */
olddest = loop->single_exit->dest;
preheaderbb = loop_split_edge_with (loop->single_exit, NULL);
preheaderbb = loop_split_edge_with (loop->single_exit, NULL);
headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
/* Push the exit phi nodes that we are moving. */
@ -2501,37 +2560,22 @@ perfect_nestify (struct loops *loops,
if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
{
for (bsi = bsi_last (bbs[i]); !bsi_end_p (bsi);)
block_stmt_iterator header_bsi
= bsi_after_labels (loop->inner->header);
for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
{
use_operand_p use_p;
imm_use_iterator imm_iter;
tree stmt = bsi_stmt (bsi);
if (stmt == exit_condition
|| not_interesting_stmt (stmt)
|| stmt_is_bumper_for_loop (loop, stmt))
{
if (!bsi_end_p (bsi))
bsi_prev (&bsi);
bsi_next (&bsi);
continue;
}
/* Move this statement back into the inner loop.
This looks a bit confusing, but we are really just
finding the first non-exit phi use and moving the
statement to the beginning of that use's basic
block. */
FOR_EACH_IMM_USE_SAFE (use_p, imm_iter,
TREE_OPERAND (stmt, 0))
{
tree imm_stmt = USE_STMT (use_p);
if (!exit_phi_for_loop_p (loop->inner, imm_stmt))
{
block_stmt_iterator tobsi = bsi_after_labels (bb_for_stmt (imm_stmt));
bsi_move_before (&bsi, &tobsi);
update_stmt (stmt);
BREAK_FROM_SAFE_IMM_USE (imm_iter);
}
}
bsi_move_before (&bsi, &header_bsi);
}
}
else
@ -2552,10 +2596,9 @@ perfect_nestify (struct loops *loops,
continue;
}
replace_uses_equiv_to_x_with_y (loop, stmt,
oldivvar,
VEC_index (int, steps, 0),
ivvar);
replace_uses_equiv_to_x_with_y
(loop, stmt, oldivvar, VEC_index (int, steps, 0), ivvar);
bsi_move_before (&bsi, &tobsi);
/* If the statement has any virtual operands, they may

View File

@ -1,5 +1,5 @@
/* Lambda matrix and vector interface.
Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Daniel Berlin <dberlin@dberlin.org>
This file is part of GCC.
@ -200,8 +200,7 @@ void print_lambda_body_vector (FILE *, lambda_body_vector);
lambda_loopnest gcc_loopnest_to_lambda_loopnest (struct loops *,
struct loop *,
VEC(tree,heap) **,
VEC(tree,heap) **,
bool);
VEC(tree,heap) **);
void lambda_loopnest_to_gcc_loopnest (struct loop *,
VEC(tree,heap) *, VEC(tree,heap) *,
lambda_loopnest, lambda_trans_matrix);

View File

@ -223,15 +223,19 @@ cleanup:
return result;
}
/* Adds test of COND jumping to DEST to the end of BB. */
/* Adds test of COND jumping to DEST on edge *E and set *E to the new fallthru
edge. If the condition is always false, do not do anything. If it is always
true, redirect E to DEST and return false. In all other cases, true is
returned. */
static void
add_test (rtx cond, basic_block bb, basic_block dest)
static bool
add_test (rtx cond, edge *e, basic_block dest)
{
rtx seq, jump, label;
enum machine_mode mode;
rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
enum rtx_code code = GET_CODE (cond);
basic_block bb;
mode = GET_MODE (XEXP (cond, 0));
if (mode == VOIDmode)
@ -244,18 +248,36 @@ add_test (rtx cond, basic_block bb, basic_block dest)
do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL_RTX, label);
jump = get_last_insn ();
if (!JUMP_P (jump))
{
/* The condition is always false and the jump was optimized out. */
end_sequence ();
return true;
}
seq = get_insns ();
end_sequence ();
bb = loop_split_edge_with (*e, seq);
*e = single_succ_edge (bb);
if (any_uncondjump_p (jump))
{
/* The condition is always true. */
delete_insn (jump);
redirect_edge_and_branch_force (*e, dest);
return false;
}
JUMP_LABEL (jump) = label;
/* The jump is supposed to handle an unlikely special case. */
REG_NOTES (jump)
= gen_rtx_EXPR_LIST (REG_BR_PROB,
const0_rtx, REG_NOTES (jump));
LABEL_NUSES (label)++;
seq = get_insns ();
end_sequence ();
emit_insn_after (seq, BB_END (bb));
make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
return true;
}
/* Modify the loop to use the low-overhead looping insn where LOOP
@ -273,7 +295,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
rtx sequence;
rtx jump_insn;
rtx jump_label;
int nonneg = 0, irr;
int nonneg = 0;
bool increment_count;
basic_block loop_end = desc->out_edge->src;
enum machine_mode mode;
@ -353,39 +375,57 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
= loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
basic_block new_preheader
= loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
basic_block bb;
edge te;
gcov_type cnt;
/* Expand the condition testing the assumptions and if it does not pass,
reset the count register to 0. */
add_test (XEXP (ass, 0), preheader, set_zero);
single_succ_edge (preheader)->flags &= ~EDGE_FALLTHRU;
cnt = single_succ_edge (preheader)->count;
single_succ_edge (preheader)->probability = 0;
single_succ_edge (preheader)->count = 0;
irr = single_succ_edge (preheader)->flags & EDGE_IRREDUCIBLE_LOOP;
te = make_edge (preheader, new_preheader, EDGE_FALLTHRU | irr);
te->probability = REG_BR_PROB_BASE;
te->count = cnt;
redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);
set_zero->count = 0;
set_zero->frequency = 0;
for (ass = XEXP (ass, 1); ass; ass = XEXP (ass, 1))
te = single_succ_edge (preheader);
for (; ass; ass = XEXP (ass, 1))
if (!add_test (XEXP (ass, 0), &te, set_zero))
break;
if (ass)
{
bb = loop_split_edge_with (te, NULL_RTX);
te = single_succ_edge (bb);
add_test (XEXP (ass, 0), bb, set_zero);
make_edge (bb, set_zero, irr);
/* We reached a condition that is always true. This is very hard to
reproduce (such a loop does not roll, and thus it would most
likely get optimized out by some of the preceding optimizations).
In fact, I do not have any testcase for it. However, it would
also be very hard to show that it is impossible, so we must
handle this case. */
set_zero->count = preheader->count;
set_zero->frequency = preheader->frequency;
}
start_sequence ();
convert_move (counter_reg, noloop, 0);
sequence = get_insns ();
end_sequence ();
emit_insn_after (sequence, BB_END (set_zero));
if (EDGE_COUNT (set_zero->preds) == 0)
{
/* All the conditions were simplified to false, remove the
unreachable set_zero block. */
remove_bb_from_loops (set_zero);
delete_basic_block (set_zero);
}
else
{
/* Reset the counter to zero in the set_zero block. */
start_sequence ();
convert_move (counter_reg, noloop, 0);
sequence = get_insns ();
end_sequence ();
emit_insn_after (sequence, BB_END (set_zero));
set_immediate_dominator (CDI_DOMINATORS, set_zero,
recount_dominator (CDI_DOMINATORS,
set_zero));
}
set_immediate_dominator (CDI_DOMINATORS, new_preheader,
recount_dominator (CDI_DOMINATORS,
new_preheader));
}
/* Some targets (eg, C4x) need to initialize special looping

View File

@ -1,3 +1,9 @@
2006-10-03 Andrew Pinski <pinskia@physics.uc.edu>
PR objc/29195
* objc-act.c (objc_push_parm): If we change the type of the
decl, relayout the decl.
2006-05-24 Release Manager
* GCC 4.1.1 released.

View File

@ -8215,11 +8215,22 @@ static GTY(()) tree objc_parmlist = NULL_TREE;
static void
objc_push_parm (tree parm)
{
bool relayout_needed = false;
/* Decay arrays and functions into pointers. */
if (TREE_CODE (TREE_TYPE (parm)) == ARRAY_TYPE)
TREE_TYPE (parm) = build_pointer_type (TREE_TYPE (TREE_TYPE (parm)));
{
TREE_TYPE (parm) = build_pointer_type (TREE_TYPE (TREE_TYPE (parm)));
relayout_needed = true;
}
else if (TREE_CODE (TREE_TYPE (parm)) == FUNCTION_TYPE)
TREE_TYPE (parm) = build_pointer_type (TREE_TYPE (parm));
{
TREE_TYPE (parm) = build_pointer_type (TREE_TYPE (parm));
relayout_needed = true;
}
if (relayout_needed)
relayout_decl (parm);
DECL_ARG_TYPE (parm)
= lang_hooks.types.type_promotes_to (TREE_TYPE (parm));

View File

@ -485,6 +485,78 @@ flush_pending_lists (struct deps *deps, rtx insn, int for_read,
deps->pending_flush_length = 1;
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
The type of the reference is specified by REF and can be SET,
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
enum rtx_code ref, rtx insn)
{
/* A hard reg in a wide mode may really be multiple registers.
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
int i = hard_regno_nregs[regno][mode];
if (ref == SET)
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_sets, regno + i);
}
else if (ref == USE)
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_uses, regno + i);
}
else
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
}
}
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
purpose already. */
else if (regno >= deps->max_reg)
{
enum rtx_code code = GET_CODE (PATTERN (insn));
gcc_assert (code == USE || code == CLOBBER);
}
else
{
if (ref == SET)
SET_REGNO_REG_SET (reg_pending_sets, regno);
else if (ref == USE)
SET_REGNO_REG_SET (reg_pending_uses, regno);
else
SET_REGNO_REG_SET (reg_pending_clobbers, regno);
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
the address in the REG_EQUIV note. */
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
/* Don't let it cross a call after scheduling if it doesn't
already cross one. */
if (REG_N_CALLS_CROSSED (regno) == 0)
{
if (ref == USE)
deps->sched_before_next_call
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
else
add_dependence_list (insn, deps->last_function_call, 1,
REG_DEP_ANTI);
}
}
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
rtx, X, creating all dependencies generated by the write to the
destination of X, and reads of everything mentioned. */
@ -492,7 +564,6 @@ flush_pending_lists (struct deps *deps, rtx insn, int for_read,
static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
int regno;
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
@ -541,64 +612,21 @@ sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
if (REG_P (dest))
{
regno = REGNO (dest);
int regno = REGNO (dest);
enum machine_mode mode = GET_MODE (dest);
sched_analyze_reg (deps, regno, mode, code, insn);
#ifdef STACK_REGS
/* Treat all writes to a stack register as modifying the TOS. */
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
SET_REGNO_REG_SET (reg_pending_uses, FIRST_STACK_REG);
regno = FIRST_STACK_REG;
/* Avoid analyzing the same register twice. */
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
}
#endif
/* A hard reg in a wide mode may really be multiple registers.
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
int i = hard_regno_nregs[regno][GET_MODE (dest)];
if (code == SET)
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_sets, regno + i);
}
else
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
}
}
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
purpose already. */
else if (regno >= deps->max_reg)
{
gcc_assert (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER);
}
else
{
if (code == SET)
SET_REGNO_REG_SET (reg_pending_sets, regno);
else
SET_REGNO_REG_SET (reg_pending_clobbers, regno);
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
the address in the REG_EQUIV note. */
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
/* Don't let it cross a call after scheduling if it doesn't
already cross one. */
if (REG_N_CALLS_CROSSED (regno) == 0)
add_dependence_list (insn, deps->last_function_call, 1,
REG_DEP_ANTI);
}
}
else if (MEM_P (dest))
{
@ -705,51 +733,20 @@ sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
case REG:
{
int regno = REGNO (x);
enum machine_mode mode = GET_MODE (x);
sched_analyze_reg (deps, regno, mode, USE, insn);
#ifdef STACK_REGS
/* Treat all reads of a stack register as modifying the TOS. */
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
SET_REGNO_REG_SET (reg_pending_sets, FIRST_STACK_REG);
regno = FIRST_STACK_REG;
/* Avoid analyzing the same register twice. */
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
}
#endif
if (regno < FIRST_PSEUDO_REGISTER)
{
int i = hard_regno_nregs[regno][GET_MODE (x)];
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_uses, regno + i);
}
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
purpose already. */
else if (regno >= deps->max_reg)
{
gcc_assert (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER);
}
else
{
SET_REGNO_REG_SET (reg_pending_uses, regno);
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
the address in the REG_EQUIV note. */
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
/* If the register does not already cross any calls, then add this
insn to the sched_before_next_call list so that it will still
not cross calls after scheduling. */
if (REG_N_CALLS_CROSSED (regno) == 0)
deps->sched_before_next_call
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
}
return;
}

View File

@ -3075,19 +3075,18 @@ simplify_const_relational_operation (enum rtx_code code,
a register or a CONST_INT, this can't help; testing for these cases will
prevent infinite recursion here and speed things up.
If CODE is an unsigned comparison, then we can never do this optimization,
because it gives an incorrect result if the subtraction wraps around zero.
ANSI C defines unsigned operations such that they never overflow, and
thus such cases can not be ignored; but we cannot do it even for
signed comparisons for languages such as Java, so test flag_wrapv. */
We can only do this for EQ and NE comparisons as otherwise we may
lose or introduce overflow which we cannot disregard as undefined as
we do not know the signedness of the operation on either the left or
the right hand side of the comparison. */
if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
&& (code == EQ || code == NE)
&& ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
&& (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
&& 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
/* We cannot do this for == or != if tem is a nonzero address. */
&& ((code != EQ && code != NE) || ! nonzero_address_p (tem))
&& code != GTU && code != GEU && code != LTU && code != LEU)
/* We cannot do this if tem is a nonzero address. */
&& ! nonzero_address_p (tem))
return simplify_const_relational_operation (signed_condition (code),
mode, tem, const0_rtx);
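A worked illustration of the restriction, assuming a typical 32-bit two's-complement target: ordered comparisons cannot be routed through the subtraction, only equality tests can.

int lt_direct (unsigned a, unsigned b)  { return a < b; }
int lt_via_sub (unsigned a, unsigned b) { return (int) (a - b) < 0; }
/* lt_direct (0xFFFFFFFFu, 0x7FFFFFFFu) is 0, but lt_via_sub gives 1
   because a - b is 0x80000000; a == b and a != b are unaffected by
   the wraparound, which is why only EQ and NE are folded above.  */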

View File

@ -239,6 +239,9 @@ static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
block_stmt_iterator bsi;
edge_iterator ei;
edge e, succ;
basic_block dest;
/* BB must have a single outgoing edge. */
if (single_succ_p (bb) != 1
@ -290,6 +293,22 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
return false;
}
/* If we have an EH edge leaving this block, make sure that the
destination of this block has only one predecessor. This ensures
that we don't get into the situation where we try to remove two
forwarders that go to the same basic block but are handlers for
different EH regions. */
succ = single_succ_edge (bb);
dest = succ->dest;
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->flags & EDGE_EH)
{
if (!single_pred_p (dest))
return false;
}
}
return true;
}

View File

@ -76,8 +76,6 @@ static void add_referenced_var (tree, bool);
/* Array of all variables referenced in the function. */
htab_t referenced_vars;
/* List of referenced variables with duplicate UID's. */
VEC(tree,gc) *referenced_vars_dup_list;
/*---------------------------------------------------------------------------
@ -97,7 +95,6 @@ find_referenced_vars (void)
basic_block bb;
block_stmt_iterator si;
gcc_assert (VEC_length (tree, referenced_vars_dup_list) == 0);
FOR_EACH_BB (bb)
for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
{
@ -613,31 +610,18 @@ static void
add_referenced_var (tree var, bool always)
{
var_ann_t v_ann;
tree dup = referenced_var_lookup_if_exists (DECL_UID (var));
tree ref;
v_ann = get_var_ann (var);
gcc_assert (DECL_P (var));
/* PRs 26757 and 27793. Maintain a list of duplicate variable pointers
with the same DECL_UID. There isn't usually very many.
TODO. Once the C++ front end doesn't create duplicate DECL UID's, this
code can be removed. */
if (dup && dup != var)
{
unsigned u;
tree t = NULL_TREE;
for (u = 0; u < VEC_length (tree, referenced_vars_dup_list); u++)
{
t = VEC_index (tree, referenced_vars_dup_list, u);
if (t == var)
break;
}
if (t != var)
VEC_safe_push (tree, gc, referenced_vars_dup_list, var);
}
ref = referenced_var_lookup_if_exists (DECL_UID (var));
if (always || dup == NULL_TREE)
/* Catch PRs 26757 and 27793. If this assert triggers, REF and VAR are
two different variables in this function with the same DECL_UID. */
gcc_assert (!ref || ref == var);
if (always || ref == NULL_TREE)
{
/* This is the first time we find this variable, add it to the
REFERENCED_VARS array and annotate it with attributes that are

View File

@ -420,8 +420,6 @@ typedef struct
/* Array of all variables referenced in the function. */
extern GTY((param_is (struct int_tree_map))) htab_t referenced_vars;
/* List of referenced variables in the function with duplicate UID's. */
extern VEC(tree,gc) *referenced_vars_dup_list;
extern tree referenced_var_lookup (unsigned int);
extern tree referenced_var_lookup_if_exists (unsigned int);
@ -735,9 +733,13 @@ tree find_loop_niter (struct loop *, edge *);
tree loop_niter_by_eval (struct loop *, edge);
tree find_loop_niter_by_eval (struct loop *, edge *);
void estimate_numbers_of_iterations (struct loops *);
bool scev_probably_wraps_p (tree, tree, tree, tree, struct loop *, bool *,
bool *);
tree convert_step (struct loop *, tree, tree, tree, tree);
bool scev_probably_wraps_p (tree, tree, tree, struct loop *, bool);
bool convert_affine_scev (struct loop *, tree, tree *, tree *, tree, bool);
bool nowrap_type_p (tree);
enum ev_direction {EV_DIR_GROWS, EV_DIR_DECREASES, EV_DIR_UNKNOWN};
enum ev_direction scev_direction (tree);
void free_numbers_of_iterations_estimates (struct loops *);
void free_numbers_of_iterations_estimates_loop (struct loop *);
void rewrite_into_loop_closed_ssa (bitmap, unsigned);

View File

@ -753,6 +753,14 @@ copy_bb (inline_data *id, basic_block bb, int frequency_scale, int count_scale)
if (stmt)
{
tree call, decl;
/* With return slot optimization we can end up with
non-gimple (foo *)&this->m, fix that here. */
if (TREE_CODE (stmt) == MODIFY_EXPR
&& TREE_CODE (TREE_OPERAND (stmt, 1)) == NOP_EXPR
&& !is_gimple_val (TREE_OPERAND (TREE_OPERAND (stmt, 1), 0)))
gimplify_stmt (&stmt);
bsi_insert_after (&copy_bsi, stmt, BSI_NEW_STMT);
call = get_call_expr_in (stmt);
/* We're duplicating a CALL_EXPR. Find any corresponding
@ -1156,6 +1164,8 @@ setup_one_parameter (inline_data *id, tree p, tree value, tree fn,
if (rhs == error_mark_node)
return;
STRIP_USELESS_TYPE_CONVERSION (rhs);
/* We want to use MODIFY_EXPR, not INIT_EXPR here so that we
keep our trees in gimple form. */
init_stmt = build (MODIFY_EXPR, TREE_TYPE (var), var, rhs);
@ -1344,6 +1354,8 @@ declare_return_variable (inline_data *id, tree return_slot_addr,
use = var;
if (!lang_hooks.types_compatible_p (TREE_TYPE (var), caller_type))
use = fold_convert (caller_type, var);
STRIP_USELESS_TYPE_CONVERSION (use);
done:
/* Register the VAR_DECL as the equivalent for the RESULT_DECL; that

View File

@ -143,6 +143,9 @@ create_tmp_var_for (struct nesting_info *info, tree type, const char *prefix)
DECL_CONTEXT (tmp_var) = info->context;
TREE_CHAIN (tmp_var) = info->new_local_var_chain;
DECL_SEEN_IN_BIND_EXPR_P (tmp_var) = 1;
if (TREE_CODE (type) == COMPLEX_TYPE)
DECL_COMPLEX_GIMPLE_REG_P (tmp_var) = 1;
info->new_local_var_chain = tmp_var;
return tmp_var;

View File

@ -821,7 +821,6 @@ delete_tree_ssa (void)
block_stmt_iterator bsi;
referenced_var_iterator rvi;
tree var;
unsigned u;
/* Release any ssa_names still in use. */
for (i = 0; i < num_ssa_names; i++)
@ -856,16 +855,6 @@ delete_tree_ssa (void)
ggc_free (var->common.ann);
var->common.ann = NULL;
}
/* Remove any referenced variables which had duplicate UID's. */
for (u = 0; u < VEC_length (tree, referenced_vars_dup_list); u++)
{
var = VEC_index (tree, referenced_vars_dup_list, u);
ggc_free (var->common.ann);
var->common.ann = NULL;
}
VEC_free (tree, gc, referenced_vars_dup_list);
htab_delete (referenced_vars);
referenced_vars = NULL;

View File

@ -1878,7 +1878,8 @@ vect_analyze_loop_form (struct loop *loop)
that the loop is represented as a do-while (with a proper if-guard
before the loop if needed), where the loop header contains all the
executable statements, and the latch is empty. */
if (!empty_block_p (loop->latch))
if (!empty_block_p (loop->latch)
|| phi_nodes (loop->latch))
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
fprintf (vect_dump, "not vectorized: unexpected loop form.");

View File

@ -3935,15 +3935,18 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
if (type_size > op_size
&& TREE_CODE (exp) != VIEW_CONVERT_EXPR
&& TREE_CODE (TREE_TYPE (exp)) != UNION_TYPE)
internal_error ("no-op convert from %wd to %wd bytes in initializer",
op_size, type_size);
exp = TREE_OPERAND (exp, 0);
/* Keep the conversion. */
break;
else
exp = TREE_OPERAND (exp, 0);
}
code = TREE_CODE (TREE_TYPE (exp));
thissize = int_size_in_bytes (TREE_TYPE (exp));
/* Give the front end another chance to expand constants. */
exp = lang_hooks.expand_constant (exp);
/* Allow a constructor with no elements for any data type.
This means to fill the space with zeros. */
if (TREE_CODE (exp) == CONSTRUCTOR
@ -4022,8 +4025,12 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
link = TREE_VECTOR_CST_ELTS (exp);
output_constant (TREE_VALUE (link), elt_size, align);
thissize = elt_size;
while ((link = TREE_CHAIN (link)) != NULL)
output_constant (TREE_VALUE (link), elt_size, nalign);
{
output_constant (TREE_VALUE (link), elt_size, nalign);
thissize += elt_size;
}
break;
}
default: