import GCC 4.1 branch from 20061021.

Includes fixes for these 109 GCC PRs:

 4520 13685 13983 17519 19505 20256 22209 22313 23454 24367 25211
25468 25519 25636 25636 26435 26504 26570 26719 26764 26881 26969
26983 26991 26991 26993 27075 27184 27226 27287 27287 27291 27334
27363 27428 27489 27490 27537 27558 27565 27566 27616 27639 27681
27697 27721 27724 27768 27793 27793 27795 27827 27878 27889 27893
28029 28075 28136 28148 28150 28162 28170 28187 28207 28207 28218
28221 28238 28243 28247 28257 28259 28267 28283 28286 28299 28386
28402 28403 28418 28473 28490 28493 28621 28634 28636 28649 28651
28677 28683 28726 28814 28825 28862 28900 28924 28946 28952 28960
28980 29006 29091 29119 29132 29154 29198 29230 29290 29323
mrg 2006-10-21 22:29:06 +00:00
parent 2c32a05a20
commit 622e071e10
23 changed files with 549 additions and 178 deletions


@@ -4290,12 +4290,23 @@ grokdeclarator (const struct c_declarator *declarator,
 	    type = error_mark_node;
 	  }
 	else
+	  /* When itype is NULL, a shared incomplete array type is
+	     returned for all arrays of a given type.  Elsewhere we
+	     make sure we don't complete that type before copying
+	     it, but here we want to make sure we don't ever
+	     modify the shared type, so we gcc_assert (itype)
+	     below.  */
 	  type = build_array_type (type, itype);

 	if (type != error_mark_node)
 	  {
 	    if (size_varies)
 	      {
+		/* It is ok to modify type here even if itype is
+		   NULL: if size_varies, we're in a
+		   multi-dimensional array and the inner type has
+		   variable size, so the enclosing shared array type
+		   must too.  */
 		if (size && TREE_CODE (size) == INTEGER_CST)
 		  type
 		    = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
@@ -4307,6 +4318,7 @@ grokdeclarator (const struct c_declarator *declarator,
 	       zero.  */
 	    if (size && integer_zerop (size))
 	      {
+		gcc_assert (itype);
 		TYPE_SIZE (type) = bitsize_zero_node;
 		TYPE_SIZE_UNIT (type) = size_zero_node;
 	      }
@@ -4464,21 +4476,6 @@ grokdeclarator (const struct c_declarator *declarator,
       return decl;
     }

-  /* Detect the case of an array type of unspecified size
-     which came, as such, direct from a typedef name.
-     We must copy the type, so that each identifier gets
-     a distinct type, so that each identifier's size can be
-     controlled separately by its own initializer.  */
-  if (type != 0 && typedef_type != 0
-      && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0
-      && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type))
-    {
-      type = build_array_type (TREE_TYPE (type), 0);
-      if (size_varies)
-	C_TYPE_VARIABLE_SIZE (type) = 1;
-    }
-
   /* If this is a type name (such as, in a cast or sizeof),
      compute the type and return it now.  */
@@ -5928,6 +5925,8 @@ start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
   /* If this definition isn't a prototype and we had a prototype declaration
      before, copy the arg type info from that prototype.  */
   old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
+  if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
+    old_decl = 0;
   current_function_prototype_locus = UNKNOWN_LOCATION;
   current_function_prototype_built_in = false;
   current_function_prototype_arg_types = NULL_TREE;


@@ -522,7 +522,16 @@ pp_c_direct_abstract_declarator (c_pretty_printer *pp, tree t)
     case ARRAY_TYPE:
       pp_c_left_bracket (pp);
       if (TYPE_DOMAIN (t) && TYPE_MAX_VALUE (TYPE_DOMAIN (t)))
-	pp_expression (pp, TYPE_MAX_VALUE (TYPE_DOMAIN (t)));
+	{
+	  tree maxval = TYPE_MAX_VALUE (TYPE_DOMAIN (t));
+	  tree type = TREE_TYPE (maxval);
+
+	  if (host_integerp (maxval, 0))
+	    pp_wide_integer (pp, tree_low_cst (maxval, 0) + 1);
+	  else
+	    pp_expression (pp, fold_build2 (PLUS_EXPR, type, maxval,
+					    build_int_cst (type, 1)));
+	}
       pp_c_right_bracket (pp);
       pp_direct_abstract_declarator (pp, TREE_TYPE (t));
       break;
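The hunk above makes the pretty-printer emit the array's length rather than its highest index: a C array domain stores TYPE_MAX_VALUE = length - 1. A standalone sketch of the off-by-one being fixed (struct array_domain is a toy stand-in, not GCC's tree API):

#include <stdio.h>

/* Hypothetical stand-in for an array type's index domain: like GCC,
   it records the maximum valid index (length - 1), not the length.  */
struct array_domain { long max_value; };

/* Print the declarator suffix the way the patched printer does:
   emit max index + 1, i.e. the declared length.  */
static void print_array_suffix (const struct array_domain *dom)
{
  printf ("[%ld]", dom->max_value + 1);
}

int main (void)
{
  struct array_domain dom = { 9 };   /* models 'int a[10]' */
  print_array_suffix (&dom);         /* prints "[10]", not "[9]" */
  printf ("\n");
  return 0;
}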


@@ -173,6 +173,10 @@ static void cgraph_expand_function (struct cgraph_node *);
 static tree record_reference (tree *, int *, void *);
 static void cgraph_analyze_function (struct cgraph_node *node);

+/* Local static variables need to be passed to debug info after the
+   function bodies are compiled.  */
+static GTY(()) VEC(tree,gc) *local_static_output;
+
 /* Records tree nodes seen in record_reference.  Simply using
    walk_tree_without_duplicates doesn't guarantee each node is visited
    once because it gets a new htab upon each recursive call from
@@ -807,6 +811,15 @@ verify_cgraph (void)
 }

+static void
+cgraph_varpool_debug_local_statics (void)
+{
+  timevar_push (TV_SYMOUT);
+  while (VEC_length (tree, local_static_output) > 0)
+    (*debug_hooks->global_decl) (VEC_pop (tree, local_static_output));
+  timevar_pop (TV_SYMOUT);
+}
+
 /* Output all variables enqueued to be assembled.  */
 bool
 cgraph_varpool_assemble_pending_decls (void)
@@ -837,9 +850,9 @@ cgraph_varpool_assemble_pending_decls (void)
 	      || TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
 	  && errorcount == 0 && sorrycount == 0)
 	{
-	  timevar_push (TV_SYMOUT);
-	  (*debug_hooks->global_decl) (decl);
-	  timevar_pop (TV_SYMOUT);
+	  if (!local_static_output)
+	    local_static_output = VEC_alloc (tree, gc, 20);
+	  VEC_safe_push (tree, gc, local_static_output, decl);
 	}
       changed = true;
     }
@@ -887,6 +900,9 @@ cgraph_finalize_compilation_unit (void)
      intermodule optimization.  */
   static struct cgraph_node *first_analyzed;

+  if (errorcount || sorrycount)
+    return;
+
   finish_aliases_1 ();

   if (!flag_unit_at_a_time)
@@ -1229,12 +1245,16 @@ ipa_passes (void)
 void
 cgraph_optimize (void)
 {
+  if (errorcount || sorrycount)
+    return;
+
 #ifdef ENABLE_CHECKING
   verify_cgraph ();
 #endif
   if (!flag_unit_at_a_time)
     {
       cgraph_varpool_assemble_pending_decls ();
+      cgraph_varpool_debug_local_statics ();
       return;
     }
@@ -1308,6 +1328,7 @@ cgraph_optimize (void)
 	internal_error ("nodes with no released memory found");
     }
 #endif
+  cgraph_varpool_debug_local_statics ();
 }

 /* Generate and emit a static constructor or destructor.  WHICH must be
@@ -1521,3 +1542,5 @@ cgraph_function_versioning (struct cgraph_node *old_version_node,
   new_version_node->lowered = true;
   return new_version_node;
 }
+
+#include "gt-cgraphunit.h"
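The hunks above stop emitting debug info for function-local statics on the spot and instead queue the decls in a GTY-rooted vector, draining it once compilation is done. A minimal sketch of that defer-then-drain pattern in plain C (the queue and int payload are illustrative, not GCC's VEC/debug-hook API):

#include <stdio.h>
#include <stdlib.h>

/* A growable queue standing in for VEC(tree,gc) *local_static_output.  */
static int *queue;
static size_t queue_len, queue_cap;

static void defer (int decl)   /* cf. VEC_safe_push in the patch */
{
  if (queue_len == queue_cap)
    {
      queue_cap = queue_cap ? 2 * queue_cap : 20;
      queue = realloc (queue, queue_cap * sizeof *queue);
    }
  queue[queue_len++] = decl;
}

static void drain (void)       /* cf. cgraph_varpool_debug_local_statics */
{
  while (queue_len > 0)
    printf ("debug_hooks->global_decl (%d)\n", queue[--queue_len]);
}

int main (void)
{
  defer (1); defer (2); defer (3);   /* queued while bodies compile */
  drain ();                          /* emitted afterwards: 3 2 1 */
  free (queue);
  return 0;
}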


@@ -528,6 +528,10 @@ struct table_elt
 static struct table_elt *table[HASH_SIZE];

+/* Number of elements in the hash table.  */
+static unsigned int table_size;
+
 /* Chain of `struct table_elt's made so far for this function
    but currently removed from the table.  */
@@ -962,6 +966,8 @@ new_basic_block (void)
       }
   }

+  table_size = 0;
+
 #ifdef HAVE_cc0
   prev_insn = 0;
   prev_insn_cc0 = 0;
@@ -1372,6 +1378,8 @@ remove_from_table (struct table_elt *elt, unsigned int hash)
   /* Now add it to the free element chain.  */
   elt->next_same_hash = free_element_chain;
   free_element_chain = elt;
+
+  table_size--;
 }

 /* Look up X in the hash table and return its table element,
@@ -1649,6 +1657,8 @@ insert (rtx x, struct table_elt *classp, unsigned int hash, enum machine_mode mo
 	}
     }

+  table_size++;
+
   return elt;
 }
@@ -3441,10 +3451,10 @@ fold_rtx_subreg (rtx x, rtx insn)
   return x;
 }

-/* Fold MEM.  */
+/* Fold MEM.  Not to be called directly, see fold_rtx_mem instead.  */

 static rtx
-fold_rtx_mem (rtx x, rtx insn)
+fold_rtx_mem_1 (rtx x, rtx insn)
 {
   enum machine_mode mode = GET_MODE (x);
   rtx new;
@@ -3607,6 +3617,51 @@ fold_rtx_mem (rtx x, rtx insn)
     }
 }

+/* Fold MEM.  */
+
+static rtx
+fold_rtx_mem (rtx x, rtx insn)
+{
+  /* To avoid infinite oscillations between fold_rtx and fold_rtx_mem,
+     refuse to allow recursion of the latter past n levels.  This can
+     happen because fold_rtx_mem will try to fold the address of the
+     memory reference it is passed, i.e. conceptually throwing away
+     the MEM and reinjecting the bare address into fold_rtx.  As a
+     result, patterns like
+
+       set (reg1)
+	   (plus (reg)
+		 (mem (plus (reg2) (const_int))))
+
+       set (reg2)
+	   (plus (reg)
+		 (mem (plus (reg1) (const_int))))
+
+     will defeat any "first-order" short-circuit put in either
+     function to prevent these infinite oscillations.
+
+     The heuristic for determining n is as follows: since each time
+     it is invoked fold_rtx_mem throws away a MEM, and since MEMs
+     are generically not nested, we assume that each invocation of
+     fold_rtx_mem corresponds to a new "top-level" operand, i.e.
+     the source or the destination of a SET.  So fold_rtx_mem is
+     bound to stop or cycle before n recursions, n being the number
+     of expressions recorded in the hash table.  We also leave some
+     play to account for the initial steps.  */
+
+  static unsigned int depth;
+  rtx ret;
+
+  if (depth > 3 + table_size)
+    return x;
+
+  depth++;
+  ret = fold_rtx_mem_1 (x, insn);
+  depth--;
+
+  return ret;
+}
+
 /* If X is a nontrivial arithmetic operation on an argument
    for which a constant value can be determined, return
    the result of operating on that value, as a constant.
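The recursion cap added above is a general defense for mutually recursive simplifiers. A self-contained toy with the same guard shape (the folders below are invented; only the function-static depth counter and the cutoff mirror the patch):

#include <stdio.h>

static int fold_expr (int x);        /* forward declaration */

static int fold_mem_1 (int x)
{
  /* In GCC this would strip the MEM and re-fold the bare address,
     which may recurse back into the MEM folder.  */
  return fold_expr (x - 1);
}

static int fold_mem (int x)
{
  static unsigned int depth;         /* persists across recursive calls */
  int ret;

  if (depth > 3)                     /* give up instead of oscillating */
    return x;

  depth++;
  ret = fold_mem_1 (x, /* insn */ 0 * 0) + 0;  /* toy call */
  depth--;
  return ret;
}

static int fold_expr (int x)
{
  return x > 0 ? fold_mem (x) : x;
}

int main (void)
{
  printf ("%d\n", fold_expr (10));   /* terminates thanks to the cap */
  return 0;
}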
@@ -4220,21 +4275,23 @@ fold_rtx (rtx x, rtx insn)
 	{
 	  int is_shift
 	    = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
-	  rtx y = lookup_as_function (folded_arg0, code);
-	  rtx inner_const;
+	  rtx y, inner_const, new_const;
 	  enum rtx_code associate_code;
-	  rtx new_const;

-	  if (y == 0
-	      || 0 == (inner_const
-		       = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
-	      || GET_CODE (inner_const) != CONST_INT
-	      /* If we have compiled a statement like
-		 "if (x == (x & mask1))", and now are looking at
-		 "x & mask2", we will have a case where the first operand
-		 of Y is the same as our first operand.  Unless we detect
-		 this case, an infinite loop will result.  */
-	      || XEXP (y, 0) == folded_arg0)
+	  y = lookup_as_function (folded_arg0, code);
+	  if (y == 0)
+	    break;
+
+	  /* If we have compiled a statement like
+	     "if (x == (x & mask1))", and now are looking at
+	     "x & mask2", we will have a case where the first operand
+	     of Y is the same as our first operand.  Unless we detect
+	     this case, an infinite loop will result.  */
+	  if (XEXP (y, 0) == folded_arg0)
+	    break;
+
+	  inner_const = equiv_constant (fold_rtx (XEXP (y, 1), 0));
+	  if (!inner_const || GET_CODE (inner_const) != CONST_INT)
 	    break;

 	  /* Don't associate these operations if they are a PLUS with the
@@ -4697,6 +4754,8 @@ struct set
   unsigned src_const_hash;
   /* Table entry for constant equivalent for SET_SRC, if any.  */
   struct table_elt *src_const_elt;
+  /* Table entry for the destination address.  */
+  struct table_elt *dest_addr_elt;
 };

 static void
@@ -5936,6 +5995,40 @@ cse_insn (rtx insn, rtx libcall_insn)
 	 so that the destination goes into that class.  */
       sets[i].src_elt = src_eqv_elt;

+  /* Record destination addresses in the hash table.  This allows us to
+     check if they are invalidated by other sets.  */
+  for (i = 0; i < n_sets; i++)
+    {
+      if (sets[i].rtl)
+	{
+	  rtx x = sets[i].inner_dest;
+	  struct table_elt *elt;
+	  enum machine_mode mode;
+	  unsigned hash;
+
+	  if (MEM_P (x))
+	    {
+	      x = XEXP (x, 0);
+	      mode = GET_MODE (x);
+	      hash = HASH (x, mode);
+	      elt = lookup (x, hash, mode);
+	      if (!elt)
+		{
+		  if (insert_regs (x, NULL, 0))
+		    {
+		      rehash_using_reg (x);
+		      hash = HASH (x, mode);
+		    }
+		  elt = insert (x, NULL, hash, mode);
+		}
+
+	      sets[i].dest_addr_elt = elt;
+	    }
+	  else
+	    sets[i].dest_addr_elt = NULL;
+	}
+    }
+
   invalidate_from_clobbers (x);

   /* Some registers are invalidated by subroutine calls.  Memory is
@@ -6028,12 +6121,20 @@ cse_insn (rtx insn, rtx libcall_insn)
     }

   /* We may have just removed some of the src_elt's from the hash table.
-     So replace each one with the current head of the same class.  */
+     So replace each one with the current head of the same class.
+     Also check if destination addresses have been removed.  */

   for (i = 0; i < n_sets; i++)
     if (sets[i].rtl)
       {
-	if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
+	if (sets[i].dest_addr_elt
+	    && sets[i].dest_addr_elt->first_same_value == 0)
+	  {
+	    /* The elt was removed, which means this destination is not
+	       valid after this instruction.  */
+	    sets[i].rtl = NULL_RTX;
+	  }
+	else if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
 	  /* If elt was removed, find current head of same class,
 	     or 0 if nothing remains of that class.  */
 	  {


@@ -656,13 +656,19 @@ add_fde_cfi (const char *label, dw_cfi_ref cfi)
 	{
 	  dw_cfi_ref xcfi;

-	  fde->dw_fde_current_label = label = xstrdup (label);
+	  label = xstrdup (label);

 	  /* Set the location counter to the new label.  */
 	  xcfi = new_cfi ();
-	  xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
+	  /* If we have a current label, advance from there, otherwise
+	     set the location directly using set_loc.  */
+	  xcfi->dw_cfi_opc = fde->dw_fde_current_label
+			     ? DW_CFA_advance_loc4
+			     : DW_CFA_set_loc;
 	  xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
 	  add_cfi (&fde->dw_fde_cfi, xcfi);
+
+	  fde->dw_fde_current_label = label;
 	}

       add_cfi (&fde->dw_fde_cfi, cfi);
@@ -2050,6 +2056,7 @@ output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
       else
 	dw2_asm_output_addr (DWARF2_ADDR_SIZE,
 			     cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
+      fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
       break;

     case DW_CFA_advance_loc1:
@@ -2532,7 +2539,7 @@ dwarf2out_begin_prologue (unsigned int line ATTRIBUTE_UNUSED,
   fde = &fde_table[fde_table_in_use++];
   fde->decl = current_function_decl;
   fde->dw_fde_begin = dup_label;
-  fde->dw_fde_current_label = NULL;
+  fde->dw_fde_current_label = dup_label;
   fde->dw_fde_hot_section_label = NULL;
   fde->dw_fde_hot_section_end_label = NULL;
   fde->dw_fde_unlikely_section_label = NULL;
@@ -3934,6 +3941,10 @@ dwarf2out_switch_text_section (void)
   fde->dw_fde_unlikely_section_label = cfun->cold_section_label;
   fde->dw_fde_unlikely_section_end_label = cfun->cold_section_end_label;
   have_switched_text_section = true;
+
+  /* Reset the current label on switching text sections, so that we
+     don't attempt to advance_loc4 between labels in different sections.  */
+  fde->dw_fde_current_label = NULL;
 }

 #endif
@@ -9095,6 +9106,7 @@ loc_descriptor_from_tree_1 (tree loc, int want_address)
       /* FALLTHRU */

     case RESULT_DECL:
+    case FUNCTION_DECL:
       {
 	rtx rtl = rtl_for_decl_location (loc);
@@ -10378,6 +10390,7 @@ convert_cfa_to_loc_list (void)
     for (cfi = fde->dw_fde_cfi; cfi; cfi = cfi->dw_cfi_next)
       switch (cfi->dw_cfi_opc)
 	{
+	case DW_CFA_set_loc:
 	case DW_CFA_advance_loc1:
 	case DW_CFA_advance_loc2:
 	case DW_CFA_advance_loc4:
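Taken together, these hunks track the last CFI label per FDE and fall back to an absolute DW_CFA_set_loc whenever there is no label to advance from, e.g. right after the hot/cold text-section switch. A toy emitter showing just that decision (not GCC's CFI machinery):

#include <stdio.h>

static const char *current_label;   /* cf. fde->dw_fde_current_label */

static void move_to (const char *label)
{
  if (current_label)
    printf ("DW_CFA_advance_loc4  %s - %s\n", label, current_label);
  else
    printf ("DW_CFA_set_loc       %s\n", label);
  current_label = label;
}

int main (void)
{
  move_to (".L1");        /* nothing to advance from yet: set_loc */
  move_to (".L2");        /* same section: advance_loc4 .L2 - .L1 */
  current_label = NULL;   /* cf. dwarf2out_switch_text_section */
  move_to (".Lcold1");    /* new section: set_loc again */
  return 0;
}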


@@ -282,14 +282,16 @@ force_fit_type (tree t, int overflowable,
 }

 /* Add two doubleword integers with doubleword result.
+   Return nonzero if the operation overflows according to UNSIGNED_P.
    Each argument is given as two `HOST_WIDE_INT' pieces.
    One argument is L1 and H1; the other, L2 and H2.
    The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

 int
-add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
-	    unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
-	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
+add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
+		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
+		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
+		      bool unsigned_p)
 {
   unsigned HOST_WIDE_INT l;
   HOST_WIDE_INT h;
@@ -299,7 +301,11 @@ add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
   *lv = l;
   *hv = h;

-  return OVERFLOW_SUM_SIGN (h1, h2, h);
+  if (unsigned_p)
+    return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
+  else
+    return OVERFLOW_SUM_SIGN (h1, h2, h);
 }

 /* Negate a doubleword integer with doubleword result.
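For the unsigned case added above, overflow of a two-word addition is visible in the result itself. A standalone sketch with two 32-bit halves standing in for the HOST_WIDE_INT pair (this shows the complete two-word wrap test; the patch itself uses the high-word comparison):

#include <stdint.h>
#include <stdio.h>

static int
add2_unsigned (uint32_t l1, uint32_t h1, uint32_t l2, uint32_t h2,
               uint32_t *lv, uint32_t *hv)
{
  uint32_t l = l1 + l2;
  uint32_t carry = l < l1;            /* carry out of the low word */
  uint32_t h = h1 + h2 + carry;

  *lv = l;
  *hv = h;
  /* (h,l) wrapped iff the sum ended up below (h1,l1).  */
  return h < h1 || (h == h1 && l < l1);
}

int main (void)
{
  uint32_t l, h;
  int ovf = add2_unsigned (0xffffffffu, 0xffffffffu, 1, 0, &l, &h);
  printf ("result = %08x%08x, overflow = %d\n", h, l, ovf);
  /* prints "result = 0000000000000000, overflow = 1" */
  return 0;
}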
@@ -326,15 +332,16 @@ neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
 }

 /* Multiply two doubleword integers with doubleword result.
-   Return nonzero if the operation overflows, assuming it's signed.
+   Return nonzero if the operation overflows according to UNSIGNED_P.
    Each argument is given as two `HOST_WIDE_INT' pieces.
    One argument is L1 and H1; the other, L2 and H2.
    The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

 int
-mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
-	    unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
-	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
+mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
+		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
+		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
+		      bool unsigned_p)
 {
   HOST_WIDE_INT arg1[4];
   HOST_WIDE_INT arg2[4];
@@ -365,11 +372,15 @@ mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
       prod[i + 4] = carry;
     }

-  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */
-
-  /* Check for overflow by calculating the top half of the answer in full;
-     it should agree with the low half's sign bit.  */
+  decode (prod, lv, hv);
   decode (prod + 4, &toplow, &tophigh);
+
+  /* Unsigned overflow is immediate.  */
+  if (unsigned_p)
+    return (toplow | tophigh) != 0;
+
+  /* Check for signed overflow by calculating the signed representation of the
+     top half of the result; it should agree with the low half's sign bit.  */
   if (h1 < 0)
     {
       neg_double (l2, h2, &neglow, &neghigh);
@@ -5946,28 +5957,30 @@ fold_div_compare (enum tree_code code, tree type, tree arg0, tree arg1)
   tree arg01 = TREE_OPERAND (arg0, 1);
   unsigned HOST_WIDE_INT lpart;
   HOST_WIDE_INT hpart;
+  bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
   int overflow;

   /* We have to do this the hard way to detect unsigned overflow.
      prod = int_const_binop (MULT_EXPR, arg01, arg1, 0);  */
-  overflow = mul_double (TREE_INT_CST_LOW (arg01),
-			 TREE_INT_CST_HIGH (arg01),
-			 TREE_INT_CST_LOW (arg1),
-			 TREE_INT_CST_HIGH (arg1), &lpart, &hpart);
+  overflow = mul_double_with_sign (TREE_INT_CST_LOW (arg01),
+				   TREE_INT_CST_HIGH (arg01),
+				   TREE_INT_CST_LOW (arg1),
+				   TREE_INT_CST_HIGH (arg1),
+				   &lpart, &hpart, unsigned_p);
   prod = build_int_cst_wide (TREE_TYPE (arg00), lpart, hpart);
   prod = force_fit_type (prod, -1, overflow, false);

-  if (TYPE_UNSIGNED (TREE_TYPE (arg0)))
+  if (unsigned_p)
     {
       tmp = int_const_binop (MINUS_EXPR, arg01, integer_one_node, 0);
       lo = prod;

       /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp, 0).  */
-      overflow = add_double (TREE_INT_CST_LOW (prod),
-			     TREE_INT_CST_HIGH (prod),
-			     TREE_INT_CST_LOW (tmp),
-			     TREE_INT_CST_HIGH (tmp),
-			     &lpart, &hpart);
+      overflow = add_double_with_sign (TREE_INT_CST_LOW (prod),
+				       TREE_INT_CST_HIGH (prod),
+				       TREE_INT_CST_LOW (tmp),
+				       TREE_INT_CST_HIGH (tmp),
+				       &lpart, &hpart, unsigned_p);
       hi = build_int_cst_wide (TREE_TYPE (arg00), lpart, hpart);
       hi = force_fit_type (hi, -1, overflow | TREE_OVERFLOW (prod),
			    TREE_CONSTANT_OVERFLOW (prod));
@@ -7794,12 +7807,12 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
       /* (-A) * (-B) -> A * B  */
       if (TREE_CODE (arg0) == NEGATE_EXPR && negate_expr_p (arg1))
	return fold_build2 (MULT_EXPR, type,
-			    TREE_OPERAND (arg0, 0),
-			    negate_expr (arg1));
+			    fold_convert (type, TREE_OPERAND (arg0, 0)),
+			    fold_convert (type, negate_expr (arg1)));
       if (TREE_CODE (arg1) == NEGATE_EXPR && negate_expr_p (arg0))
	return fold_build2 (MULT_EXPR, type,
-			    negate_expr (arg0),
-			    TREE_OPERAND (arg1, 0));
+			    fold_convert (type, negate_expr (arg0)),
+			    fold_convert (type, TREE_OPERAND (arg1, 0)));

       if (! FLOAT_TYPE_P (type))
	{
@@ -9361,7 +9374,8 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
	  && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	  && 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR
				      ? MINUS_EXPR : PLUS_EXPR,
-				      arg1, TREE_OPERAND (arg0, 1), 0))
+				      fold_convert (TREE_TYPE (arg0), arg1),
+				      TREE_OPERAND (arg0, 1), 0))
	  && ! TREE_CONSTANT_OVERFLOW (tem))
	return fold_build2 (code, type, TREE_OPERAND (arg0, 0), tem);


@@ -40,6 +40,28 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #define ATTRIBUTE_HIDDEN
 #endif

+#ifndef MIN_UNITS_PER_WORD
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+/* Work out the largest "word" size that we can deal with on this target.  */
+#if MIN_UNITS_PER_WORD > 4
+# define LIBGCC2_MAX_UNITS_PER_WORD 8
+#elif (MIN_UNITS_PER_WORD > 2 \
+       || (MIN_UNITS_PER_WORD > 1 && LONG_LONG_TYPE_SIZE > 32))
+# define LIBGCC2_MAX_UNITS_PER_WORD 4
+#else
+# define LIBGCC2_MAX_UNITS_PER_WORD MIN_UNITS_PER_WORD
+#endif
+
+/* Work out what word size we are using for this compilation.
+   The value can be set on the command line.  */
+#ifndef LIBGCC2_UNITS_PER_WORD
+#define LIBGCC2_UNITS_PER_WORD LIBGCC2_MAX_UNITS_PER_WORD
+#endif
+
+#if LIBGCC2_UNITS_PER_WORD <= LIBGCC2_MAX_UNITS_PER_WORD
+
 #include "libgcc2.h"

 #ifdef DECLARE_LIBRARY_RENAMES
@@ -2010,3 +2032,4 @@ func_ptr __DTOR_LIST__[2];
 #endif
 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
 #endif /* L_ctors */
+#endif /* LIBGCC2_UNITS_PER_WORD <= MIN_UNITS_PER_WORD */


@@ -79,10 +79,6 @@ extern short int __get_eh_table_version (struct exception_descriptor *);
	(BITS_PER_UNIT == 8 && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
 #endif

-#ifndef MIN_UNITS_PER_WORD
-#define MIN_UNITS_PER_WORD UNITS_PER_WORD
-#endif
-
 /* In the first part of this file, we are interfacing to calls generated
    by the compiler itself.  These calls pass values into these routines
    which have very specific modes (rather than very specific types), and
@@ -155,7 +151,7 @@ typedef int word_type __attribute__ ((mode (__word__)));
    turns out that no platform would define COMPAT_DIMODE_TRAPPING_ARITHMETIC
    if it existed.  */

-#if MIN_UNITS_PER_WORD > 4
+#if LIBGCC2_UNITS_PER_WORD == 8
 #define W_TYPE_SIZE (8 * BITS_PER_UNIT)
 #define Wtype	DItype
 #define UWtype	UDItype
@@ -166,8 +162,7 @@ typedef int word_type __attribute__ ((mode (__word__)));
 #define __NW(a,b)	__ ## a ## di ## b
 #define __NDW(a,b)	__ ## a ## ti ## b
 #define COMPAT_SIMODE_TRAPPING_ARITHMETIC
-#elif MIN_UNITS_PER_WORD > 2 \
-      || (MIN_UNITS_PER_WORD > 1 && LONG_LONG_TYPE_SIZE > 32)
+#elif LIBGCC2_UNITS_PER_WORD == 4
 #define W_TYPE_SIZE (4 * BITS_PER_UNIT)
 #define Wtype	SItype
 #define UWtype	USItype
@@ -177,7 +172,7 @@ typedef int word_type __attribute__ ((mode (__word__)));
 #define UDWtype	UDItype
 #define __NW(a,b)	__ ## a ## si ## b
 #define __NDW(a,b)	__ ## a ## di ## b
-#elif MIN_UNITS_PER_WORD > 1
+#elif LIBGCC2_UNITS_PER_WORD == 2
 #define W_TYPE_SIZE (2 * BITS_PER_UNIT)
 #define Wtype	HItype
 #define UWtype	UHItype
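Together, the libgcc2.c and libgcc2.h hunks let one source file be compiled several times at different word sizes. A minimal illustration of selecting a word type from a command-line macro the same way (UNITS and Wtype here are illustrative stand-ins, not libgcc's macros):

#include <stdint.h>
#include <stdio.h>

#ifndef UNITS
#define UNITS 4            /* default, like LIBGCC2_MAX_UNITS_PER_WORD */
#endif

#if UNITS == 8
typedef int64_t Wtype;
#elif UNITS == 4
typedef int32_t Wtype;
#elif UNITS == 2
typedef int16_t Wtype;
#else
#error "unsupported word size"
#endif

int main (void)
{
  printf ("word is %zu bytes\n", sizeof (Wtype));
  return 0;
}

Compiling once plain and once with -DUNITS=8 yields both variants from the same file, which is how the build-script hunks below drive libgcc2.c via -DLIBGCC2_UNITS_PER_WORD.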


@@ -962,8 +962,7 @@ update_equiv_regs (void)
	  /* If we haven't done so, record for reload that this is an
	     equivalencing insn.  */
-	  if (!reg_equiv[regno].is_arg_equivalence
-	      && (!MEM_P (x) || rtx_equal_p (src, x)))
+	  if (!reg_equiv[regno].is_arg_equivalence)
	    reg_equiv_init[regno]
	      = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);


@@ -8824,14 +8824,13 @@ biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
 }

-/* Return false iff it is provable that biv BL plus BIAS will not wrap
-   at any point in its update sequence.  Note that at the rtl level we
-   may not have information about the signedness of BL; in that case,
-   check for both signed and unsigned overflow.  */
+/* Return false iff it is provable that biv BL will not wrap at any point
+   in its update sequence.  Note that at the RTL level we may not have
+   information about the signedness of BL; in that case, check for both
+   signed and unsigned overflow.  */

 static bool
-biased_biv_may_wrap_p (const struct loop *loop, struct iv_class *bl,
-		       unsigned HOST_WIDE_INT bias)
+biv_may_wrap_p (const struct loop *loop, struct iv_class *bl)
 {
   HOST_WIDE_INT incr;
   bool check_signed, check_unsigned;
@@ -8867,12 +8866,12 @@ biased_biv_may_wrap_p (const struct loop *loop, struct iv_class *bl,
   mode = GET_MODE (bl->biv->src_reg);

   if (check_unsigned
-      && !biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
+      && !biased_biv_fits_mode_p (loop, bl, incr, mode, 0))
     return true;

   if (check_signed)
     {
-      bias += (GET_MODE_MASK (mode) >> 1) + 1;
+      unsigned HOST_WIDE_INT bias = (GET_MODE_MASK (mode) >> 1) + 1;
       if (!biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
	return true;
     }
@@ -10306,8 +10305,7 @@ maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
	   valid programs.  */
	/* Without lifetime analysis, we don't know how COMPARE will be
	   used, so we must assume the worst.  */
-	if (code != EQ && code != NE
-	    && biased_biv_may_wrap_p (loop, bl, INTVAL (arg)))
+	if (code != EQ && code != NE && biv_may_wrap_p (loop, bl))
	  return 0;

	/* Try to replace with any giv that has constant positive mult_val


@@ -24,6 +24,7 @@
 # FPBIT
 # FPBIT_FUNCS
 # LIB2_DIVMOD_FUNCS
+# LIB2_SIDITI_CONV_FUNCS
 # DPBIT
 # DPBIT_FUNCS
 # TPBIT
@@ -53,11 +54,26 @@ echo 'all: stmp-dirs'
 echo 'dirs = libgcc'
 echo

-# Library members defined in libgcc2.c.
+# The floating-point conversion routines that involve a single-word integer.
+# XX stands for the integer mode.
+swfloatfuncs=
+for mode in sf df xf; do
+  swfloatfuncs="$swfloatfuncs _fixuns${mode}XX"
+done
+
+# Likewise double-word routines.
+dwfloatfuncs=
+for mode in sf df xf tf; do
+  dwfloatfuncs="$dwfloatfuncs _fix${mode}XX _fixuns${mode}XX"
+  dwfloatfuncs="$dwfloatfuncs _floatXX${mode}"
+done
+
+# Entries of the form <objfile>:<func>:<wordsize> indicate that libgcc2.c
+# should be compiled with L<func> defined and with LIBGCC2_UNITS_PER_WORD
+# set to <wordsize>.  <objfile> is the name of the associated object file.
 lib2funcs='_muldi3 _negdi2 _lshrdi3 _ashldi3 _ashrdi3
-	_cmpdi2 _ucmpdi2 _floatdidf _floatdisf _fixunsdfsi _fixunssfsi
-	_fixunsdfdi _fixdfdi _fixunssfdi _fixsfdi _fixxfdi _fixunsxfdi
-	_floatdixf _fixunsxfsi _fixtfdi _fixunstfdi _floatditf _clear_cache
+	_cmpdi2 _ucmpdi2 _clear_cache
	_enable_execute_stack _trampoline __main _absvsi2 _absvdi2 _addvsi3
	_addvdi3 _subvsi3 _subvdi3 _mulvsi3 _mulvdi3 _negvsi2 _negvdi2 _ctors
	_ffssi2 _ffsdi2 _clz _clzsi2 _clzdi2 _ctzsi2 _ctzdi2 _popcount_tab
@@ -65,6 +81,21 @@ lib2funcs='_muldi3 _negdi2 _lshrdi3 _ashldi3 _ashrdi3
	_powixf2 _powitf2 _mulsc3 _muldc3 _mulxc3 _multc3 _divsc3 _divdc3
	_divxc3 _divtc3'

+if [ "$LIB2_SIDITI_CONV_FUNCS" ]; then
+  for func in $swfloatfuncs; do
+    sifunc=`echo $func | sed -e 's/XX/si/'`
+    lib2funcs="$lib2funcs $sifunc:$sifunc:4"
+  done
+  for func in $dwfloatfuncs; do
+    difunc=`echo $func | sed -e 's/XX/di/'`
+    tifunc=`echo $func | sed -e 's/XX/ti/'`
+    lib2funcs="$lib2funcs $difunc:$difunc:4 $tifunc:$difunc:8"
+  done
+else
+  lib2funcs="$lib2funcs `echo $swfloatfuncs | sed -e 's/XX/si/g'`"
+  lib2funcs="$lib2funcs `echo $dwfloatfuncs | sed -e 's/XX/di/g'`"
+fi
+
 # Disable SHLIB_LINK if shared libgcc not enabled.
 if [ "@enable_shared@" = "no" ]; then
   SHLIB_LINK=""
@@ -145,8 +176,8 @@ fi
 # Remove any objects from lib2funcs and LIB2_DIVMOD_FUNCS that are
 # defined as optimized assembly code in LIB1ASMFUNCS.
 for name in $LIB1ASMFUNCS; do
-  lib2funcs=`echo $lib2funcs | sed -e 's/^'$name' //' \
-				   -e 's/ '$name' / /' \
+  lib2funcs=`echo $lib2funcs | sed -e 's/^'$name'[ :]//' \
+				   -e 's/ '$name'[ :]/ /' \
				   -e 's/ '$name'$//'`
   LIB2_DIVMOD_FUNCS=`echo $LIB2_DIVMOD_FUNCS | sed -e 's/^'$name' //' \
				   -e 's/ '$name' / /' \
@@ -248,16 +279,25 @@ for ml in $MULTILIBS; do
   #
   for name in $lib2funcs; do

+    case $name in
+      *:*:*)
+	defines=`echo $name | sed -e 's/.*:\(.*\):\(.*\)/-DL\1 -DLIBGCC2_UNITS_PER_WORD=\2/'`
+	name=`echo $name | sed -e 's/\(.*\):.*:.*/\1/'`
+	;;
+      *)
+	defines="-DL$name"
+	;;
+    esac
+
     if [ "$libgcc_s_so" ]; then
       out="libgcc/${dir}/${name}${objext}"
       outS="libgcc/${dir}/${name}_s${objext}"

       echo $outS: $libgcc2_c_dep
-      echo "	$gcc_s_compile" $flags -DL$name -c '$(srcdir)/libgcc2.c' \
+      echo "	$gcc_s_compile" $flags $defines -c '$(srcdir)/libgcc2.c' \
	  -o $outS

       echo $out: $libgcc2_c_dep
-      echo "	$gcc_compile" $flags -DL$name '$(vis_hide)' \
+      echo "	$gcc_compile" $flags $defines '$(vis_hide)' \
	  -c '$(srcdir)/libgcc2.c' -o $out

       echo $libgcc_a: $out
@@ -268,7 +308,7 @@ for ml in $MULTILIBS; do
     else
       out="libgcc/${dir}/${name}${objext}"
       echo ${out}: stmp-dirs '$(srcdir)/config/$(LIB1ASMSRC)'
-      echo "	$gcc_compile" $flags -DL$name -c '$(srcdir)/libgcc2.c' -o $out
+      echo "	$gcc_compile" $flags $defines -c '$(srcdir)/libgcc2.c' -o $out
       echo $libgcc_a: $out
     fi
   done
@@ -823,12 +863,15 @@ for ml in $MULTILIBS; do
	  ldir='$(DESTDIR)$(libsubdir)'
	fi
	echo '	$(INSTALL_DATA)' ${dir}/libgcc.a ${ldir}/
+	echo '	chmod 644' ${ldir}/libgcc.a
	echo '	$(RANLIB_FOR_TARGET)' ${ldir}/libgcc.a
	echo '	$(INSTALL_DATA)' ${dir}/libgcov.a ${ldir}/
+	echo '	chmod 644' ${ldir}/libgcov.a
	echo '	$(RANLIB_FOR_TARGET)' ${ldir}/libgcov.a

	if [ "$SHLIB_LINK" ]; then
	  echo '	$(INSTALL_DATA)' ${dir}/libgcc_eh.a ${ldir}/
+	  echo '	chmod 644' ${ldir}/libgcc_eh.a
	  echo '	$(RANLIB_FOR_TARGET)' ${ldir}/libgcc_eh.a

	  shlib_slibdir_qual=
@@ -847,6 +890,7 @@ for ml in $MULTILIBS; do
		  -e "s%@shlib_slibdir_qual@%$shlib_slibdir_qual%g"
	  libunwinddir='$(DESTDIR)$(slibdir)$(shlib_slibdir_qual)/$(shlib_dir)'
	  echo '	$(INSTALL_DATA)' ${dir}/libunwind.a ${libunwinddir}/
+	  echo '	chmod 644' ${dir}/libunwind.a
	  echo '	$(RANLIB_FOR_TARGET)' ${libunwinddir}/libunwind.a
	fi
       fi
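The new <objfile>:<func>:<wordsize> entries decode into a pair of -D flags, as the sed commands in the case statement above show. A hypothetical C rendering of the same decoding (toy code, not part of the build machinery):

#include <stdio.h>

int main (void)
{
  const char *entry = "_fixsfti:_fixsfdi:8";  /* TImode variant of _fixsfdi */
  char obj[32], func[32];
  int wordsize;

  if (sscanf (entry, "%31[^:]:%31[^:]:%d", obj, func, &wordsize) == 3)
    printf ("%s.o: cc -DL%s -DLIBGCC2_UNITS_PER_WORD=%d libgcc2.c\n",
            obj, func, wordsize);
  return 0;
}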


@@ -226,7 +226,6 @@ instrument_values (histogram_values values)
	  gcc_unreachable ();
	}
     }
-  VEC_free (histogram_value, heap, values);
 }
@@ -1158,6 +1157,7 @@ branch_prob (void)
       dump_flow_info (profile_dump_file());
     }

+  VEC_free (histogram_value, heap, values);
   free_edge_list (el);
   if (flag_branch_probabilities)
     profile_status = PROFILE_READ;
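This fix moves the VEC_free from the callee to the end of branch_prob, since the values vector is still used after instrument_values returns. The shape of the bug in plain C (toy code, not GCC's VEC API):

#include <stdlib.h>
#include <stdio.h>

static void instrument_values (const int *values, size_t n)
{
  for (size_t i = 0; i < n; i++)
    printf ("instrumenting %d\n", values[i]);
  /* The old code freed the vector here -- too early.  */
}

int main (void)
{
  size_t n = 3;
  int *values = malloc (n * sizeof *values);
  for (size_t i = 0; i < n; i++)
    values[i] = (int) i;

  instrument_values (values, n);
  printf ("caller still reads: %d\n", values[0]); /* valid: not yet freed */
  free (values);   /* freed at the end of the caller, as in branch_prob */
  return 0;
}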


@@ -2867,6 +2867,8 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
       dest = SET_DEST (pat), src = SET_SRC (pat);
       if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
	  && rtx_equal_p (XEXP (src, 0), dest)
+	  && (!FLOAT_MODE_P (GET_MODE (src))
+	      || flag_unsafe_math_optimizations)
	  && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
	  && ! side_effects_p (pat))
	{
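The FLOAT_MODE_P test added above exists because the delay-slot transformation effectively reassociates the arithmetic, which is not value-preserving in floating point. A short demonstration of why this must be gated behind -funsafe-math-optimizations:

#include <stdio.h>

int main (void)
{
  double a = 1.0, b = 1e16;
  double roundtrip = (a + b) - b;   /* 0.0, not 1.0: the add rounded a away */
  printf ("%g\n", roundtrip);
  return 0;
}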
@@ -3377,11 +3379,11 @@ relax_delay_slots (rtx first)
	  continue;
	}

-      /* See if this jump (with its delay slots) branches around another
-	 jump (without delay slots).  If so, invert this jump and point
-	 it to the target of the second jump.  We cannot do this for
-	 annulled jumps, though.  Again, don't convert a jump to a RETURN
-	 here.  */
+      /* See if this jump (with its delay slots) conditionally branches
+	 around an unconditional jump (without delay slots).  If so, invert
+	 this jump and point it to the target of the second jump.  We cannot
+	 do this for annulled jumps, though.  Again, don't convert a jump to
+	 a RETURN here.  */
       if (! INSN_ANNULLED_BRANCH_P (delay_insn)
	  && any_condjump_p (delay_insn)
	  && next && JUMP_P (next)


@@ -484,7 +484,8 @@ relayout_decl (tree decl)
 {
   DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
   DECL_MODE (decl) = VOIDmode;
-  DECL_ALIGN (decl) = 0;
+  if (!DECL_USER_ALIGN (decl))
+    DECL_ALIGN (decl) = 0;
   SET_DECL_RTL (decl, 0);

   layout_decl (decl, 0);
@@ -1524,6 +1525,8 @@ finalize_type_size (tree type)
 void
 finish_record_layout (record_layout_info rli, int free_p)
 {
+  tree variant;
+
   /* Compute the final size.  */
   finalize_record_size (rli);
@@ -1533,6 +1536,12 @@ finish_record_layout (record_layout_info rli, int free_p)
   /* Perform any last tweaks to the TYPE_SIZE, etc.  */
   finalize_type_size (rli->t);

+  /* Propagate TYPE_PACKED to variants.  With C++ templates,
+     handle_packed_attribute is too early to do this.  */
+  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
+       variant = TYPE_NEXT_VARIANT (variant))
+    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
+
   /* Lay out any static members.  This is done now because their type
      may use the record's type.  */
   while (rli->pending_statics)


@@ -998,7 +998,7 @@ compile_file (void)
      what's left of the symbol table output.  */
   timevar_pop (TV_PARSE);

-  if (flag_syntax_only)
+  if (flag_syntax_only || errorcount || sorrycount)
     return;

   lang_hooks.decls.final_write_globals ();


@@ -1082,6 +1082,122 @@ nb_vars_in_chrec (tree chrec)
     }
 }

+/* Returns true if TYPE is a type in which we cannot directly perform
+   arithmetic, even though it is a scalar type.  */
+
+static bool
+avoid_arithmetics_in_type_p (tree type)
+{
+  /* The Ada frontend uses subtypes -- arithmetic cannot be performed
+     directly in the subtype, but a base type must be used, and the result
+     then can be cast to the subtype.  */
+  if (TREE_CODE (type) == INTEGER_TYPE && TREE_TYPE (type) != NULL_TREE)
+    return true;
+
+  return false;
+}
+
+static tree chrec_convert_1 (tree, tree, tree, bool);
+
+/* Converts BASE and STEP of affine scev to TYPE.  LOOP is the loop whose iv
+   the scev corresponds to.  AT_STMT is the statement at which the scev is
+   evaluated.  USE_OVERFLOW_SEMANTICS is true if this function should assume
+   that the rules for overflow of the given language apply (e.g., that signed
+   arithmetic in C does not overflow) -- i.e., to use them to avoid
+   unnecessary tests, but also to enforce that the result follows them.
+   Returns true if the conversion succeeded, false otherwise.  */
+
+bool
+convert_affine_scev (struct loop *loop, tree type,
+		     tree *base, tree *step, tree at_stmt,
+		     bool use_overflow_semantics)
+{
+  tree ct = TREE_TYPE (*step);
+  bool enforce_overflow_semantics;
+  bool must_check_src_overflow, must_check_rslt_overflow;
+  tree new_base, new_step;
+
+  /* If we cannot perform arithmetic in TYPE, avoid creating an scev.  */
+  if (avoid_arithmetics_in_type_p (type))
+    return false;
+
+  /* In general,
+     (TYPE) (BASE + STEP * i) = (TYPE) BASE + (TYPE -- sign extend) STEP * i,
+     but we must check some assumptions.
+
+     1) If [BASE, +, STEP] wraps, the equation is not valid when precision
+	of CT is smaller than the precision of TYPE.  For example, when we
+	cast unsigned char [254, +, 1] to unsigned, the values on left side
+	are 254, 255, 0, 1, ..., but those on the right side are
+	254, 255, 256, 257, ...
+     2) In case that we must also preserve the fact that signed ivs do not
+	overflow, we must additionally check that the new iv does not wrap.
+	For example, unsigned char [125, +, 1] casted to signed char could
+	become a wrapping variable with values 125, 126, 127, -128, -127, ...,
+	which would confuse optimizers that assume that this does not
+	happen.  */
+  must_check_src_overflow = TYPE_PRECISION (ct) < TYPE_PRECISION (type);
+
+  enforce_overflow_semantics = (use_overflow_semantics
+				&& nowrap_type_p (type));
+  if (enforce_overflow_semantics)
+    {
+      /* We can avoid checking whether the result overflows in the following
+	 cases:
+
+	 -- must_check_src_overflow is true, and the range of TYPE is superset
+	    of the range of CT -- i.e., in all cases except if CT signed and
+	    TYPE unsigned.
+	 -- both CT and TYPE have the same precision and signedness.  */
+      if (must_check_src_overflow)
+	{
+	  if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (ct))
+	    must_check_rslt_overflow = true;
+	  else
+	    must_check_rslt_overflow = false;
+	}
+      else if (TYPE_UNSIGNED (ct) == TYPE_UNSIGNED (type)
+	       && TYPE_PRECISION (ct) == TYPE_PRECISION (type))
+	must_check_rslt_overflow = false;
+      else
+	must_check_rslt_overflow = true;
+    }
+  else
+    must_check_rslt_overflow = false;
+
+  if (must_check_src_overflow
+      && scev_probably_wraps_p (*base, *step, at_stmt, loop,
+				use_overflow_semantics))
+    return false;
+
+  new_base = chrec_convert_1 (type, *base, at_stmt,
+			      use_overflow_semantics);
+  /* The step must be sign extended, regardless of the signedness
+     of CT and TYPE.  This only needs to be handled specially when
+     CT is unsigned -- to avoid e.g. unsigned char [100, +, 255]
+     (with values 100, 99, 98, ...) from becoming signed or unsigned
+     [100, +, 255] with values 100, 355, ...; the sign-extension is
+     performed by default when CT is signed.  */
+  new_step = *step;
+  if (TYPE_PRECISION (type) > TYPE_PRECISION (ct) && TYPE_UNSIGNED (ct))
+    new_step = chrec_convert_1 (signed_type_for (ct), new_step, at_stmt,
+				use_overflow_semantics);
+  new_step = chrec_convert_1 (type, new_step, at_stmt, use_overflow_semantics);
+
+  if (automatically_generated_chrec_p (new_base)
+      || automatically_generated_chrec_p (new_step))
+    return false;
+
+  if (must_check_rslt_overflow
+      /* Note that in this case we cannot use the fact that signed variables
+	 do not overflow, as this is what we are verifying for the new iv.  */
+      && scev_probably_wraps_p (new_base, new_step, at_stmt, loop, false))
+    return false;
+
+  *base = new_base;
+  *step = new_step;
+  return true;
+}
+
 /* Convert CHREC to TYPE.  When the analyzer knows the context in
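Caveat 1 in the comment above can be watched directly: widening a wrapping narrow induction variable changes the values it takes. A runnable demo of unsigned char {254, +, 1} versus its unsigned int widening:

#include <stdio.h>

int main (void)
{
  unsigned char narrow = 254;   /* the iv in its original type */
  unsigned int wide = 254;      /* the naively widened scev */

  for (int i = 0; i < 4; i++, narrow++, wide++)
    printf ("narrow = %3u   wide = %u\n", narrow, wide);
  /* narrow: 254 255 0 1   wide: 254 255 256 257 -- they diverge,
     so the conversion must be rejected unless wrapping is disproved.  */
  return 0;
}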
@@ -1110,8 +1226,29 @@ nb_vars_in_chrec (tree chrec)

 tree
 chrec_convert (tree type, tree chrec, tree at_stmt)
+{
+  return chrec_convert_1 (type, chrec, at_stmt, true);
+}
+
+/* Convert CHREC to TYPE.  When the analyzer knows the context in
+   which the CHREC is built, it sets AT_STMT to the statement that
+   contains the definition of the analyzed variable, otherwise the
+   conversion is less accurate: the information is used for
+   determining a more accurate estimation of the number of iterations.
+   By default AT_STMT could be safely set to NULL_TREE.
+
+   USE_OVERFLOW_SEMANTICS is true if this function should assume that
+   the rules for overflow of the given language apply (e.g., that signed
+   arithmetic in C does not overflow) -- i.e., to use them to avoid
+   unnecessary tests, but also to enforce that the result follows them.  */
+
+static tree
+chrec_convert_1 (tree type, tree chrec, tree at_stmt,
+		 bool use_overflow_semantics)
 {
   tree ct, res;
+  tree base, step;
+  struct loop *loop;

   if (automatically_generated_chrec_p (chrec))
     return chrec;
@@ -1120,56 +1257,19 @@ chrec_convert (tree type, tree chrec, tree at_stmt)
   if (ct == type)
     return chrec;

-  if (evolution_function_is_affine_p (chrec))
-    {
-      tree base, step;
-      bool dummy;
-      struct loop *loop = current_loops->parray[CHREC_VARIABLE (chrec)];
-
-      base = instantiate_parameters (loop, CHREC_LEFT (chrec));
-      step = instantiate_parameters (loop, CHREC_RIGHT (chrec));
-
-      /* Avoid conversion of (signed char) {(uchar)1, +, (uchar)1}_x
-	 when it is not possible to prove that the scev does not wrap.
-	 See PR22236, where a sequence 1, 2, ..., 255 has to be
-	 converted to signed char, but this would wrap:
-	 1, 2, ..., 127, -128, ...  The result should not be
-	 {(schar)1, +, (schar)1}_x, but instead, we should keep the
-	 conversion: (schar) {(uchar)1, +, (uchar)1}_x.  */
-      if (scev_probably_wraps_p (type, base, step, at_stmt, loop,
-				 &dummy, &dummy))
-	goto failed_to_convert;
-
-      step = convert_step (loop, type, base, step, at_stmt);
-      if (!step)
-	{
-	failed_to_convert:;
-	  if (dump_file && (dump_flags & TDF_DETAILS))
-	    {
-	      fprintf (dump_file, "(failed conversion:");
-	      fprintf (dump_file, "\n  type: ");
-	      print_generic_expr (dump_file, type, 0);
-	      fprintf (dump_file, "\n  base: ");
-	      print_generic_expr (dump_file, base, 0);
-	      fprintf (dump_file, "\n  step: ");
-	      print_generic_expr (dump_file, step, 0);
-	      fprintf (dump_file, "\n  estimated_nb_iterations: ");
-	      print_generic_expr (dump_file, loop->estimated_nb_iterations, 0);
-	      fprintf (dump_file, "\n)\n");
-	    }
-
-	  return fold_convert (type, chrec);
-	}
-
-      return build_polynomial_chrec (CHREC_VARIABLE (chrec),
-				     chrec_convert (type, CHREC_LEFT (chrec),
-						    at_stmt),
-				     step);
-    }
-
-  if (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
-    return chrec_dont_know;
+  if (!evolution_function_is_affine_p (chrec))
+    goto keep_cast;
+
+  loop = current_loops->parray[CHREC_VARIABLE (chrec)];
+  base = CHREC_LEFT (chrec);
+  step = CHREC_RIGHT (chrec);
+
+  if (convert_affine_scev (loop, type, &base, &step, at_stmt,
+			   use_overflow_semantics))
+    return build_polynomial_chrec (loop->num, base, step);

+  /* If we cannot propagate the cast inside the chrec, just keep the cast.  */
+keep_cast:
   res = fold_convert (type, chrec);

   /* Don't propagate overflows.  */
@@ -1210,6 +1310,10 @@ chrec_convert_aggressive (tree type, tree chrec)
   if (TYPE_PRECISION (type) > TYPE_PRECISION (inner_type))
     return NULL_TREE;

+  /* If we cannot perform arithmetic in TYPE, avoid creating an scev.  */
+  if (avoid_arithmetics_in_type_p (type))
+    return NULL_TREE;
+
   left = CHREC_LEFT (chrec);
   right = CHREC_RIGHT (chrec);
   lc = chrec_convert_aggressive (type, left);
@@ -1218,7 +1322,7 @@ chrec_convert_aggressive (tree type, tree chrec)
   rc = chrec_convert_aggressive (type, right);
   if (!rc)
     rc = chrec_convert (type, right, NULL_TREE);

   return build_polynomial_chrec (CHREC_VARIABLE (chrec), lc, rc);
 }
@@ -1232,3 +1336,25 @@ chrec_type (tree chrec)

   return TREE_TYPE (chrec);
 }
+
+/* Returns EV_GROWS if CHREC grows (assuming that it does not overflow),
+   EV_DECREASES if it decreases, and EV_UNKNOWN if we cannot determine
+   which of these cases happens.  */
+
+enum ev_direction
+scev_direction (tree chrec)
+{
+  tree step;
+
+  if (!evolution_function_is_affine_p (chrec))
+    return EV_DIR_UNKNOWN;
+
+  step = CHREC_RIGHT (chrec);
+  if (TREE_CODE (step) != INTEGER_CST)
+    return EV_DIR_UNKNOWN;
+
+  if (tree_int_cst_sign_bit (step))
+    return EV_DIR_DECREASES;
+  else
+    return EV_DIR_GROWS;
+}


@@ -666,7 +666,7 @@ find_phi_replacement_condition (struct loop *loop,
 {
   basic_block first_bb = NULL;
   basic_block second_bb = NULL;
-  tree tmp_cond;
+  tree tmp_cond, new_stmts;

   gcc_assert (EDGE_COUNT (bb->preds) == 2);
   first_bb = (EDGE_PRED (bb, 0))->src;
@@ -732,6 +732,9 @@ find_phi_replacement_condition (struct loop *loop,
      value as condition.  Various targets use different means to communicate
      condition in vector compare operation.  Using gimple value allows compiler
      to emit vector compare and select RTL without exposing compare's result.  */
+  *cond = force_gimple_operand (*cond, &new_stmts, false, NULL_TREE);
+  if (new_stmts)
+    bsi_insert_before (bsi, new_stmts, BSI_SAME_STMT);
   if (!is_gimple_reg (*cond) && !is_gimple_condexpr (*cond))
     {
       tree new_stmt;


@@ -766,15 +766,22 @@ compute_flow_insensitive_aliasing (struct alias_info *ai)
       struct alias_map_d *p_map = ai->pointers[i];
       tree tag = var_ann (p_map->var)->type_mem_tag;
       var_ann_t tag_ann = var_ann (tag);
+      tree var;

       p_map->total_alias_vops = 0;
       p_map->may_aliases = BITMAP_ALLOC (&alias_obstack);

+      /* Add any pre-existing may_aliases to the bitmap used to represent
+	 TAG's alias set in case we need to group aliases.  */
+      if (tag_ann->may_aliases)
+	for (j = 0; j < VARRAY_ACTIVE_SIZE (tag_ann->may_aliases); ++j)
+	  bitmap_set_bit (p_map->may_aliases,
+			  DECL_UID (VARRAY_TREE (tag_ann->may_aliases, j)));
+
       for (j = 0; j < ai->num_addressable_vars; j++)
	{
	  struct alias_map_d *v_map;
	  var_ann_t v_ann;
-	  tree var;
	  bool tag_stored_p, var_stored_p;

	  v_map = ai->addressable_vars[j];
@@ -1811,8 +1818,7 @@ get_tmt_for (tree ptr, struct alias_info *ai)
     {
       struct alias_map_d *curr = ai->pointers[i];
       tree curr_tag = var_ann (curr->var)->type_mem_tag;
-      if (tag_set == curr->set
-	  && TYPE_READONLY (tag_type) == TYPE_READONLY (TREE_TYPE (curr_tag)))
+      if (tag_set == curr->set)
	{
	  tag = curr_tag;
	  break;
@@ -1849,10 +1855,6 @@ get_tmt_for (tree ptr, struct alias_info *ai)
      pointed-to type.  */
   gcc_assert (tag_set == get_alias_set (tag));

-  /* If PTR's pointed-to type is read-only, then TAG's type must also
-     be read-only.  */
-  gcc_assert (TYPE_READONLY (tag_type) == TYPE_READONLY (TREE_TYPE (tag)));
-
   return tag;
 }


@@ -1385,7 +1385,7 @@ idx_find_step (tree base, tree *idx, void *data)
 {
   struct ifs_ivopts_data *dta = data;
   struct iv *iv;
-  tree step, iv_step, lbound, off;
+  tree step, iv_base, iv_step, lbound, off;
   struct loop *loop = dta->ivopts_data->current_loop;

   if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
@@ -1438,12 +1438,11 @@ idx_find_step (tree base, tree *idx, void *data)
   /* The step for pointer arithmetics already is 1 byte.  */
   step = build_int_cst (sizetype, 1);

-  /* FIXME: convert_step should not be used outside chrec_convert: fix
-     this by calling chrec_convert.  */
-  iv_step = convert_step (dta->ivopts_data->current_loop,
-			  sizetype, iv->base, iv->step, dta->stmt);
-
-  if (!iv_step)
+  iv_base = iv->base;
+  iv_step = iv->step;
+  if (!convert_affine_scev (dta->ivopts_data->current_loop,
+			    sizetype, &iv_base, &iv_step, dta->stmt,
+			    false))
     {
       /* The index might wrap.  */
       return false;


@@ -486,7 +486,8 @@ execute_cse_reciprocals (void)
				      sizeof (struct occurrence),
				      n_basic_blocks / 3 + 1);

-  calculate_dominance_info (CDI_DOMINATORS | CDI_POST_DOMINATORS);
+  calculate_dominance_info (CDI_DOMINATORS);
+  calculate_dominance_info (CDI_POST_DOMINATORS);

 #ifdef ENABLE_CHECKING
   FOR_EACH_BB (bb)
@@ -523,7 +524,8 @@ execute_cse_reciprocals (void)
	}
     }

-  free_dominance_info (CDI_DOMINATORS | CDI_POST_DOMINATORS);
+  free_dominance_info (CDI_DOMINATORS);
+  free_dominance_info (CDI_POST_DOMINATORS);

   free_alloc_pool (occ_pool);
 }


@@ -1968,14 +1968,14 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   then_clause = TREE_OPERAND (op, 1);
   else_clause = TREE_OPERAND (op, 2);

+  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
+    return false;
+
   /* We do not handle two different vector types for the condition
      and the values.  */
   if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
     return false;

-  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
-    return false;
-
   if (TREE_CODE (then_clause) == SSA_NAME)
     {
       tree then_def_stmt = SSA_NAME_DEF_STMT (then_clause);


@@ -5036,7 +5036,11 @@ build_array_type (tree elt_type, tree index_type)

   if (index_type == 0)
     {
-      layout_type (t);
+      tree save = t;
+      hashcode = iterative_hash_object (TYPE_HASH (elt_type), hashcode);
+      t = type_hash_canon (hashcode, t);
+      if (save == t)
+	layout_type (t);
       return t;
     }
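This hunk hash-conses incomplete array types, so every unsized array of a given element type shares one node -- the invariant the grokdeclarator assertion at the top of this commit relies on. A toy hash-consing table showing the lookup-before-construct idea (collision handling omitted; this is not GCC's type_hash_canon):

#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 64

struct type { const char *name; int laid_out; };

static struct type *table[TABLE_SIZE];

static unsigned hash_name (const char *s)
{
  unsigned h = 5381;
  while (*s)
    h = h * 33 + (unsigned char) *s++;
  return h % TABLE_SIZE;
}

static struct type *canon (struct type *cand)
{
  unsigned h = hash_name (cand->name);
  if (table[h] && strcmp (table[h]->name, cand->name) == 0)
    return table[h];            /* share the existing canonical node */
  table[h] = cand;
  cand->laid_out = 1;           /* "lay out" only the first copy */
  return cand;
}

int main (void)
{
  struct type a = { "int[]", 0 }, b = { "int[]", 0 };
  struct type *pa = canon (&a), *pb = canon (&b);
  printf ("%s\n", pa == pb ? "shared" : "distinct");  /* prints "shared" */
  return 0;
}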


@@ -3821,14 +3821,20 @@ extern tree fold_indirect_ref_1 (tree, tree);

 extern tree force_fit_type (tree, int, bool, bool);

-extern int add_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-		       unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-		       unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
+extern int add_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				 unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
+				 bool);
+#define add_double(l1,h1,l2,h2,lv,hv) \
+  add_double_with_sign (l1, h1, l2, h2, lv, hv, false)
 extern int neg_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
		       unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
-extern int mul_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-		       unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-		       unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
+extern int mul_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				 unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
+				 bool);
+#define mul_double(l1,h1,l2,h2,lv,hv) \
+  mul_double_with_sign (l1, h1, l2, h2, lv, hv, false)
 extern void lshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
			   HOST_WIDE_INT, unsigned int,
			   unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, int);