From b6ef591915db1b33c7f2dc05ea3ea2457693c456 Mon Sep 17 00:00:00 2001 From: mark Date: Fri, 17 May 1996 21:16:45 +0000 Subject: [PATCH] Commit of arm32 architecture sources for GCC 2.7.2 --- gnu/usr.bin/gcc/arch/arm32/arm32.c | 3749 ++++++++++++++++ gnu/usr.bin/gcc/arch/arm32/arm32.h | 1759 ++++++++ gnu/usr.bin/gcc/arch/arm32/arm32.md | 5697 +++++++++++++++++++++++++ gnu/usr.bin/gcc/arch/arm32/config.h | 4 + gnu/usr.bin/gcc/arch/arm32/hconfig.h | 1 + gnu/usr.bin/gcc/arch/arm32/netbsd.h | 141 + gnu/usr.bin/gcc/arch/arm32/tconfig.h | 1 + gnu/usr.bin/gcc/arch/arm32/tm.h | 1 + gnu/usr.bin/gcc/arch/arm32/xm-arm32.h | 74 + 9 files changed, 11427 insertions(+) create mode 100644 gnu/usr.bin/gcc/arch/arm32/arm32.c create mode 100644 gnu/usr.bin/gcc/arch/arm32/arm32.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/arm32.md create mode 100644 gnu/usr.bin/gcc/arch/arm32/config.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/hconfig.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/netbsd.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/tconfig.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/tm.h create mode 100644 gnu/usr.bin/gcc/arch/arm32/xm-arm32.h diff --git a/gnu/usr.bin/gcc/arch/arm32/arm32.c b/gnu/usr.bin/gcc/arch/arm32/arm32.c new file mode 100644 index 000000000000..4c3927538058 --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/arm32.c @@ -0,0 +1,3749 @@ +/* Output routines for GCC for ARM/RISCiX. + Copyright (C) 1991, 1993, 1994, 1995 Free Software Foundation, Inc. + Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) + and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +#include +#include +#include "assert.h" +#include "config.h" +#include "rtl.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "insn-flags.h" +#include "output.h" +#include "insn-attr.h" +#include "flags.h" +#include "reload.h" +#include "tree.h" +#include "expr.h" + +/* The maximum number of insns skipped which will be conditionalised if + possible. */ +#define MAX_INSNS_SKIPPED 5 + +/* Some function declarations. */ +extern FILE *asm_out_file; +extern char *output_multi_immediate (); +extern void arm_increase_location (); + +HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT)); +static int get_prologue_size PROTO ((void)); + +/* Define the information needed to generate branch insns. This is + stored from the compare operation. */ + +rtx arm_compare_op0, arm_compare_op1; +int arm_compare_fp; + +/* What type of cpu are we compiling for? */ +enum processor_type arm_cpu; + +/* What type of floating point are we compiling for? 
*/ +enum floating_point_type arm_fpu; + +/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we + must report the mode of the memory reference from PRINT_OPERAND to + PRINT_OPERAND_ADDRESS. */ +enum machine_mode output_memory_reference_mode; + +/* Nonzero if the prologue must setup `fp'. */ +int current_function_anonymous_args; + +/* Location counter of .text segment. */ +int arm_text_location = 0; + +/* Set to one if we think that lr is only saved because of subroutine calls, + but all of these can be `put after' return insns */ +int lr_save_eliminated; + +/* A hash table is used to store text segment labels and their associated + offset from the start of the text segment. */ +struct label_offset +{ + char *name; + int offset; + struct label_offset *cdr; +}; + +#define LABEL_HASH_SIZE 257 + +static struct label_offset *offset_table[LABEL_HASH_SIZE]; + +/* Set to 1 when a return insn is output, this means that the epilogue + is not needed. */ + +static int return_used_this_function; + +/* For an explanation of these variables, see final_prescan_insn below. */ +int arm_ccfsm_state; +int arm_current_cc; +rtx arm_target_insn; +int arm_target_label; + +/* The condition codes of the ARM, and the inverse function. */ +char *arm_condition_codes[] = +{ + "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" +}; + +#define ARM_INVERSE_CONDITION_CODE(X) ((X) ^ 1) + +/* Return 1 if it is possible to return using a single instruction */ + +int +use_return_insn () +{ + int regno; + + if (!reload_completed ||current_function_pretend_args_size + || current_function_anonymous_args + || (get_frame_size () && !(TARGET_APCS || frame_pointer_needed))) + return 0; + + /* Can't be done if any of the FPU regs are pushed, since this also + requires an insn */ + for (regno = 20; regno < 24; regno++) + if (regs_ever_live[regno]) + return 0; + + return 1; +} + +/* Return TRUE if int I is a valid immediate ARM constant. */ + +int +const_ok_for_arm (i) + HOST_WIDE_INT i; +{ + unsigned HOST_WIDE_INT mask = ~0xFF; + + /* Fast return for 0 and powers of 2 */ + if ((i & (i - 1)) == 0) + return TRUE; + + do + { + if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0) + return TRUE; + mask = + (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff) + >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff); + } while (mask != ~0xFF); + + return FALSE; +} + +/* Return true if I is a valid constant for the operation CODE. */ +int +const_ok_for_op (i, code, mode) + HOST_WIDE_INT i; + enum rtx_code code; + enum machine_mode mode; +{ + if (const_ok_for_arm (i)) + return 1; + + switch (code) + { + case PLUS: + return const_ok_for_arm (ARM_SIGN_EXTEND (-i)); + + case MINUS: /* Should only occur with (MINUS I reg) => rsb */ + case XOR: + case IOR: + return 0; + + case AND: + return const_ok_for_arm (ARM_SIGN_EXTEND (~i)); + + default: + abort (); + } +} + +/* Emit a sequence of insns to handle a large constant. + CODE is the code of the operation required, it can be any of SET, PLUS, + IOR, AND, XOR, MINUS; + MODE is the mode in which the operation is being performed; + VAL is the integer to operate on; + SOURCE is the other operand (a register, or a null-pointer for SET); + SUBTARGETS means it is safe to create scratch registers if that will + either produce a simpler sequence, or we will want to cse the values. 
*/ + +int +arm_split_constant (code, mode, val, target, source, subtargets) + enum rtx_code code; + enum machine_mode mode; + HOST_WIDE_INT val; + rtx target; + rtx source; + int subtargets; +{ + int can_add = 0; + int can_invert = 0; + int can_negate = 0; + int can_negate_initial = 0; + int can_shift = 0; + int i; + int num_bits_set = 0; + int set_sign_bit_copies = 0; + int clear_sign_bit_copies = 0; + int clear_zero_bit_copies = 0; + int set_zero_bit_copies = 0; + int insns = 0; + rtx new_src; + unsigned HOST_WIDE_INT temp1, temp2; + unsigned HOST_WIDE_INT remainder = val & 0xffffffff; + + /* find out which operations are safe for a given CODE. Also do a quick + check for degenerate cases; these can occur when DImode operations + are split. */ + switch (code) + { + case SET: + can_invert = 1; + can_shift = 1; + can_negate = 1; + break; + + case PLUS: + can_negate = 1; + can_negate_initial = 1; + break; + + case IOR: + if (remainder == 0xffffffff) + { + emit_insn (gen_rtx (SET, VOIDmode, target, + GEN_INT (ARM_SIGN_EXTEND (val)))); + return 1; + } + if (remainder == 0) + { + if (reload_completed && rtx_equal_p (target, source)) + return 0; + emit_insn (gen_rtx (SET, VOIDmode, target, source)); + return 1; + } + break; + + case AND: + if (remainder == 0) + { + emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx)); + return 1; + } + if (remainder == 0xffffffff) + { + if (reload_completed && rtx_equal_p (target, source)) + return 0; + emit_insn (gen_rtx (SET, VOIDmode, target, source)); + return 1; + } + can_invert = 1; + break; + + case XOR: + if (remainder == 0) + { + if (reload_completed && rtx_equal_p (target, source)) + return 0; + emit_insn (gen_rtx (SET, VOIDmode, target, source)); + return 1; + } + if (remainder == 0xffffffff) + { + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (NOT, mode, source))); + return 1; + } + + /* We don't know how to handle this yet below. */ + abort (); + + case MINUS: + /* We treat MINUS as (val - source), since (source - val) is always + passed as (source + (-val)). */ + if (remainder == 0) + { + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (NEG, mode, source))); + return 1; + } + if (const_ok_for_arm (val)) + { + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (MINUS, mode, GEN_INT (val), source))); + return 1; + } + can_negate = 1; + + break; + + default: + abort (); + } + + /* If we can do it in one insn get out quickly */ + if (const_ok_for_arm (val) + || (can_negate_initial && const_ok_for_arm (-val)) + || (can_invert && const_ok_for_arm (~val))) + { + emit_insn (gen_rtx (SET, VOIDmode, target, + (source ? gen_rtx (code, mode, source, + GEN_INT (val)) : GEN_INT (val)))); + return 1; + } + + + /* Calculate a few attributes that may be useful for specific + optimizations. */ + + for (i = 31; i >= 0; i--) + { + if ((remainder & (1 << i)) == 0) + clear_sign_bit_copies++; + else + break; + } + + for (i = 31; i >= 0; i--) + { + if ((remainder & (1 << i)) != 0) + set_sign_bit_copies++; + else + break; + } + + for (i = 0; i <= 31; i++) + { + if ((remainder & (1 << i)) == 0) + clear_zero_bit_copies++; + else + break; + } + + for (i = 0; i <= 31; i++) + { + if ((remainder & (1 << i)) != 0) + set_zero_bit_copies++; + else + break; + } + + switch (code) + { + case SET: + /* See if we can do this by sign_extending a constant that is known + to be negative. This is a good, way of doing it, since the shift + may well merge into a subsequent insn. 
*/ + if (set_sign_bit_copies > 1) + { + if (const_ok_for_arm + (temp1 = ARM_SIGN_EXTEND (remainder + << (set_sign_bit_copies - 1)))) + { + new_src = subtargets ? gen_reg_rtx (mode) : target; + emit_insn (gen_rtx (SET, VOIDmode, new_src, GEN_INT (temp1))); + emit_insn (gen_ashrsi3 (target, new_src, + GEN_INT (set_sign_bit_copies - 1))); + return 2; + } + /* For an inverted constant, we will need to set the low bits, + these will be shifted out of harm's way. */ + temp1 |= (1 << (set_sign_bit_copies - 1)) - 1; + if (const_ok_for_arm (~temp1)) + { + new_src = subtargets ? gen_reg_rtx (mode) : target; + emit_insn (gen_rtx (SET, VOIDmode, new_src, GEN_INT (temp1))); + emit_insn (gen_ashrsi3 (target, new_src, + GEN_INT (set_sign_bit_copies - 1))); + return 2; + } + } + + /* See if we can generate this by setting the bottom (or the top) + 16 bits, and then shifting these into the other half of the + word. We only look for the simplest cases, to do more would cost + too much. Be careful, however, not to generate this when the + alternative would take fewer insns. */ + if (val & 0xffff0000) + { + temp1 = remainder & 0xffff0000; + temp2 = remainder & 0x0000ffff; + + /* Overlaps outside this range are best done using other methods. */ + for (i = 9; i < 24; i++) + { + if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder) + && ! const_ok_for_arm (temp2)) + { + insns + = arm_split_constant (code, mode, temp2, + (new_src + = subtargets ? gen_reg_rtx (mode) + : target), + source, subtargets); + source = new_src; + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (IOR, mode, + gen_rtx (ASHIFT, mode, source, + GEN_INT (i)), + source))); + return insns + 1; + } + } + + /* Don't duplicate cases already considered. */ + for (i = 17; i < 24; i++) + { + if (((temp1 | (temp1 >> i)) == remainder) + && ! const_ok_for_arm (temp1)) + { + insns + = arm_split_constant (code, mode, temp1, + (new_src + = subtargets ? gen_reg_rtx (mode) + : target), + source, subtargets); + source = new_src; + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (IOR, mode, + gen_rtx (LSHIFTRT, mode, source, + GEN_INT (i)), + source))); + return insns + 1; + } + } + } + break; + + case IOR: + case XOR: + /* If we have IOR or XOR, and the inverse of the constant can be loaded + in a single instruction, and we can find a temporary to put it in, + then this can be done in two instructions instead of 3-4. */ + if (subtargets + || (reload_completed && ! reg_mentioned_p (target, source))) + { + if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val))) + { + rtx sub = subtargets ? gen_reg_rtx (mode) : target; + + emit_insn (gen_rtx (SET, VOIDmode, sub, + GEN_INT (ARM_SIGN_EXTEND (~ val)))); + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (code, mode, source, sub))); + return 2; + } + } + + if (code == XOR) + break; + + if (set_sign_bit_copies > 8 + && (val & (-1 << (32 - set_sign_bit_copies))) == val) + { + rtx sub = subtargets ? gen_reg_rtx (mode) : target; + rtx shift = GEN_INT (set_sign_bit_copies); + + emit_insn (gen_rtx (SET, VOIDmode, sub, + gen_rtx (NOT, mode, + gen_rtx (ASHIFT, mode, source, + shift)))); + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (NOT, mode, + gen_rtx (LSHIFTRT, mode, sub, + shift)))); + return 2; + } + + if (set_zero_bit_copies > 8 + && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder) + { + rtx sub = subtargets ? 
gen_reg_rtx (mode) : target; + rtx shift = GEN_INT (set_zero_bit_copies); + + emit_insn (gen_rtx (SET, VOIDmode, sub, + gen_rtx (NOT, mode, + gen_rtx (LSHIFTRT, mode, source, + shift)))); + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (NOT, mode, + gen_rtx (ASHIFT, mode, sub, + shift)))); + return 2; + } + + if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val))) + { + rtx sub = subtargets ? gen_reg_rtx (mode) : target; + emit_insn (gen_rtx (SET, VOIDmode, sub, + gen_rtx (NOT, mode, source))); + source = sub; + if (subtargets) + sub = gen_reg_rtx (mode); + emit_insn (gen_rtx (SET, VOIDmode, sub, + gen_rtx (AND, mode, source, GEN_INT (temp1)))); + emit_insn (gen_rtx (SET, VOIDmode, target, + gen_rtx (NOT, mode, sub))); + return 3; + } + break; + + case AND: + /* See if two shifts will do 2 or more insn's worth of work. */ + if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24) + { + HOST_WIDE_INT shift_mask = ((0xffffffff + << (32 - clear_sign_bit_copies)) + & 0xffffffff); + rtx new_source; + rtx shift = GEN_INT (clear_sign_bit_copies); + + if ((remainder | shift_mask) != 0xffffffff) + { + new_source = subtargets ? gen_reg_rtx (mode) : target; + insns = arm_split_constant (AND, mode, remainder | shift_mask, + new_source, source, subtargets); + source = new_source; + } + + new_source = subtargets ? gen_reg_rtx (mode) : target; + emit_insn (gen_ashlsi3 (new_source, source, shift)); + emit_insn (gen_lshrsi3 (target, new_source, shift)); + return insns + 2; + } + + if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24) + { + HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1; + rtx new_source; + rtx shift = GEN_INT (clear_zero_bit_copies); + + if ((remainder | shift_mask) != 0xffffffff) + { + new_source = subtargets ? gen_reg_rtx (mode) : target; + insns = arm_split_constant (AND, mode, remainder | shift_mask, + new_source, source, subtargets); + source = new_source; + } + + new_source = subtargets ? gen_reg_rtx (mode) : target; + emit_insn (gen_lshrsi3 (new_source, source, shift)); + emit_insn (gen_ashlsi3 (target, new_source, shift)); + return insns + 2; + } + + break; + + default: + break; + } + + for (i = 0; i < 32; i++) + if (remainder & (1 << i)) + num_bits_set++; + + if (code == AND || (can_invert && num_bits_set > 16)) + remainder = (~remainder) & 0xffffffff; + else if (code == PLUS && num_bits_set > 16) + remainder = (-remainder) & 0xffffffff; + else + { + can_invert = 0; + can_negate = 0; + } + + /* Now try and find a way of doing the job in either two or three + instructions. + We start by looking for the largest block of zeros that are aligned on + a 2-bit boundary, we then fill up the temps, wrapping around to the + top of the word when we drop off the bottom. + In the worst case this code should produce no more than four insns. */ + { + int best_start = 0; + int best_consecutive_zeros = 0; + + for (i = 0; i < 32; i += 2) + { + int consecutive_zeros = 0; + + if (! (remainder & (3 << i))) + { + while ((i < 32) && ! (remainder & (3 << i))) + { + consecutive_zeros += 2; + i += 2; + } + if (consecutive_zeros > best_consecutive_zeros) + { + best_consecutive_zeros = consecutive_zeros; + best_start = i - consecutive_zeros; + } + i -= 2; + } + } + + /* Now start emitting the insns, starting with the one with the highest + bit set: we do this so that the smallest number will be emitted last; + this is more likely to be combinable with addressing insns. 
*/ + i = best_start; + do + { + int end; + + if (i <= 0) + i += 32; + if (remainder & (3 << (i - 2))) + { + end = i - 8; + if (end < 0) + end += 32; + temp1 = remainder & ((0x0ff << end) + | ((i < end) ? (0xff >> (32 - end)) : 0)); + remainder &= ~temp1; + + if (code == SET) + { + emit_insn (gen_rtx (SET, VOIDmode, + new_src = (subtargets ? gen_reg_rtx (mode) + : target), + GEN_INT (can_invert ? ~temp1 : temp1))); + can_invert = 0; + code = PLUS; + } + else if (code == MINUS) + { + emit_insn (gen_rtx (SET, VOIDmode, + new_src = (subtargets ? gen_reg_rtx (mode) + : target), + gen_rtx (code, mode, GEN_INT (temp1), + source))); + code = PLUS; + } + else + { + emit_insn (gen_rtx (SET, VOIDmode, + new_src = remainder ? (subtargets + ? gen_reg_rtx (mode) + : target) : target, + gen_rtx (code, mode, source, + GEN_INT (can_invert ? ~temp1 + : (can_negate + ? -temp1 : temp1))))); + } + + insns++; + source = new_src; + i -= 6; + } + i -= 2; + } while (remainder); + } + return insns; +} + +#define REG_OR_SUBREG_REG(X) \ + (GET_CODE (X) == REG \ + || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG)) + +#define REG_OR_SUBREG_RTX(X) \ + (GET_CODE (X) == REG ? (X) : SUBREG_REG (X)) + +#define ARM_FRAME_RTX(X) \ + ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \ + || (X) == arg_pointer_rtx) + +int +arm_rtx_costs (x, code, outer_code) + rtx x; + enum rtx_code code, outer_code; +{ + enum machine_mode mode = GET_MODE (x); + enum rtx_code subcode; + int extra_cost; + + switch (code) + { + case MEM: + /* Memory costs quite a lot for the first word, but subsequent words + load at the equivalent of a single insn each. */ + return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD) + + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0)); + + case DIV: + case MOD: + return 100; + + case ROTATE: + if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG) + return 4; + /* Fall through */ + case ROTATERT: + if (mode != SImode) + return 8; + /* Fall through */ + case ASHIFT: case LSHIFTRT: case ASHIFTRT: + if (mode == DImode) + return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8) + + ((GET_CODE (XEXP (x, 0)) == REG + || (GET_CODE (XEXP (x, 0)) == SUBREG + && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) + ? 0 : 8)); + return (1 + ((GET_CODE (XEXP (x, 0)) == REG + || (GET_CODE (XEXP (x, 0)) == SUBREG + && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) + ? 0 : 4) + + ((GET_CODE (XEXP (x, 1)) == REG + || (GET_CODE (XEXP (x, 1)) == SUBREG + && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG) + || (GET_CODE (XEXP (x, 1)) == CONST_INT)) + ? 0 : 4)); + + case MINUS: + if (mode == DImode) + return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8) + + ((REG_OR_SUBREG_REG (XEXP (x, 0)) + || (GET_CODE (XEXP (x, 0)) == CONST_INT + && const_ok_for_arm (INTVAL (XEXP (x, 0))))) + ? 0 : 8)); + + if (GET_MODE_CLASS (mode) == MODE_FLOAT) + return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1)) + || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE + && const_double_rtx_ok_for_fpu (XEXP (x, 1)))) + ? 0 : 8) + + ((REG_OR_SUBREG_REG (XEXP (x, 0)) + || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE + && const_double_rtx_ok_for_fpu (XEXP (x, 0)))) + ? 
0 : 8)); + + if (((GET_CODE (XEXP (x, 0)) == CONST_INT + && const_ok_for_arm (INTVAL (XEXP (x, 0))) + && REG_OR_SUBREG_REG (XEXP (x, 1)))) + || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT + || subcode == ASHIFTRT || subcode == LSHIFTRT + || subcode == ROTATE || subcode == ROTATERT + || (subcode == MULT + && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT + && ((INTVAL (XEXP (XEXP (x, 1), 1)) & + (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0))) + && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0)) + && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1)) + || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT) + && REG_OR_SUBREG_REG (XEXP (x, 0)))) + return 1; + /* Fall through */ + + case PLUS: + if (GET_MODE_CLASS (mode) == MODE_FLOAT) + return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) + + ((REG_OR_SUBREG_REG (XEXP (x, 1)) + || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE + && const_double_rtx_ok_for_fpu (XEXP (x, 1)))) + ? 0 : 8)); + + /* Fall through */ + case AND: case XOR: case IOR: + extra_cost = 0; + + /* Normally the frame registers will be spilt into reg+const during + reload, so it is a bad idea to combine them with other instructions, + since then they might not be moved outside of loops. As a compromise + we allow integration with ops that have a constant as their second + operand. */ + if ((REG_OR_SUBREG_REG (XEXP (x, 0)) + && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0))) + && GET_CODE (XEXP (x, 1)) != CONST_INT) + || (REG_OR_SUBREG_REG (XEXP (x, 0)) + && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0))))) + extra_cost = 4; + + if (mode == DImode) + return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) + + ((REG_OR_SUBREG_REG (XEXP (x, 1)) + || (GET_CODE (XEXP (x, 1)) == CONST_INT + && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode))) + ? 0 : 8)); + + if (REG_OR_SUBREG_REG (XEXP (x, 0))) + return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost) + + ((REG_OR_SUBREG_REG (XEXP (x, 1)) + || (GET_CODE (XEXP (x, 1)) == CONST_INT + && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode))) + ? 0 : 4)); + + else if (REG_OR_SUBREG_REG (XEXP (x, 1))) + return (1 + extra_cost + + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT + || subcode == LSHIFTRT || subcode == ASHIFTRT + || subcode == ROTATE || subcode == ROTATERT + || (subcode == MULT + && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT + && ((INTVAL (XEXP (XEXP (x, 0), 1)) & + (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)) + && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0))) + && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1))) + || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))) + ? 0 : 4)); + + return 8; + + case MULT: + if (GET_MODE_CLASS (mode) == MODE_FLOAT + || mode == DImode) + return 30; + + if (GET_CODE (XEXP (x, 1)) == CONST_INT) + { + HOST_WIDE_INT i = INTVAL (XEXP (x, 1)) & 0xffffffff; + int add_cost = const_ok_for_arm (i) ? 4 : 8; + int j; + + /* This will need adjusting for ARM's with fast multiplies */ + for (j = 0; i && j < 32; j += 2) + { + i &= ~(3 << j); + add_cost += 2; + } + + return add_cost; + } + + return (30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4) + + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4)); + + case NEG: + if (GET_MODE_CLASS (mode) == MODE_FLOAT) + return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6); + /* Fall through */ + case NOT: + if (mode == DImode) + return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4); + + return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 
0 : 4); + + case IF_THEN_ELSE: + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) + return 14; + return 2; + + case COMPARE: + return 1; + + case ABS: + return 4 + (mode == DImode ? 4 : 0); + + case SIGN_EXTEND: + if (GET_MODE (XEXP (x, 0)) == QImode) + return (4 + (mode == DImode ? 4 : 0) + + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); + /* Fall through */ + case ZERO_EXTEND: + switch (GET_MODE (XEXP (x, 0))) + { + case QImode: + return (1 + (mode == DImode ? 4 : 0) + + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); + + case HImode: + return (4 + (mode == DImode ? 4 : 0) + + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); + + case SImode: + return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); + } + abort (); + + default: + return 99; + } +} + +/* This code has been fixed for cross compilation. */ + +static int fpa_consts_inited = 0; + +char *strings_fpa[8] = { + "0.0", + "1.0", + "2.0", + "3.0", + "4.0", + "5.0", + "0.5", + "10.0" + }; + +static REAL_VALUE_TYPE values_fpa[8]; + +static void +init_fpa_table () +{ + int i; + REAL_VALUE_TYPE r; + + for (i = 0; i < 8; i++) + { + r = REAL_VALUE_ATOF (strings_fpa[i], DFmode); + values_fpa[i] = r; + } + + fpa_consts_inited = 1; +} + +/* Return TRUE if rtx X is a valid immediate FPU constant. */ + +int +const_double_rtx_ok_for_fpu (x) + rtx x; +{ + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + if (REAL_VALUE_MINUS_ZERO (r)) + return 0; + + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return 1; + + return 0; +} + +/* Return TRUE if rtx X is a valid immediate FPU constant. */ + +int +neg_const_double_rtx_ok_for_fpu (x) + rtx x; +{ + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + r = REAL_VALUE_NEGATE (r); + if (REAL_VALUE_MINUS_ZERO (r)) + return 0; + + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return 1; + + return 0; +} + +/* Predicates for `match_operand' and `match_operator'. */ + +/* s_register_operand is the same as register_operand, but it doesn't accept + (SUBREG (MEM)...). */ + +int +s_register_operand (op, mode) + register rtx op; + enum machine_mode mode; +{ + if (GET_MODE (op) != mode && mode != VOIDmode) + return 0; + + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + + /* We don't consider registers whose class is NO_REGS + to be a register operand. */ + return (GET_CODE (op) == REG + && (REGNO (op) >= FIRST_PSEUDO_REGISTER + || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); +} + +/* Only accept reg, subreg(reg), const_int. */ + +int +reg_or_int_operand (op, mode) + register rtx op; + enum machine_mode mode; +{ + if (GET_CODE (op) == CONST_INT) + return 1; + + if (GET_MODE (op) != mode && mode != VOIDmode) + return 0; + + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + + /* We don't consider registers whose class is NO_REGS + to be a register operand. */ + return (GET_CODE (op) == REG + && (REGNO (op) >= FIRST_PSEUDO_REGISTER + || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); +} + +/* Return 1 if OP is an item in memory, given that we are in reload. */ + +int +reload_memory_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + int regno = true_regnum (op); + + return (! CONSTANT_P (op) + && (regno == -1 + || (GET_CODE (op) == REG + && REGNO (op) >= FIRST_PSEUDO_REGISTER))); +} + +/* Return TRUE for valid operands for the rhs of an ARM instruction. 
*/ + +int +arm_rhs_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))); +} + +/* Return TRUE for valid operands for the rhs of an ARM instruction, or a load. + */ + +int +arm_rhsm_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))) + || memory_operand (op, mode)); +} + +/* Return TRUE for valid operands for the rhs of an ARM instruction, or if a + constant that is valid when negated. */ + +int +arm_add_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT + && (const_ok_for_arm (INTVAL (op)) + || const_ok_for_arm (-INTVAL (op))))); +} + +int +arm_not_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT + && (const_ok_for_arm (INTVAL (op)) + || const_ok_for_arm (~INTVAL (op))))); +} + +/* Return TRUE for valid operands for the rhs of an FPU instruction. */ + +int +fpu_rhs_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (s_register_operand (op, mode)) + return TRUE; + else if (GET_CODE (op) == CONST_DOUBLE) + return (const_double_rtx_ok_for_fpu (op)); + + return FALSE; +} + +int +fpu_add_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (s_register_operand (op, mode)) + return TRUE; + else if (GET_CODE (op) == CONST_DOUBLE) + return (const_double_rtx_ok_for_fpu (op) + || neg_const_double_rtx_ok_for_fpu (op)); + + return FALSE; +} + +/* Return nonzero if OP is a constant power of two. */ + +int +power_of_two_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (GET_CODE (op) == CONST_INT) + { + HOST_WIDE_INT value = INTVAL(op); + return value != 0 && (value & (value - 1)) == 0; + } + return FALSE; +} + +/* Return TRUE for a valid operand of a DImode operation. + Either: REG, CONST_DOUBLE or MEM(DImode_address). + Note that this disallows MEM(REG+REG), but allows + MEM(PRE/POST_INC/DEC(REG)). */ + +int +di_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (s_register_operand (op, mode)) + return TRUE; + + switch (GET_CODE (op)) + { + case CONST_DOUBLE: + case CONST_INT: + return TRUE; + + case MEM: + return memory_address_p (DImode, XEXP (op, 0)); + + default: + return FALSE; + } +} + +/* Return TRUE for a valid operand of a DFmode operation when -msoft-float. + Either: REG, CONST_DOUBLE or MEM(DImode_address). + Note that this disallows MEM(REG+REG), but allows + MEM(PRE/POST_INC/DEC(REG)). */ + +int +soft_df_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (s_register_operand (op, mode)) + return TRUE; + + switch (GET_CODE (op)) + { + case CONST_DOUBLE: + return TRUE; + + case MEM: + return memory_address_p (DFmode, XEXP (op, 0)); + + default: + return FALSE; + } +} + +/* Return TRUE for valid index operands. */ + +int +index_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand(op, mode) + || (immediate_operand (op, mode) + && INTVAL (op) < 4096 && INTVAL (op) > -4096)); +} + +/* Return TRUE for valid shifts by a constant. This also accepts any + power of two on the (somewhat overly relaxed) assumption that the + shift operator in this case was a mult. 
*/ + +int +const_shift_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (power_of_two_operand (op, mode) + || (immediate_operand (op, mode) + && (INTVAL (op) < 32 && INTVAL (op) > 0))); +} + +/* Return TRUE for arithmetic operators which can be combined with a multiply + (shift). */ + +int +shiftable_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + if (GET_MODE (x) != mode) + return FALSE; + else + { + enum rtx_code code = GET_CODE (x); + + return (code == PLUS || code == MINUS + || code == IOR || code == XOR || code == AND); + } +} + +/* Return TRUE for shift operators. */ + +int +shift_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + if (GET_MODE (x) != mode) + return FALSE; + else + { + enum rtx_code code = GET_CODE (x); + + if (code == MULT) + return power_of_two_operand (XEXP (x, 1)); + + return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT + || code == ROTATERT); + } +} + +int equality_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + return GET_CODE (x) == EQ || GET_CODE (x) == NE; +} + +/* Return TRUE for SMIN SMAX UMIN UMAX operators. */ + +int +minmax_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + enum rtx_code code = GET_CODE (x); + + if (GET_MODE (x) != mode) + return FALSE; + + return code == SMIN || code == SMAX || code == UMIN || code == UMAX; +} + +/* return TRUE if x is EQ or NE */ + +/* Return TRUE if this is the condition code register, if we aren't given + a mode, accept any class CCmode register */ + +int +cc_register (x, mode) + rtx x; + enum machine_mode mode; +{ + if (mode == VOIDmode) + { + mode = GET_MODE (x); + if (GET_MODE_CLASS (mode) != MODE_CC) + return FALSE; + } + + if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24) + return TRUE; + + return FALSE; +} + +/* Return TRUE if this is the condition code register, if we aren't given + a mode, accept any mode in class CC_MODE that is reversible */ + +int +reversible_cc_register (x, mode) + rtx x; + enum machine_mode mode; +{ + if (mode == VOIDmode) + { + mode = GET_MODE (x); + if (GET_MODE_CLASS (mode) != MODE_CC + && GET_CODE (x) == REG && REGNO (x) == 24) + abort (); + if (GET_MODE_CLASS (mode) != MODE_CC + || (! flag_fast_math && ! REVERSIBLE_CC_MODE (mode))) + return FALSE; + } + + if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24) + return TRUE; + + return FALSE; +} + +enum rtx_code +minmax_code (x) + rtx x; +{ + enum rtx_code code = GET_CODE (x); + + if (code == SMAX) + return GE; + else if (code == SMIN) + return LE; + else if (code == UMIN) + return LEU; + else if (code == UMAX) + return GEU; + + abort (); +} + +/* Return 1 if memory locations are adjacent */ + +int +adjacent_mem_locations (a, b) + rtx a, b; +{ + int val0 = 0, val1 = 0; + int reg0, reg1; + + if ((GET_CODE (XEXP (a, 0)) == REG + || (GET_CODE (XEXP (a, 0)) == PLUS + && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT)) + && (GET_CODE (XEXP (b, 0)) == REG + || (GET_CODE (XEXP (b, 0)) == PLUS + && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT))) + { + if (GET_CODE (XEXP (a, 0)) == PLUS) + { + reg0 = REGNO (XEXP (XEXP (a, 0), 0)); + val0 = INTVAL (XEXP (XEXP (a, 0), 1)); + } + else + reg0 = REGNO (XEXP (a, 0)); + if (GET_CODE (XEXP (b, 0)) == PLUS) + { + reg1 = REGNO (XEXP (XEXP (b, 0), 0)); + val1 = INTVAL (XEXP (XEXP (b, 0), 1)); + } + else + reg1 = REGNO (XEXP (b, 0)); + return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4); + } + return 0; +} + +/* Return 1 if OP is a load multiple operation. 
It is known to be + parallel and the first section will be tested. */ + +int +load_multiple_operation (op, mode) + rtx op; + enum machine_mode mode; +{ + HOST_WIDE_INT count = XVECLEN (op, 0); + int dest_regno; + rtx src_addr; + HOST_WIDE_INT i = 1, base = 0; + rtx elt; + + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return 0; + + /* Check to see if this might be a write-back */ + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) + { + i++; + base = 1; + + /* Now check it more carefully */ + if (GET_CODE (SET_DEST (elt)) != REG + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG + || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 + || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER + || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG + || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) + != REGNO (SET_DEST (elt))) + return 0; + + count--; + } + + /* Perform a quick check so we don't blow up below. */ + if (count <= i + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM) + return 0; + + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1))); + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0); + + for (; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != REG + || GET_MODE (SET_DEST (elt)) != SImode + || REGNO (SET_DEST (elt)) != dest_regno + i - base + || GET_CODE (SET_SRC (elt)) != MEM + || GET_MODE (SET_SRC (elt)) != SImode + || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS + || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) + || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4) + return 0; + } + + return 1; +} + +/* Return 1 if OP is a store multiple operation. It is known to be + parallel and the first section will be tested. */ + +int +store_multiple_operation (op, mode) + rtx op; + enum machine_mode mode; +{ + HOST_WIDE_INT count = XVECLEN (op, 0); + int src_regno; + rtx dest_addr; + HOST_WIDE_INT i = 1, base = 0; + rtx elt; + + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return 0; + + /* Check to see if this might be a write-back */ + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) + { + i++; + base = 1; + + /* Now check it more carefully */ + if (GET_CODE (SET_DEST (elt)) != REG + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG + || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 + || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER + || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG + || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) + != REGNO (SET_DEST (elt))) + return 0; + + count--; + } + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= i + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG) + return 0; + + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1))); + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0); + + for (; i < count; i++) + { + elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != SImode + || REGNO (SET_SRC (elt)) != src_regno + i - base + || GET_CODE (SET_DEST (elt)) != MEM + || GET_MODE (SET_DEST (elt)) != SImode + || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS + || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr) + || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4) + return 0; + } + + return 1; +} + +int +multi_register_push (op, mode) + rtx op; + enum machine_mode mode; +{ + if (GET_CODE (op) != PARALLEL + || (GET_CODE (XVECEXP (op, 0, 0)) != SET) + || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC) + || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2)) + return 0; + + return 1; +} + + +/* Routines for use with attributes */ + +int +const_pool_offset (symbol) + rtx symbol; +{ + return get_pool_offset (symbol) - get_pool_size () - get_prologue_size (); +} + +/* Routines for use in generating RTL */ + +rtx +arm_gen_load_multiple (base_regno, count, from, up, write_back) + int base_regno; + int count; + rtx from; + int up; + int write_back; +{ + int i = 0, j; + rtx result; + int sign = up ? 1 : -1; + + result = gen_rtx (PARALLEL, VOIDmode, + rtvec_alloc (count + (write_back ? 2 : 0))); + if (write_back) + { + XVECEXP (result, 0, 0) + = gen_rtx (SET, GET_MODE (from), from, + plus_constant (from, count * 4 * sign)); + i = 1; + count++; + } + + for (j = 0; i < count; i++, j++) + { + XVECEXP (result, 0, i) + = gen_rtx (SET, VOIDmode, gen_rtx (REG, SImode, base_regno + j), + gen_rtx (MEM, SImode, + plus_constant (from, j * 4 * sign))); + } + + if (write_back) + XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from); + + return result; +} + +rtx +arm_gen_store_multiple (base_regno, count, to, up, write_back) + int base_regno; + int count; + rtx to; + int up; + int write_back; +{ + int i = 0, j; + rtx result; + int sign = up ? 1 : -1; + + result = gen_rtx (PARALLEL, VOIDmode, + rtvec_alloc (count + (write_back ? 
2 : 0))); + if (write_back) + { + XVECEXP (result, 0, 0) + = gen_rtx (SET, GET_MODE (to), to, + plus_constant (to, count * 4 * sign)); + i = 1; + count++; + } + + for (j = 0; i < count; i++, j++) + { + XVECEXP (result, 0, i) + = gen_rtx (SET, VOIDmode, + gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign)), + gen_rtx (REG, SImode, base_regno + j)); + } + + if (write_back) + XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to); + + return result; +} + +int +arm_gen_movstrqi (operands) + rtx *operands; +{ + HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes; + int i, r; + rtx src, dst; + rtx st_src, st_dst, end_src, end_dst, fin_src, fin_dst; + rtx part_bytes_reg = NULL; + extern int optimize; + + if (GET_CODE (operands[2]) != CONST_INT + || GET_CODE (operands[3]) != CONST_INT + || INTVAL (operands[2]) > 64 + || INTVAL (operands[3]) & 3) + return 0; + + st_dst = XEXP (operands[0], 0); + st_src = XEXP (operands[1], 0); + fin_dst = dst = copy_to_mode_reg (SImode, st_dst); + fin_src = src = copy_to_mode_reg (SImode, st_src); + + in_words_to_go = (INTVAL (operands[2]) + 3) / 4; + out_words_to_go = INTVAL (operands[2]) / 4; + last_bytes = INTVAL (operands[2]) & 3; + + if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0) + part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3); + + for (i = 0; in_words_to_go >= 2; i+=4) + { + emit_insn (arm_gen_load_multiple (0, (in_words_to_go > 4 + ? 4 : in_words_to_go), + src, TRUE, TRUE)); + if (out_words_to_go) + { + if (out_words_to_go != 1) + emit_insn (arm_gen_store_multiple (0, (out_words_to_go > 4 + ? 4 : out_words_to_go), + dst, TRUE, TRUE)); + else + { + emit_move_insn (gen_rtx (MEM, SImode, dst), + gen_rtx (REG, SImode, 0)); + emit_insn (gen_addsi3 (dst, dst, GEN_INT (4))); + } + } + + in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4; + out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4; + } + + /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. 
*/ + if (out_words_to_go) + { + rtx sreg; + + emit_move_insn (sreg = gen_reg_rtx (SImode), gen_rtx (MEM, SImode, src)); + emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4)); + emit_move_insn (gen_rtx (MEM, SImode, dst), sreg); + emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4)); + in_words_to_go--; + + if (in_words_to_go) /* Sanity check */ + abort (); + } + + if (in_words_to_go) + { + if (in_words_to_go < 0) + abort (); + + part_bytes_reg = copy_to_mode_reg (SImode, gen_rtx (MEM, SImode, src)); + emit_insn (gen_addsi3 (src, src, GEN_INT (4))); + } + + if (BYTES_BIG_ENDIAN && last_bytes) + { + rtx tmp = gen_reg_rtx (SImode); + + if (part_bytes_reg == NULL) + abort (); + + /* The bytes we want are in the top end of the word */ + emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, + GEN_INT (8 * (4 - last_bytes)))); + part_bytes_reg = tmp; + + while (last_bytes) + { + emit_move_insn (gen_rtx (MEM, QImode, + plus_constant (dst, last_bytes - 1)), + gen_rtx (SUBREG, QImode, part_bytes_reg, 0)); + if (--last_bytes) + { + tmp = gen_reg_rtx (SImode); + emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8))); + part_bytes_reg = tmp; + } + } + + } + else + { + while (last_bytes) + { + if (part_bytes_reg == NULL) + abort (); + + emit_move_insn (gen_rtx (MEM, QImode, dst), + gen_rtx (SUBREG, QImode, part_bytes_reg, 0)); + emit_insn (gen_addsi3 (dst, dst, const1_rtx)); + if (--last_bytes) + { + rtx tmp = gen_reg_rtx (SImode); + emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8))); + part_bytes_reg = tmp; + } + } + } + + return 1; +} + +/* X and Y are two things to compare using CODE. Emit the compare insn and + return the rtx for register 0 in the proper mode. FP means this is a + floating point compare: I don't think that it is needed on the arm. 
*/ + +rtx +gen_compare_reg (code, x, y, fp) + enum rtx_code code; + rtx x, y; +{ + enum machine_mode mode = SELECT_CC_MODE (code, x, y); + rtx cc_reg = gen_rtx (REG, mode, 24); + + emit_insn (gen_rtx (SET, VOIDmode, cc_reg, + gen_rtx (COMPARE, mode, x, y))); + + return cc_reg; +} + +void +arm_reload_in_hi (operands) + rtx *operands; +{ + rtx base = find_replacement (&XEXP (operands[1], 0)); + + emit_insn (gen_zero_extendqisi2 (operands[2], gen_rtx (MEM, QImode, base))); + emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG, SImode, operands[0], 0), + gen_rtx (MEM, QImode, + plus_constant (base, 1)))); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode, + operands[0], 0), + gen_rtx (IOR, SImode, + gen_rtx (ASHIFT, SImode, + gen_rtx (SUBREG, SImode, + operands[0], 0), + GEN_INT (8)), + operands[2]))); + else + emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode, + operands[0], 0), + gen_rtx (IOR, SImode, + gen_rtx (ASHIFT, SImode, + operands[2], + GEN_INT (8)), + gen_rtx (SUBREG, SImode, operands[0], 0)))); +} + +void +arm_reload_out_hi (operands) + rtx *operands; +{ + rtx base = find_replacement (&XEXP (operands[0], 0)); + + if (BYTES_BIG_ENDIAN) + { + emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)), + gen_rtx (SUBREG, QImode, operands[1], 0))); + emit_insn (gen_lshrsi3 (operands[2], + gen_rtx (SUBREG, SImode, operands[1], 0), + GEN_INT (8))); + emit_insn (gen_movqi (gen_rtx (MEM, QImode, base), + gen_rtx (SUBREG, QImode, operands[2], 0))); + } + else + { + emit_insn (gen_movqi (gen_rtx (MEM, QImode, base), + gen_rtx (SUBREG, QImode, operands[1], 0))); + emit_insn (gen_lshrsi3 (operands[2], + gen_rtx (SUBREG, SImode, operands[1], 0), + GEN_INT (8))); + emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)), + gen_rtx (SUBREG, QImode, operands[2], 0))); + } +} + +/* Check to see if a branch is forwards or backwards. Return TRUE if it + is backwards. */ + +int +arm_backwards_branch (from, to) + int from, to; +{ + return insn_addresses[to] <= insn_addresses[from]; +} + +/* Check to see if a branch is within the distance that can be done using + an arithmetic expression. */ +int +short_branch (from, to) + int from, to; +{ + int delta = insn_addresses[from] + 8 - insn_addresses[to]; + + return abs (delta) < 980; /* A small margin for safety */ +} + +/* Check to see that the insn isn't the target of the conditionalizing + code */ +int +arm_insn_not_targeted (insn) + rtx insn; +{ + return insn != arm_target_insn; +} + + +/* Routines to output assembly language. */ + +/* If the rtx is the correct value then return the string of the number. + In this way we can ensure that valid double constants are generated even + when cross compiling. */ +char * +fp_immediate_constant (x) + rtx x; +{ + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return strings_fpa[i]; + + abort (); +} + +/* As for fp_immediate_constant, but value is passed directly, not in rtx. */ +static char * +fp_const_from_val (r) + REAL_VALUE_TYPE *r; +{ + int i; + + if (! fpa_consts_inited) + init_fpa_table (); + + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (*r, values_fpa[i])) + return strings_fpa[i]; + + abort (); +} + +/* Output the operands of a LDM/STM instruction to STREAM. + MASK is the ARM register set mask of which only bits 0-15 are important. + INSTR is the possibly suffixed base register. 
HAT unequals zero if a hat + must follow the register list. */ + +void +print_multi_reg (stream, instr, mask, hat) + FILE *stream; + char *instr; + int mask, hat; +{ + int i; + int not_first = FALSE; + + fputc ('\t', stream); + fprintf (stream, instr, REGISTER_PREFIX); + fputs (", {", stream); + for (i = 0; i < 16; i++) + if (mask & (1 << i)) + { + if (not_first) + fprintf (stream, ", "); + fprintf (stream, "%s%s", REGISTER_PREFIX, reg_names[i]); + not_first = TRUE; + } + + fprintf (stream, "}%s\n", hat ? "^" : ""); +} + +/* Output a 'call' insn. */ + +char * +output_call (operands) + rtx *operands; +{ + /* Handle calls to lr using ip (which may be clobbered in subr anyway). */ + + if (REGNO (operands[0]) == 14) + { + operands[0] = gen_rtx (REG, SImode, 12); + output_asm_insn ("mov%?\t%0, %|lr", operands); + } + output_asm_insn ("mov%?\t%|lr, %|pc", operands); + output_asm_insn ("mov%?\t%|pc, %0", operands); + return ""; +} + +static int +eliminate_lr2ip (x) + rtx *x; +{ + int something_changed = 0; + rtx x0 = *x; + int code = GET_CODE (x0); + register int i, j; + register char *fmt; + + switch (code) + { + case REG: + if (REGNO (x0) == 14) + { + *x = gen_rtx (REG, SImode, 12); + return 1; + } + return 0; + default: + /* Scan through the sub-elements and change any references there */ + fmt = GET_RTX_FORMAT (code); + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + if (fmt[i] == 'e') + something_changed |= eliminate_lr2ip (&XEXP (x0, i)); + else if (fmt[i] == 'E') + for (j = 0; j < XVECLEN (x0, i); j++) + something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j)); + return something_changed; + } +} + +/* Output a 'call' insn that is a reference in memory. */ + +char * +output_call_mem (operands) + rtx *operands; +{ + operands[0] = copy_rtx (operands[0]); /* Be ultra careful */ + /* Handle calls using lr by using ip (which may be clobbered in subr anyway). + */ + if (eliminate_lr2ip (&operands[0])) + output_asm_insn ("mov%?\t%|ip, %|lr", operands); + + output_asm_insn ("mov%?\t%|lr, %|pc", operands); + output_asm_insn ("ldr%?\t%|pc, %0", operands); + return ""; +} + + +/* Output a move from arm registers to an fpu registers. + OPERANDS[0] is an fpu register. + OPERANDS[1] is the first registers of an arm register pair. */ + +char * +output_mov_long_double_fpu_from_arm (operands) + rtx *operands; +{ + int arm_reg0 = REGNO (operands[1]); + rtx ops[3]; + + if (arm_reg0 == 12) + abort(); + + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0); + + output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops); + output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands); + return ""; +} + +/* Output a move from an fpu register to arm registers. + OPERANDS[0] is the first registers of an arm register pair. + OPERANDS[1] is an fpu register. */ + +char * +output_mov_long_double_arm_from_fpu (operands) + rtx *operands; +{ + int arm_reg0 = REGNO (operands[0]); + rtx ops[3]; + + if (arm_reg0 == 12) + abort(); + + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0); + + output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands); + output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops); + return ""; +} + +/* Output a move from arm registers to arm registers of a long double + OPERANDS[0] is the destination. + OPERANDS[1] is the source. 
*/ +char * +output_mov_long_double_arm_from_arm (operands) + rtx *operands; +{ + /* We have to be careful here because the two might overlap */ + int dest_start = REGNO (operands[0]); + int src_start = REGNO (operands[1]); + rtx ops[2]; + int i; + + if (dest_start < src_start) + { + for (i = 0; i < 3; i++) + { + ops[0] = gen_rtx (REG, SImode, dest_start + i); + ops[1] = gen_rtx (REG, SImode, src_start + i); + output_asm_insn ("mov%?\t%0, %1", ops); + } + } + else + { + for (i = 2; i >= 0; i--) + { + ops[0] = gen_rtx (REG, SImode, dest_start + i); + ops[1] = gen_rtx (REG, SImode, src_start + i); + output_asm_insn ("mov%?\t%0, %1", ops); + } + } + + return ""; +} + + +/* Output a move from arm registers to an fpu registers. + OPERANDS[0] is an fpu register. + OPERANDS[1] is the first registers of an arm register pair. */ + +char * +output_mov_double_fpu_from_arm (operands) + rtx *operands; +{ + int arm_reg0 = REGNO (operands[1]); + rtx ops[2]; + + if (arm_reg0 == 12) + abort(); + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops); + output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands); + return ""; +} + +/* Output a move from an fpu register to arm registers. + OPERANDS[0] is the first registers of an arm register pair. + OPERANDS[1] is an fpu register. */ + +char * +output_mov_double_arm_from_fpu (operands) + rtx *operands; +{ + int arm_reg0 = REGNO (operands[0]); + rtx ops[2]; + + if (arm_reg0 == 12) + abort(); + + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands); + output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops); + return ""; +} + +/* Output a move between double words. + It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM + or MEM<-REG and all MEMs must be offsettable addresses. */ + +char * +output_move_double (operands) + rtx *operands; +{ + enum rtx_code code0 = GET_CODE (operands[0]); + enum rtx_code code1 = GET_CODE (operands[1]); + rtx otherops[2]; + + if (code0 == REG) + { + int reg0 = REGNO (operands[0]); + + otherops[0] = gen_rtx (REG, SImode, 1 + reg0); + if (code1 == REG) + { + int reg1 = REGNO (operands[1]); + if (reg1 == 12) + abort(); + + otherops[1] = gen_rtx (REG, SImode, 1 + reg1); + + /* Ensure the second source is not overwritten */ + if (reg0 == 1 + reg1) + { + output_asm_insn("mov%?\t%0, %1", otherops); + output_asm_insn("mov%?\t%0, %1", operands); + } + else + { + output_asm_insn("mov%?\t%0, %1", operands); + output_asm_insn("mov%?\t%0, %1", otherops); + } + } + else if (code1 == CONST_DOUBLE) + { + otherops[1] = gen_rtx (CONST_INT, VOIDmode, + CONST_DOUBLE_HIGH (operands[1])); + operands[1] = gen_rtx (CONST_INT, VOIDmode, + CONST_DOUBLE_LOW (operands[1])); + output_mov_immediate (operands, FALSE, ""); + output_mov_immediate (otherops, FALSE, ""); + } + else if (code1 == CONST_INT) + { + otherops[1] = const0_rtx; + /* sign extend the intval into the high-order word */ + /* Note: output_mov_immediate may clobber operands[1], so we + put this out first */ + if (INTVAL (operands[1]) < 0) + output_asm_insn ("mvn%?\t%0, %1", otherops); + else + output_asm_insn ("mov%?\t%0, %1", otherops); + output_mov_immediate (operands, FALSE, ""); + } + else if (code1 == MEM) + { + switch (GET_CODE (XEXP (operands[1], 0))) + { + case REG: + /* Handle the simple case where address is [r, #0] more + efficient. 
*/ + output_asm_insn ("ldm%?ia\t%m1, %M0", operands); + break; + case PRE_INC: + output_asm_insn ("add%?\t%m1, %m1, #8", operands); + output_asm_insn ("ldm%?ia\t%m1, %M0", operands); + break; + case PRE_DEC: + output_asm_insn ("sub%?\t%m1, %m1, #8", operands); + output_asm_insn ("ldm%?ia\t%m1, %M0", operands); + break; + case POST_INC: + output_asm_insn ("ldm%?ia\t%m1!, %M0", operands); + break; + case POST_DEC: + output_asm_insn ("ldm%?ia\t%m1, %M0", operands); + output_asm_insn ("sub%?\t%m1, %m1, #8", operands); + break; + default: + otherops[1] = adj_offsettable_operand (operands[1], 4); + /* Take care of overlapping base/data reg. */ + if (reg_mentioned_p (operands[0], operands[1])) + { + output_asm_insn ("ldr%?\t%0, %1", otherops); + output_asm_insn ("ldr%?\t%0, %1", operands); + } + else + { + output_asm_insn ("ldr%?\t%0, %1", operands); + output_asm_insn ("ldr%?\t%0, %1", otherops); + } + } + } + else abort(); /* Constraints should prevent this */ + } + else if (code0 == MEM && code1 == REG) + { + if (REGNO (operands[1]) == 12) + abort(); + switch (GET_CODE (XEXP (operands[0], 0))) + { + case REG: + output_asm_insn ("stm%?ia\t%m0, %M1", operands); + break; + case PRE_INC: + output_asm_insn ("add%?\t%m0, %m0, #8", operands); + output_asm_insn ("stm%?ia\t%m0, %M1", operands); + break; + case PRE_DEC: + output_asm_insn ("sub%?\t%m0, %m0, #8", operands); + output_asm_insn ("stm%?ia\t%m0, %M1", operands); + break; + case POST_INC: + output_asm_insn ("stm%?ia\t%m0!, %M1", operands); + break; + case POST_DEC: + output_asm_insn ("stm%?ia\t%m0, %M1", operands); + output_asm_insn ("sub%?\t%m0, %m0, #8", operands); + break; + default: + otherops[0] = adj_offsettable_operand (operands[0], 4); + otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1])); + output_asm_insn ("str%?\t%1, %0", operands); + output_asm_insn ("str%?\t%1, %0", otherops); + } + } + else abort(); /* Constraints should prevent this */ + + return ""; +} + + +/* Output an arbitrary MOV reg, #n. + OPERANDS[0] is a register. OPERANDS[1] is a const_int. */ + +char * +output_mov_immediate (operands) + rtx *operands; +{ + HOST_WIDE_INT n = INTVAL (operands[1]); + int n_ones = 0; + int i; + + /* Try to use one MOV */ + if (const_ok_for_arm (n)) + { + output_asm_insn ("mov%?\t%0, %1", operands); + return ""; + } + + /* Try to use one MVN */ + if (const_ok_for_arm (~n)) + { + operands[1] = GEN_INT (~n); + output_asm_insn ("mvn%?\t%0, %1", operands); + return ""; + } + + /* If all else fails, make it out of ORRs or BICs as appropriate. */ + + for (i=0; i < 32; i++) + if (n & 1 << i) + n_ones++; + + if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */ + output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, + ~n); + else + output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, + n); + + return ""; +} + + +/* Output an ADD r, s, #n where n may be too big for one instruction. If + adding zero to one register, output nothing. */ + +char * +output_add_immediate (operands) + rtx *operands; +{ + HOST_WIDE_INT n = INTVAL (operands[2]); + + if (n != 0 || REGNO (operands[0]) != REGNO (operands[1])) + { + if (n < 0) + output_multi_immediate (operands, + "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2, + -n); + else + output_multi_immediate (operands, + "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2, + n); + } + + return ""; +} + +/* Output a multiple immediate operation. + OPERANDS is the vector of operands referred to in the output patterns. 
+ INSTR1 is the output pattern to use for the first constant. + INSTR2 is the output pattern to use for subsequent constants. + IMMED_OP is the index of the constant slot in OPERANDS. + N is the constant value. */ + +char * +output_multi_immediate (operands, instr1, instr2, immed_op, n) + rtx *operands; + char *instr1, *instr2; + int immed_op; + HOST_WIDE_INT n; +{ +#if HOST_BITS_PER_WIDE_INT > 32 + n &= 0xffffffff; +#endif + + if (n == 0) + { + operands[immed_op] = const0_rtx; + output_asm_insn (instr1, operands); /* Quick and easy output */ + } + else + { + int i; + char *instr = instr1; + + /* Note that n is never zero here (which would give no output) */ + for (i = 0; i < 32; i += 2) + { + if (n & (3 << i)) + { + operands[immed_op] = GEN_INT (n & (255 << i)); + output_asm_insn (instr, operands); + instr = instr2; + i += 6; + } + } + } + return ""; +} + + +/* Return the appropriate ARM instruction for the operation code. + The returned result should not be overwritten. OP is the rtx of the + operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator + was shifted. */ + +char * +arithmetic_instr (op, shift_first_arg) + rtx op; + int shift_first_arg; +{ + switch (GET_CODE (op)) + { + case PLUS: + return "add"; + + case MINUS: + return shift_first_arg ? "rsb" : "sub"; + + case IOR: + return "orr"; + + case XOR: + return "eor"; + + case AND: + return "and"; + + default: + abort (); + } +} + + +/* Ensure valid constant shifts and return the appropriate shift mnemonic + for the operation code. The returned result should not be overwritten. + OP is the rtx code of the shift. + On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant + shift. */ + +static char * +shift_op (op, amountp) + rtx op; + HOST_WIDE_INT *amountp; +{ + char *mnem; + enum rtx_code code = GET_CODE (op); + + if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG) + *amountp = -1; + else if (GET_CODE (XEXP (op, 1)) == CONST_INT) + *amountp = INTVAL (XEXP (op, 1)); + else + abort (); + + switch (code) + { + case ASHIFT: + mnem = "asl"; + break; + + case ASHIFTRT: + mnem = "asr"; + break; + + case LSHIFTRT: + mnem = "lsr"; + break; + + case ROTATERT: + mnem = "ror"; + break; + + case MULT: + /* We never have to worry about the amount being other than a + power of 2, since this case can never be reloaded from a reg. */ + if (*amountp != -1) + *amountp = int_log2 (*amountp); + else + abort (); + return "asl"; + + default: + abort (); + } + + if (*amountp != -1) + { + /* This is not 100% correct, but follows from the desire to merge + multiplication by a power of 2 with the recognizer for a + shift. >=32 is not a valid shift for "asl", so we must try and + output a shift that produces the correct arithmetical result. + Using lsr #32 is identical except for the fact that the carry bit + is not set correctly if we set the flags; but we never use the + carry bit from such an operation, so we can ignore that. */ + if (code == ROTATERT) + *amountp &= 31; /* Rotate is just modulo 32 */ + else if (*amountp != (*amountp & 31)) + { + if (code == ASHIFT) + mnem = "lsr"; + *amountp = 32; + } + + /* Shifts of 0 are no-ops. */ + if (*amountp == 0) + return NULL; + } + + return mnem; +} + + +/* Obtain the shift from the POWER of two. */ + +HOST_WIDE_INT +int_log2 (power) + HOST_WIDE_INT power; +{ + HOST_WIDE_INT shift = 0; + + while (((1 << shift) & power) == 0) + { + if (shift > 31) + abort (); + shift++; + } + + return shift; +} + +/* Output a .ascii pseudo-op, keeping track of lengths. 
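output_multi_immediate () leans on the ARM immediate format: any 8-bit value rotated right by an even amount can be encoded directly, so an arbitrary 32-bit constant is peeled into at most four such pieces, the first loaded with MOV (or MVN with BIC when the complement has fewer set bits, as output_mov_immediate decides) and the rest merged with ORR. A standalone sketch of the splitting arithmetic follows; it prints assembler-like text instead of emitting insns, and register r0 and the exact mnemonics are only illustrative.

  #include <stdio.h>

  /* Illustrative sketch of the constant-splitting loop: walk the value two
     bits at a time and peel off an 8-bit field whenever a non-zero pair is
     found, so every piece fits the "8 bits rotated by an even amount"
     immediate format.  Prints text; emits nothing.  */
  static void
  split_immediate (unsigned long n)
  {
    int i;
    int first = 1;

    n &= 0xffffffff;
    if (n == 0)
      {
        printf ("mov\tr0, #0\n");
        return;
      }

    for (i = 0; i < 32; i += 2)
      if (n & (3UL << i))
        {
          unsigned long piece = n & (0xffUL << i);  /* 8 bits, even position */

          printf ("%s\tr0, %s#0x%lx\n",
                  first ? "mov" : "orr", first ? "" : "r0, ", piece);
          first = 0;
          i += 6;                     /* skip the rest of this byte */
        }
  }

  int
  main (void)
  {
    split_immediate (0x12345678);   /* needs four pieces */
    split_immediate (0xff000003);   /* needs only two */
    return 0;
  }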
This is because + /bin/as is horribly restrictive. */ + +void +output_ascii_pseudo_op (stream, p, len) + FILE *stream; + unsigned char *p; + int len; +{ + int i; + int len_so_far = 1000; + int chars_so_far = 0; + + for (i = 0; i < len; i++) + { + register int c = p[i]; + + if (len_so_far > 50) + { + if (chars_so_far) + fputs ("\"\n", stream); + fputs ("\t.ascii\t\"", stream); + len_so_far = 0; + arm_increase_location (chars_so_far); + chars_so_far = 0; + } + + if (c == '\"' || c == '\\') + { + putc('\\', stream); + len_so_far++; + } + + if (c >= ' ' && c < 0177) + { + putc (c, stream); + len_so_far++; + } + else + { + fprintf (stream, "\\%03o", c); + len_so_far +=4; + } + + chars_so_far++; + } + + fputs ("\"\n", stream); + arm_increase_location (chars_so_far); +} + + +/* Try to determine whether a pattern really clobbers the link register. + This information is useful when peepholing, so that lr need not be pushed + if we combine a call followed by a return. + NOTE: This code does not check for side-effect expressions in a SET_SRC: + such a check should not be needed because these only update an existing + value within a register; the register must still be set elsewhere within + the function. */ + +static int +pattern_really_clobbers_lr (x) + rtx x; +{ + int i; + + switch (GET_CODE (x)) + { + case SET: + switch (GET_CODE (SET_DEST (x))) + { + case REG: + return REGNO (SET_DEST (x)) == 14; + + case SUBREG: + if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG) + return REGNO (XEXP (SET_DEST (x), 0)) == 14; + + if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM) + return 0; + abort (); + + default: + return 0; + } + + case PARALLEL: + for (i = 0; i < XVECLEN (x, 0); i++) + if (pattern_really_clobbers_lr (XVECEXP (x, 0, i))) + return 1; + return 0; + + case CLOBBER: + switch (GET_CODE (XEXP (x, 0))) + { + case REG: + return REGNO (XEXP (x, 0)) == 14; + + case SUBREG: + if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG) + return REGNO (XEXP (XEXP (x, 0), 0)) == 14; + abort (); + + default: + return 0; + } + + case UNSPEC: + return 1; + + default: + return 0; + } +} + +static int +function_really_clobbers_lr (first) + rtx first; +{ + rtx insn, next; + + for (insn = first; insn; insn = next_nonnote_insn (insn)) + { + switch (GET_CODE (insn)) + { + case BARRIER: + case NOTE: + case CODE_LABEL: + case JUMP_INSN: /* Jump insns only change the PC (and conds) */ + case INLINE_HEADER: + break; + + case INSN: + if (pattern_really_clobbers_lr (PATTERN (insn))) + return 1; + break; + + case CALL_INSN: + /* Don't yet know how to handle those calls that are not to a + SYMBOL_REF */ + if (GET_CODE (PATTERN (insn)) != PARALLEL) + abort (); + + switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0))) + { + case CALL: + if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0)) + != SYMBOL_REF) + return 1; + break; + + case SET: + if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn), + 0, 0)), 0), 0)) + != SYMBOL_REF) + return 1; + break; + + default: /* Don't recognize it, be safe */ + return 1; + } + + /* A call can be made (by peepholing) not to clobber lr iff it is + followed by a return. There may, however, be a use insn iff + we are returning the result of the call. + If we run off the end of the insn chain, then that means the + call was at the end of the function. Unfortunately we don't + have a return insn for the peephole to recognize, so we + must reject this. (Can this be fixed by adding our own insn?) 
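The .ascii splitter a little further up, output_ascii_pseudo_op (), can be exercised on its own: it chops the string into short runs for the restrictive assembler, escapes quote and backslash, and falls back to octal for non-printing bytes. Here is a self-contained version of the same loop, minus the location bookkeeping; the 50-character limit mirrors the code above and the test string is arbitrary.

  #include <stdio.h>
  #include <string.h>

  /* Illustrative stand-alone version of the .ascii splitting loop.  */
  static void
  emit_ascii (FILE *stream, const unsigned char *p, int len)
  {
    int i, len_so_far = 1000, chars_out = 0;

    for (i = 0; i < len; i++)
      {
        int c = p[i];

        if (len_so_far > 50)               /* start a fresh .ascii line */
          {
            if (chars_out)
              fputs ("\"\n", stream);
            fputs ("\t.ascii\t\"", stream);
            len_so_far = 0;
          }

        if (c == '"' || c == '\\')          /* escape for the assembler */
          {
            putc ('\\', stream);
            len_so_far++;
          }

        if (c >= ' ' && c < 0177)           /* printable: emit as-is */
          {
            putc (c, stream);
            len_so_far++;
          }
        else                                /* everything else in octal */
          {
            fprintf (stream, "\\%03o", c);
            len_so_far += 4;
          }
        chars_out++;
      }
    fputs ("\"\n", stream);
  }

  int
  main (void)
  {
    const char *s = "a \"quoted\" string with a tab\there";

    emit_ascii (stdout, (const unsigned char *) s, (int) strlen (s));
    return 0;
  }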
*/ + if ((next = next_nonnote_insn (insn)) == NULL) + return 1; + + if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE + && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) + && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0))) + == REGNO (XEXP (PATTERN (next), 0)))) + if ((next = next_nonnote_insn (next)) == NULL) + return 1; + + if (GET_CODE (next) == JUMP_INSN + && GET_CODE (PATTERN (next)) == RETURN) + break; + return 1; + + default: + abort (); + } + } + + /* We have reached the end of the chain so lr was _not_ clobbered */ + return 0; +} + +char * +output_return_instruction (operand, really_return) + rtx operand; + int really_return; +{ + char instr[100]; + int reg, live_regs = 0; + int volatile_func = (optimize > 0 + && TREE_THIS_VOLATILE (current_function_decl)); + + return_used_this_function = 1; + + if (volatile_func) + { + rtx ops[2]; + /* If this function was declared non-returning, and we have found a tail + call, then we have to trust that the called function won't return. */ + if (! really_return) + return ""; + + /* Otherwise, trap an attempted return by aborting. */ + ops[0] = operand; + ops[1] = gen_rtx (SYMBOL_REF, Pmode, "abort"); + output_asm_insn ("bl%d0\t%a1", ops); + return ""; + } + + if (current_function_calls_alloca && ! really_return) + abort(); + + for (reg = 0; reg <= 10; reg++) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + live_regs++; + + if (live_regs || (regs_ever_live[14] && ! lr_save_eliminated)) + live_regs++; + + if (frame_pointer_needed) + live_regs += 4; + + if (live_regs) + { + if (lr_save_eliminated || ! regs_ever_live[14]) + live_regs++; + + if (frame_pointer_needed) + strcpy (instr, "ldm%?%d0ea\t%|fp, {"); + else + strcpy (instr, "ldm%?%d0fd\t%|sp!, {"); + + for (reg = 0; reg <= 10; reg++) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + { + strcat (instr, "%|"); + strcat (instr, reg_names[reg]); + if (--live_regs) + strcat (instr, ", "); + } + + if (frame_pointer_needed) + { + strcat (instr, "%|"); + strcat (instr, reg_names[11]); + strcat (instr, ", "); + strcat (instr, "%|"); + strcat (instr, reg_names[13]); + strcat (instr, ", "); + strcat (instr, "%|"); + strcat (instr, really_return ? reg_names[15] : reg_names[14]); + } + else + { + strcat (instr, "%|"); + strcat (instr, really_return ? reg_names[15] : reg_names[14]); + } + strcat (instr, (TARGET_6 || !really_return) ? "}" : "}^"); + output_asm_insn (instr, &operand); + } + else if (really_return) + { + strcpy (instr, + TARGET_6 ? "mov%?%d0\t%|pc, lr" : "mov%?%d0s\t%|pc, %|lr"); + output_asm_insn (instr, &operand); + } + + return ""; +} + +/* Return nonzero if optimizing and the current function is volatile. + Such functions never return, and many memory cycles can be saved + by not storing register values that will never be needed again. + This optimization was added to speed up context switching in a + kernel application. */ + +int +arm_volatile_func () +{ + return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl)); +} + +/* Return the size of the prologue. It's not too bad if we slightly + over-estimate. */ + +static int +get_prologue_size () +{ + return profile_flag ? 12 : 0; +} + +/* The amount of stack adjustment that happens here, in output_return and in + output_epilogue must be exactly the same as was calculated during reload, + or things will point to the wrong place. The only time we can safely + ignore this constraint is when a function has no arguments on the stack, + no stack frame requirement and no live registers execpt for `lr'. 
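output_return_instruction () builds the whole return sequence as one ldm: the used callee-saved registers come back from the stack and the saved lr is reloaded straight into pc. The sketch below shows how such a register-list string is assembled; the live-register table is sample data, and the frame-pointer and floating-point cases are left out.

  #include <stdio.h>
  #include <string.h>

  /* Illustrative sketch: regs_ever_live holds made-up sample data.  */
  int
  main (void)
  {
    static const char *reg_names[16] =
      { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc" };
    int regs_ever_live[16] = { 0 };
    char instr[100];
    int reg, first = 1;

    regs_ever_live[4] = regs_ever_live[5] = regs_ever_live[7] = 1;  /* sample */

    strcpy (instr, "ldmfd\tsp!, {");
    for (reg = 0; reg <= 10; reg++)
      if (regs_ever_live[reg])
        {
          if (!first)
            strcat (instr, ", ");
          strcat (instr, reg_names[reg]);
          first = 0;
        }
    strcat (instr, first ? "pc}" : ", pc}");   /* lr is reloaded into pc */

    printf ("%s\n", instr);    /* ldmfd sp!, {r4, r5, r7, pc} */
    return 0;
  }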
If we + can guarantee that by making all function calls into tail calls and that + lr is not clobbered in any other way, then there is no need to push lr + onto the stack. */ + +void +output_func_prologue (f, frame_size) + FILE *f; + int frame_size; +{ + int reg, live_regs_mask = 0; + rtx operands[3]; + int volatile_func = (optimize > 0 + && TREE_THIS_VOLATILE (current_function_decl)); + + /* Nonzero if we must stuff some register arguments onto the stack as if + they were passed there. */ + int store_arg_regs = 0; + + if (arm_ccfsm_state || arm_target_insn) + abort (); /* Sanity check */ + + return_used_this_function = 0; + lr_save_eliminated = 0; + + fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n", + ASM_COMMENT_START, current_function_args_size, + current_function_pretend_args_size, frame_size); + fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n", + ASM_COMMENT_START, frame_pointer_needed, + current_function_anonymous_args); + + if (volatile_func) + fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START); + + if (current_function_anonymous_args && current_function_pretend_args_size) + store_arg_regs = 1; + + for (reg = 0; reg <= 10; reg++) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + live_regs_mask |= (1 << reg); + + if (frame_pointer_needed) + live_regs_mask |= 0xD800; + else if (regs_ever_live[14]) + { + if (! current_function_args_size + && ! function_really_clobbers_lr (get_insns ())) + lr_save_eliminated = 1; + else + live_regs_mask |= 0x4000; + } + + if (live_regs_mask) + { + /* if a di mode load/store multiple is used, and the base register + is r3, then r4 can become an ever live register without lr + doing so, in this case we need to push lr as well, or we + will fail to get a proper return. */ + + live_regs_mask |= 0x4000; + lr_save_eliminated = 0; + + } + + if (lr_save_eliminated) + fprintf (f,"\t%s I don't think this function clobbers lr\n", + ASM_COMMENT_START); +} + + +void +output_func_epilogue (f, frame_size) + FILE *f; + int frame_size; +{ + int reg, live_regs_mask = 0, code_size = 0; + /* If we need this then it will always be at lesat this much */ + int floats_offset = 24; + rtx operands[3]; + int volatile_func = (optimize > 0 + && TREE_THIS_VOLATILE (current_function_decl)); + + if (use_return_insn() && return_used_this_function) + { + if (frame_size && !(frame_pointer_needed || TARGET_APCS)) + { + abort (); + } + goto epilogue_done; + } + + /* A volatile function should never return. Call abort. */ + if (volatile_func) + { + rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort"); + output_asm_insn ("bl\t%a0", &op); + code_size = 4; + goto epilogue_done; + } + + for (reg = 0; reg <= 10; reg++) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + { + live_regs_mask |= (1 << reg); + floats_offset += 4; + } + + if (frame_pointer_needed) + { + for (reg = 23; reg > 15; reg--) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + { + fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX, + reg_names[reg], REGISTER_PREFIX, floats_offset); + floats_offset += 12; + code_size += 4; + } + + live_regs_mask |= 0xA800; + print_multi_reg (f, "ldmea\t%sfp", live_regs_mask, + TARGET_6 ? FALSE : TRUE); + code_size += 4; + } + else + { + /* Restore stack pointer if necessary. */ + if (frame_size) + { + operands[0] = operands[1] = stack_pointer_rtx; + operands[2] = gen_rtx (CONST_INT, VOIDmode, frame_size); + output_add_immediate (operands); + } + + for (reg = 16; reg < 24; reg++) + if (regs_ever_live[reg] && ! 
call_used_regs[reg]) + { + fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX, + reg_names[reg], REGISTER_PREFIX); + code_size += 4; + } + if (current_function_pretend_args_size == 0 && regs_ever_live[14]) + { + print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000, + TARGET_6 ? FALSE : TRUE); + code_size += 4; + } + else + { + if (live_regs_mask || regs_ever_live[14]) + { + live_regs_mask |= 0x4000; + print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE); + code_size += 4; + } + if (current_function_pretend_args_size) + { + operands[0] = operands[1] = stack_pointer_rtx; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + current_function_pretend_args_size); + output_add_immediate (operands); + } + fprintf (f, + TARGET_6 ? "\tmov\t%spc, %slr\n" : "\tmovs\t%spc, %slr\n", + REGISTER_PREFIX, REGISTER_PREFIX, f); + code_size += 4; + } + } + + epilogue_done: + + /* insn_addresses isn't allocated when not optimizing */ + + if (optimize > 0) + arm_increase_location (code_size + + insn_addresses[INSN_UID (get_last_insn ())] + + get_prologue_size ()); + + current_function_anonymous_args = 0; +} + +static void +emit_multi_reg_push (mask) + int mask; +{ + int num_regs = 0; + int i, j; + rtx par; + + for (i = 0; i < 16; i++) + if (mask & (1 << i)) + num_regs++; + + if (num_regs == 0 || num_regs > 16) + abort (); + + par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs)); + + for (i = 0; i < 16; i++) + { + if (mask & (1 << i)) + { + XVECEXP (par, 0, 0) + = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode, + gen_rtx (PRE_DEC, BLKmode, + stack_pointer_rtx)), + gen_rtx (UNSPEC, BLKmode, + gen_rtvec (1, gen_rtx (REG, SImode, i)), + 2)); + break; + } + } + + for (j = 1, i++; j < num_regs; i++) + { + if (mask & (1 << i)) + { + XVECEXP (par, 0, j) + = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i)); + j++; + } + } + emit_insn (par); +} + +void +arm_expand_prologue () +{ + int reg; + rtx amount = GEN_INT (- get_frame_size ()); + rtx push_insn; + int num_regs; + int live_regs_mask = 0; + int store_arg_regs = 0; + int volatile_func = (optimize > 0 + && TREE_THIS_VOLATILE (current_function_decl)); + + if (current_function_anonymous_args && current_function_pretend_args_size) + store_arg_regs = 1; + + if (! volatile_func) + for (reg = 0; reg <= 10; reg++) + if (regs_ever_live[reg] && ! call_used_regs[reg]) + live_regs_mask |= 1 << reg; + + if (! volatile_func && regs_ever_live[14]) + live_regs_mask |= 0x4000; + + if (frame_pointer_needed) + { + live_regs_mask |= 0xD800; + emit_insn (gen_movsi (gen_rtx (REG, SImode, 12), + stack_pointer_rtx)); + } + + if (current_function_pretend_args_size) + { + if (store_arg_regs) + emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4)) + & 0xf); + else + emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, + GEN_INT (-current_function_pretend_args_size))); + } + + if (live_regs_mask) + { + /* If we have to push any regs, then we must push lr as well, or + we won't get a proper return. */ + live_regs_mask |= 0x4000; + emit_multi_reg_push (live_regs_mask); + } + + /* For now the integer regs are still pushed in output_func_epilogue (). */ + + if (! volatile_func) + for (reg = 23; reg > 15; reg--) + if (regs_ever_live[reg] && ! 
call_used_regs[reg]) + emit_insn (gen_rtx (SET, VOIDmode, + gen_rtx (MEM, XFmode, + gen_rtx (PRE_DEC, XFmode, + stack_pointer_rtx)), + gen_rtx (REG, XFmode, reg))); + + if (frame_pointer_needed) + emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12), + (GEN_INT + (-(4 + current_function_pretend_args_size))))); + + if (amount != const0_rtx) + { + emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount)); + emit_insn (gen_rtx (CLOBBER, VOIDmode, + gen_rtx (MEM, BLKmode, stack_pointer_rtx))); + } + + /* If we are profiling, make sure no instructions are scheduled before + the call to mcount. */ + if (profile_flag || profile_block_flag) + emit_insn (gen_blockage ()); +} + + +/* If CODE is 'd', then the X is a condition operand and the instruction + should only be executed if the condition is true. + if CODE is 'D', then the X is a condition operand and the instruction + should only be executed if the condition is false: however, if the mode + of the comparison is CCFPEmode, then always execute the instruction -- we + do this because in these circumstances !GE does not necessarily imply LT; + in these cases the instruction pattern will take care to make sure that + an instruction containing %d will follow, thereby undoing the effects of + doing this instruction unconditionally. + If CODE is 'N' then X is a floating point operand that must be negated + before output. + If CODE is 'B' then output a bitwise inverted value of X (a const int). + If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */ + +void +arm_print_operand (stream, x, code) + FILE *stream; + rtx x; + int code; +{ + switch (code) + { + case '@': + fputs (ASM_COMMENT_START, stream); + return; + + case '|': + fputs (REGISTER_PREFIX, stream); + return; + + case '?': + if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4) + fputs (arm_condition_codes[arm_current_cc], stream); + return; + + case 'N': + { + REAL_VALUE_TYPE r; + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + r = REAL_VALUE_NEGATE (r); + fprintf (stream, "%s", fp_const_from_val (&r)); + } + return; + + case 'B': + if (GET_CODE (x) == CONST_INT) + fprintf (stream, +#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT + "%d", +#else + "%ld", +#endif + ARM_SIGN_EXTEND (~ INTVAL (x))); + else + { + putc ('~', stream); + output_addr_const (stream, x); + } + return; + + case 'i': + fprintf (stream, "%s", arithmetic_instr (x, 1)); + return; + + case 'I': + fprintf (stream, "%s", arithmetic_instr (x, 0)); + return; + + case 'S': + { + HOST_WIDE_INT val; + char *shift = shift_op (x, &val); + + if (shift) + { + fprintf (stream, ", %s ", shift_op (x, &val)); + if (val == -1) + arm_print_operand (stream, XEXP (x, 1), 0); + else + fprintf (stream, +#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT + "#%d", +#else + "#%ld", +#endif + val); + } + } + return; + + case 'R': + if (REGNO (x) > 15) + abort (); + fputs (REGISTER_PREFIX, stream); + fputs (reg_names[REGNO (x) + 1], stream); + return; + + case 'm': + fputs (REGISTER_PREFIX, stream); + if (GET_CODE (XEXP (x, 0)) == REG) + fputs (reg_names[REGNO (XEXP (x, 0))], stream); + else + fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream); + return; + + case 'M': + fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)], + REGISTER_PREFIX, reg_names[REGNO (x) - 1 + + ((GET_MODE_SIZE (GET_MODE (x)) + + GET_MODE_SIZE (SImode) - 1) + / GET_MODE_SIZE (SImode))]); + return; + + case 'd': + if (x) + fputs (arm_condition_codes[get_arm_condition_code (x)], + stream); + return; + + case 'D': + if (x && 
(flag_fast_math + || GET_CODE (x) == EQ || GET_CODE (x) == NE + || (GET_MODE (XEXP (x, 0)) != CCFPEmode + && (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) + != MODE_FLOAT)))) + fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE + (get_arm_condition_code (x))], + stream); + return; + + default: + if (x == 0) + abort (); + + if (GET_CODE (x) == REG) + { + fputs (REGISTER_PREFIX, stream); + fputs (reg_names[REGNO (x)], stream); + } + else if (GET_CODE (x) == MEM) + { + output_memory_reference_mode = GET_MODE (x); + output_address (XEXP (x, 0)); + } + else if (GET_CODE (x) == CONST_DOUBLE) + fprintf (stream, "#%s", fp_immediate_constant (x)); + else if (GET_CODE (x) == NEG) + abort (); /* This should never happen now. */ + else + { + fputc ('#', stream); + output_addr_const (stream, x); + } + } +} + +/* Increase the `arm_text_location' by AMOUNT if we're in the text + segment. */ + +void +arm_increase_location (amount) + int amount; +{ + if (in_text_section ()) + arm_text_location += amount; +} + + +/* Output a label definition. If this label is within the .text segment, it + is stored in OFFSET_TABLE, to be used when building `llc' instructions. + Maybe GCC remembers names not starting with a `*' for a long time, but this + is a minority anyway, so we just make a copy. Do not store the leading `*' + if the name starts with one. */ + +void +arm_asm_output_label (stream, name) + FILE *stream; + char *name; +{ + char *real_name, *s; + struct label_offset *cur; + int hash = 0; + + assemble_name (stream, name); + fputs (":\n", stream); + if (! in_text_section ()) + return; + + if (name[0] == '*') + { + real_name = xmalloc (1 + strlen (&name[1])); + strcpy (real_name, &name[1]); + } + else + { + real_name = xmalloc (2 + strlen (name)); + strcpy (real_name, USER_LABEL_PREFIX); + strcat (real_name, name); + } + for (s = real_name; *s; s++) + hash += *s; + + hash = hash % LABEL_HASH_SIZE; + cur = (struct label_offset *) xmalloc (sizeof (struct label_offset)); + cur->name = real_name; + cur->offset = arm_text_location; + cur->cdr = offset_table[hash]; + offset_table[hash] = cur; +} + +/* Load a symbol that is known to be in the text segment into a register. + This should never be called when not optimizing. */ + +char * +output_load_symbol (insn, operands) + rtx insn; + rtx *operands; +{ + char *s; + char *name = XSTR (operands[1], 0); + struct label_offset *he; + int hash = 0; + int offset; + unsigned int mask, never_mask = 0xffffffff; + int shift, inst; + char buffer[100]; + + if (optimize == 0 || *name != '*') + abort (); + + for (s = &name[1]; *s; s++) + hash += *s; + + hash = hash % LABEL_HASH_SIZE; + he = offset_table[hash]; + while (he && strcmp (he->name, &name[1])) + he = he->cdr; + + if (!he) + abort (); + + offset = (arm_text_location + insn_addresses[INSN_UID (insn)] + + get_prologue_size () + 8 - he->offset); + if (offset < 0) + abort (); + + /* When generating the instructions, we never mask out the bits that we + think will be always zero, then if a mistake has occurred somewhere, the + assembler will spot it and generate an error. */ + + /* If the symbol is word aligned then we might be able to reduce the + number of loads. */ + shift = ((offset & 3) == 0) ? 2 : 0; + + /* Clear the bits from NEVER_MASK that will be orred in with the individual + instructions. 
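arm_asm_output_label () and output_load_symbol () share a small hash table keyed by the sum of the label's characters modulo LABEL_HASH_SIZE, mapping each text-segment label to its offset from the start of .text. A standalone sketch of that table follows; the offsets in main are arbitrary sample values and error handling is omitted.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Illustrative sketch of the text-segment label table.  */
  #define LABEL_HASH_SIZE 257

  struct label_offset
  {
    char *name;
    int offset;                     /* offset from the start of .text */
    struct label_offset *cdr;       /* next entry in this hash chain */
  };

  static struct label_offset *offset_table[LABEL_HASH_SIZE];

  static int
  label_hash (const char *name)
  {
    int hash = 0;
    const char *s;

    for (s = name; *s; s++)
      hash += *s;
    return hash % LABEL_HASH_SIZE;
  }

  static void
  record_label (const char *name, int text_offset)
  {
    int hash = label_hash (name);
    struct label_offset *cur = malloc (sizeof *cur);

    cur->name = malloc (strlen (name) + 1);
    strcpy (cur->name, name);
    cur->offset = text_offset;
    cur->cdr = offset_table[hash];
    offset_table[hash] = cur;
  }

  static int
  lookup_label (const char *name)
  {
    struct label_offset *he = offset_table[label_hash (name)];

    while (he && strcmp (he->name, name))
      he = he->cdr;
    return he ? he->offset : -1;
  }

  int
  main (void)
  {
    record_label ("_main", 0x40);   /* sample offsets */
    record_label ("LC0", 0x98);
    printf ("LC0 is at text offset %#x\n", lookup_label ("LC0"));
    return 0;
  }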
*/ + for (; shift < 32; shift += 8) + { + mask = 0xff << shift; + if ((offset & mask) || ((unsigned) offset) > mask) + never_mask &= ~mask; + } + + inst = 8; + mask = 0xff << (shift - 32); + + while (mask && (never_mask & mask) == 0) + { + if (inst == 8) + { + strcpy (buffer, "sub%?\t%0, %|pc, #(8 + . -%a1)"); + if ((never_mask | mask) != 0xffffffff) + sprintf (buffer + strlen (buffer), " & 0x%x", mask | never_mask); + } + else + sprintf (buffer, "sub%%?\t%%0, %%0, #(%d + . -%%a1) & 0x%x", + inst, mask | never_mask); + + output_asm_insn (buffer, operands); + mask <<= 8; + inst -= 4; + } + + return ""; +} + +/* Output code resembling an .lcomm directive. /bin/as doesn't have this + directive hence this hack, which works by reserving some `.space' in the + bss segment directly. + + XXX This is a severe hack, which is guaranteed NOT to work since it doesn't + define STATIC COMMON space but merely STATIC BSS space. */ + +void +output_lcomm_directive (stream, name, size, rounded) + FILE *stream; + char *name; + int size, rounded; +{ + fprintf (stream, "\n\t.bss\t%s .lcomm\n", ASM_COMMENT_START); + assemble_name (stream, name); + fprintf (stream, ":\t.space\t%d\n", rounded); + if (in_text_section ()) + fputs ("\n\t.text\n", stream); + else + fputs ("\n\t.data\n", stream); +} + +/* A finite state machine takes care of noticing whether or not instructions + can be conditionally executed, and thus decrease execution time and code + size by deleting branch instructions. The fsm is controlled by + final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */ + +/* The state of the fsm controlling condition codes are: + 0: normal, do nothing special + 1: make ASM_OUTPUT_OPCODE not output this instruction + 2: make ASM_OUTPUT_OPCODE not output this instruction + 3: make instructions conditional + 4: make instructions conditional + + State transitions (state->state by whom under condition): + 0 -> 1 final_prescan_insn if the `target' is a label + 0 -> 2 final_prescan_insn if the `target' is an unconditional branch + 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch + 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch + 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached + (the target label has CODE_LABEL_NUMBER equal to arm_target_label). + 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached + (the target insn is arm_target_insn). + + If the jump clobbers the conditions then we use states 2 and 4. + + A similar thing can be done with conditional return insns. + + XXX In case the `target' is an unconditional branch, this conditionalising + of the instructions always reduces code size, but not always execution + time. But then, I want to reduce the code size to somewhere near what + /bin/cc produces. */ + +/* Returns the index of the ARM condition code string in + `arm_condition_codes'. COMPARISON should be an rtx like + `(eq (...) (...))'. */ + +int +get_arm_condition_code (comparison) + rtx comparison; +{ + switch (GET_CODE (comparison)) + { + case NE: return (1); + case EQ: return (0); + case GE: return (10); + case GT: return (12); + case LE: return (13); + case LT: return (11); + case GEU: return (2); + case GTU: return (8); + case LEU: return (9); + case LTU: return (3); + default: abort (); + } + /*NOTREACHED*/ + return (42); +} + + +void +final_prescan_insn (insn, opvec, noperands) + rtx insn; + rtx *opvec; + int noperands; +{ + /* BODY will hold the body of INSN. 
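get_arm_condition_code () maps an RTL comparison onto an index into the condition-name table, and that table is ordered exactly as the ARM condition field encodes it, so each condition and its inverse differ only in bit 0; that is all the XOR-with-1 inversion relies on. Below is a standalone sketch of the mapping, where the cmp_code enum is only a stand-in for GCC's rtx comparison codes.

  #include <stdio.h>

  /* The cond_names table follows the ARM condition-field encoding; the
     cmp_code enum is a stand-in for GCC's rtx comparison codes.  */
  static const char *cond_names[] =
  {
    "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
    "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
  };

  #define INVERSE_CONDITION(X) ((X) ^ 1)   /* inverses are adjacent pairs */

  enum cmp_code { CMP_EQ, CMP_NE, CMP_GE, CMP_GT, CMP_LE, CMP_LT,
                  CMP_GEU, CMP_GTU, CMP_LEU, CMP_LTU };

  static int
  condition_index (enum cmp_code code)
  {
    switch (code)
      {
      case CMP_EQ:  return 0;   /* eq */
      case CMP_NE:  return 1;   /* ne */
      case CMP_GEU: return 2;   /* cs */
      case CMP_LTU: return 3;   /* cc */
      case CMP_GTU: return 8;   /* hi */
      case CMP_LEU: return 9;   /* ls */
      case CMP_GE:  return 10;  /* ge */
      case CMP_LT:  return 11;  /* lt */
      case CMP_GT:  return 12;  /* gt */
      case CMP_LE:  return 13;  /* le */
      }
    return 14;                  /* "al"; not reached */
  }

  int
  main (void)
  {
    int cc = condition_index (CMP_GE);

    printf ("GE  -> %s\n", cond_names[cc]);
    printf ("!GE -> %s\n", cond_names[INVERSE_CONDITION (cc)]);
    return 0;
  }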
*/ + register rtx body = PATTERN (insn); + + /* This will be 1 if trying to repeat the trick, and things need to be + reversed if it appears to fail. */ + int reverse = 0; + + /* JUMP_CLOBBERS will be one implies that the conditions if a branch is + taken are clobbered, even if the rtl suggests otherwise. It also + means that we have to grub around within the jump expression to find + out what the conditions are when the jump isn't taken. */ + int jump_clobbers = 0; + + /* If we start with a return insn, we only succeed if we find another one. */ + int seeking_return = 0; + + /* START_INSN will hold the insn from where we start looking. This is the + first insn after the following code_label if REVERSE is true. */ + rtx start_insn = insn; + + /* If in state 4, check if the target branch is reached, in order to + change back to state 0. */ + if (arm_ccfsm_state == 4) + { + if (insn == arm_target_insn) + { + arm_target_insn = NULL; + arm_ccfsm_state = 0; + } + return; + } + + /* If in state 3, it is possible to repeat the trick, if this insn is an + unconditional branch to a label, and immediately following this branch + is the previous target label which is only used once, and the label this + branch jumps to is not too far off. */ + if (arm_ccfsm_state == 3) + { + if (simplejump_p (insn)) + { + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == BARRIER) + { + /* XXX Isn't this always a barrier? */ + start_insn = next_nonnote_insn (start_insn); + } + if (GET_CODE (start_insn) == CODE_LABEL + && CODE_LABEL_NUMBER (start_insn) == arm_target_label + && LABEL_NUSES (start_insn) == 1) + reverse = TRUE; + else + return; + } + else if (GET_CODE (body) == RETURN) + { + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == BARRIER) + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == CODE_LABEL + && CODE_LABEL_NUMBER (start_insn) == arm_target_label + && LABEL_NUSES (start_insn) == 1) + { + reverse = TRUE; + seeking_return = 1; + } + else + return; + } + else + return; + } + + if (arm_ccfsm_state != 0 && !reverse) + abort (); + if (GET_CODE (insn) != JUMP_INSN) + return; + + /* This jump might be paralleled with a clobber of the condition codes + the jump should always come first */ + if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0) + body = XVECEXP (body, 0, 0); + +#if 0 + /* If this is a conditional return then we don't want to know */ + if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC + && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE + && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN + || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)) + return; +#endif + + if (reverse + || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC + && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE)) + { + int insns_skipped = 0, fail = FALSE, succeed = FALSE; + /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */ + int then_not_else = TRUE; + rtx this_insn = start_insn, label = 0; + + if (get_attr_conds (insn) == CONDS_JUMP_CLOB) + { + /* The code below is wrong for these, and I haven't time to + fix it now. So we just do the safe thing and return. This + whole function needs re-writing anyway. */ + jump_clobbers = 1; + return; + } + + /* Register the insn jumped to. 
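The scan that follows walks up to MAX_INSNS_SKIPPED insns beyond the branch and succeeds only if it reaches the target label (or a matching return) without meeting anything that disturbs the condition codes; on success the skipped insns are emitted conditionally and the branch disappears. A much simplified standalone sketch of that go/no-go decision is below; the insn list is made-up sample data, not RTL.

  #include <stdio.h>

  /* Illustrative sketch: the insn kinds are sample data, not RTL.  */
  #define MAX_INSNS_SKIPPED 5

  enum insn_kind { SIMPLE_INSN, CC_CLOBBER_INSN, TARGET_LABEL };

  static int
  can_conditionalise (const enum insn_kind *insns, int n)
  {
    int skipped;

    for (skipped = 0; skipped < n && skipped < MAX_INSNS_SKIPPED; skipped++)
      {
        if (insns[skipped] == TARGET_LABEL)
          return 1;                  /* reached the branch target */
        if (insns[skipped] == CC_CLOBBER_INSN)
          return 0;                  /* would destroy the condition */
      }
    return 0;                        /* too many insns to skip */
  }

  int
  main (void)
  {
    /* "bne .L1; add; sub; .L1:" -- two skippable insns, so they could be
       emitted as "addeq; subeq" and the branch dropped.  */
    enum insn_kind body[] = { SIMPLE_INSN, SIMPLE_INSN, TARGET_LABEL };

    printf ("conditionalise: %s\n",
            can_conditionalise (body, 3) ? "yes" : "no");
    return 0;
  }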
*/ + if (reverse) + { + if (!seeking_return) + label = XEXP (SET_SRC (body), 0); + } + else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF) + label = XEXP (XEXP (SET_SRC (body), 1), 0); + else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF) + { + label = XEXP (XEXP (SET_SRC (body), 2), 0); + then_not_else = FALSE; + } + else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN) + seeking_return = 1; + else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN) + { + seeking_return = 1; + then_not_else = FALSE; + } + else + abort (); + + /* See how many insns this branch skips, and what kind of insns. If all + insns are okay, and the label or unconditional branch to the same + label is not too far away, succeed. */ + for (insns_skipped = 0; + !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED; + insns_skipped++) + { + rtx scanbody; + + this_insn = next_nonnote_insn (this_insn); + if (!this_insn) + break; + + scanbody = PATTERN (this_insn); + + switch (GET_CODE (this_insn)) + { + case CODE_LABEL: + /* Succeed if it is the target label, otherwise fail since + control falls in from somewhere else. */ + if (this_insn == label) + { + if (jump_clobbers) + { + arm_ccfsm_state = 2; + this_insn = next_nonnote_insn (this_insn); + } + else + arm_ccfsm_state = 1; + succeed = TRUE; + } + else + fail = TRUE; + break; + + case BARRIER: + /* Succeed if the following insn is the target label. + Otherwise fail. + If return insns are used then the last insn in a function + will be a barrier. */ + this_insn = next_nonnote_insn (this_insn); + if (this_insn && this_insn == label) + { + if (jump_clobbers) + { + arm_ccfsm_state = 2; + this_insn = next_nonnote_insn (this_insn); + } + else + arm_ccfsm_state = 1; + succeed = TRUE; + } + else + fail = TRUE; + break; + + case CALL_INSN: + /* The arm 6xx uses full 32 bit addresses so the cc is not + preserved over calls */ + if (TARGET_6) + fail = TRUE; + break; + case JUMP_INSN: + /* If this is an unconditional branch to the same label, succeed. + If it is to another label, do nothing. If it is conditional, + fail. */ + /* XXX Probably, the test for the SET and the PC are unnecessary. */ + + if (GET_CODE (scanbody) == SET + && GET_CODE (SET_DEST (scanbody)) == PC) + { + if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF + && XEXP (SET_SRC (scanbody), 0) == label && !reverse) + { + arm_ccfsm_state = 2; + succeed = TRUE; + } + else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE) + fail = TRUE; + } + else if (GET_CODE (scanbody) == RETURN + && seeking_return) + { + arm_ccfsm_state = 2; + succeed = TRUE; + } + else if (GET_CODE (scanbody) == PARALLEL) + { + switch (get_attr_conds (this_insn)) + { + case CONDS_NOCOND: + break; + default: + fail = TRUE; + break; + } + } + break; + + case INSN: + /* Instructions using or affecting the condition codes make it + fail. */ + if ((GET_CODE (scanbody) == SET + || GET_CODE (scanbody) == PARALLEL) + && get_attr_conds (this_insn) != CONDS_NOCOND) + fail = TRUE; + break; + + default: + break; + } + } + if (succeed) + { + if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse)) + arm_target_label = CODE_LABEL_NUMBER (label); + else if (seeking_return || arm_ccfsm_state == 2) + { + while (this_insn && GET_CODE (PATTERN (this_insn)) == USE) + { + this_insn = next_nonnote_insn (this_insn); + if (this_insn && (GET_CODE (this_insn) == BARRIER + || GET_CODE (this_insn) == CODE_LABEL)) + abort (); + } + if (!this_insn) + { + /* Oh, dear! we ran off the end.. 
give up */ + recog (PATTERN (insn), insn, NULL_PTR); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + return; + } + arm_target_insn = this_insn; + } + else + abort (); + if (jump_clobbers) + { + if (reverse) + abort (); + arm_current_cc = + get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body), + 0), 0), 1)); + if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + } + else + { + /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from + what it was. */ + if (!reverse) + arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), + 0)); + } + + if (reverse || then_not_else) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + } + /* restore recog_operand (getting the attributes of other insns can + destroy this array, but final.c assumes that it remains intact + across this call; since the insn has been recognized already we + call recog direct). */ + recog (PATTERN (insn), insn, NULL_PTR); + } +} + +/* EOF */ diff --git a/gnu/usr.bin/gcc/arch/arm32/arm32.h b/gnu/usr.bin/gcc/arch/arm32/arm32.h new file mode 100644 index 000000000000..db5ffdead350 --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/arm32.h @@ -0,0 +1,1759 @@ +/* Definitions of target machine for GNU compiler, for Acorn RISC Machine. + Copyright (C) 1991, 1993, 1994, 1995 Free Software Foundation, Inc. + Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) + and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* Sometimes the directive `riscos' is checked. This does not imply that this + tm file can be used unchanged to build a GCC for RISC OS. + (Since in fact, it can't.) 
*/ + +extern void output_func_prologue (); +extern void output_func_epilogue (); +extern char *output_add_immediate (); +extern char *output_call (); +extern char *output_call_mem (); +extern char *output_move_double (); +extern char *output_mov_double_fpu_from_arm (); +extern char *output_mov_double_arm_from_fpu (); +extern char *output_mov_long_double_fpu_from_arm (); +extern char *output_mov_long_double_arm_from_fpu (); +extern char *output_mov_long_double_arm_from_arm (); +extern char *output_mov_immediate (); +extern char *output_multi_immediate (); +extern char *output_return_instruction (); +extern char *output_load_symbol (); +extern char *fp_immediate_constant (); +extern struct rtx_def *gen_compare_reg (); +extern struct rtx_def *arm_gen_store_multiple (); +extern struct rtx_def *arm_gen_load_multiple (); + +extern char *arm_condition_codes[]; + +/* This is needed by the tail-calling peepholes */ +extern int frame_pointer_needed; + + +#ifndef CPP_PREDEFINES +#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)" +#endif + +#ifndef CPP_SPEC +#define CPP_SPEC "%{m6:-D__arm6__}" +#endif + +/* Run-time Target Specification. */ +#ifndef TARGET_VERSION +#define TARGET_VERSION \ + fputs (" (ARM/generic)", stderr); +#endif + +/* Run-time compilation parameters selecting different hardware subsets. + On the ARM, misuse it in a different way. */ +extern int target_flags; + +/* Nonzero if the function prologue (and epilogue) should obey + the ARM Procedure Call Standard. */ +#define TARGET_APCS (target_flags & 1) + +/* Nonzero if the function prologue should output the function name to enable + the post mortem debugger to print a backtrace (very useful on RISCOS, + unused on RISCiX). Specifying this flag also enables -mapcs. + XXX Must still be implemented in the prologue. */ +#define TARGET_POKE_FUNCTION_NAME (target_flags & 2) + +/* Nonzero if floating point instructions are emulated by the FPE, in which + case instruction scheduling becomes very uninteresting. */ +#define TARGET_FPE (target_flags & 4) + +/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration + of condition flags when returning from a branch & link (ie. a function) */ +#define TARGET_6 (target_flags & 8) + +/* Leave some bits for new processor variants */ + +/* Nonzero if shorts must be loaded byte at a time. This is not necessary + for the arm processor chip, but it is needed for some MMU chips. */ +#define TARGET_SHORT_BY_BYTES (target_flags & 0x200) + +/* Nonzero if GCC should use a floating point library. + GCC will assume the fp regs don't exist and will not emit any fp insns. + Note that this is different than fp emulation which still uses fp regs + and insns - the kernel catches the trap and performs the operation. */ +#define TARGET_SOFT_FLOAT (target_flags & 0x400) +#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT) + +/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. + Bit 31 is reserved. See riscix.h. */ +#ifndef SUBTARGET_SWITCHES +#define SUBTARGET_SWITCHES +#endif + +#define TARGET_SWITCHES \ +{ \ + {"apcs", 1}, \ + {"poke-function-name", 2}, \ + {"fpe", 4}, \ + {"6", 8}, \ + {"2", -8}, \ + {"3", -8}, \ + {"short-load-bytes", (0x200)}, \ + {"no-short-load-bytes", -(0x200)}, \ + {"short-load-words", -(0x200)}, \ + {"no-short-load-words", (0x200)}, \ + {"soft-float", (0x400)}, \ + {"hard-float", -(0x400)}, \ + SUBTARGET_SWITCHES \ + {"", TARGET_DEFAULT } \ +} + +/* Which processor we are running on. 
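Each entry in TARGET_SWITCHES pairs an -m option name with a bit mask for target_flags; a positive value sets the bits and a negative value clears them, which is how -m2 or -m3 undoes -m6 and -mhard-float undoes -msoft-float. The sketch below copies a few entries from the table above to show the mechanism; the option parsing itself is simplified and lives elsewhere in the real compiler.

  #include <stdio.h>
  #include <string.h>

  /* Illustrative sketch: a cut-down TARGET_SWITCHES table and the set/clear
     rule applied to target_flags.  */
  static int target_flags = 0;

  static const struct { const char *name; int value; } target_switches[] =
  {
    { "apcs",        1      },
    { "fpe",         4      },
    { "6",           8      },
    { "2",          -8      },
    { "soft-float",  0x400  },
    { "hard-float", -0x400  },
  };

  #define TARGET_APCS        (target_flags & 1)
  #define TARGET_6           (target_flags & 8)
  #define TARGET_SOFT_FLOAT  (target_flags & 0x400)

  static void
  handle_m_option (const char *name)
  {
    size_t i;

    for (i = 0; i < sizeof target_switches / sizeof target_switches[0]; i++)
      if (strcmp (name, target_switches[i].name) == 0)
        {
          int value = target_switches[i].value;

          if (value > 0)
            target_flags |= value;        /* positive mask sets bits */
          else
            target_flags &= ~(-value);    /* negative mask clears them */
          return;
        }
  }

  int
  main (void)
  {
    handle_m_option ("6");
    handle_m_option ("apcs");
    handle_m_option ("2");        /* clears the -m6 bit again */

    printf ("TARGET_APCS=%d TARGET_6=%d TARGET_SOFT_FLOAT=%d\n",
            TARGET_APCS != 0, TARGET_6 != 0, TARGET_SOFT_FLOAT != 0);
    return 0;
  }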
Currently this is only used to + get the condition code clobbering attribute right when we are running on + an arm 6 */ + +enum processor_type +{ + PROCESSOR_ARM2, + PROCESSOR_ARM3, + PROCESSOR_ARM6 +}; + +/* Recast the cpu class to be the cpu attribute. */ + +/* Recast the cpu class to be the cpu attribute. */ +#define arm_cpu_attr ((enum attr_cpu)arm_cpu) + +extern enum processor_type arm_cpu; + +/* What sort of floating point unit do we have? Hardware or software. */ +enum floating_point_type +{ + FP_HARD, + FP_SOFT +}; + +/* Recast the floating point class to be the floating point attribute. */ +#define arm_fpu_attr ((enum attr_fpu) arm_fpu) + +extern enum floating_point_type arm_fpu; + +#ifndef TARGET_DEFAULT +#define TARGET_DEFAULT 0 +#endif + +#define TARGET_MEM_FUNCTIONS 1 + +/* OVERRIDE_OPTIONS takes care of the following: + - if -mpoke-function-name, then -mapcs. + - if doing debugging, then -mapcs; if RISCOS, then -mpoke-function-name. + - if floating point is done by emulation, forget about instruction + scheduling. Note that this only saves compilation time; it doesn't + matter for the final code. */ + +#define OVERRIDE_OPTIONS \ +{ \ + if (write_symbols != NO_DEBUG && flag_omit_frame_pointer) \ + warning ("-g without a frame pointer may not give sensible debugging");\ + if (TARGET_POKE_FUNCTION_NAME) \ + target_flags |= 1; \ + if (TARGET_FPE) \ + flag_schedule_insns = flag_schedule_insns_after_reload = 0; \ + arm_cpu = TARGET_6 ? PROCESSOR_ARM6: PROCESSOR_ARM2; \ +} + +/* Target machine storage Layout. */ + + +/* Define this macro if it is advisable to hold scalars in registers + in a wider mode than that declared by the program. In such cases, + the value is constrained to be within the bounds of the declared + type, but kept valid in the wider mode. The signedness of the + extension may differ from that of the type. */ + +/* It is far faster to zero extend chars than to sign extend them */ + +#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \ + if (GET_MODE_CLASS (MODE) == MODE_INT \ + && GET_MODE_SIZE (MODE) < 4) \ + { \ + if (MODE == QImode) \ + UNSIGNEDP = 1; \ + else if (MODE == HImode) \ + UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \ + (MODE) = SImode; \ + } + +/* Define for XFmode extended real floating point support. + This will automatically cause REAL_ARITHMETIC to be defined. */ +/* For the ARM: + I think I have added all the code to make this work. Unfortunately, + early releases of the floating point emulation code on RISCiX used a + different format for extended precision numbers. On my RISCiX box there + is a bug somewhere which causes the machine to lock up when running enquire + with long doubles. There is the additional aspect that Norcroft C + treats long doubles as doubles and we ought to remain compatible. + Perhaps someone with an FPA coprocessor and not running RISCiX would like + to try this someday. */ +/* #define LONG_DOUBLE_TYPE_SIZE 96 */ + +/* Disable XFmode patterns in md file */ +#define ENABLE_XF_PATTERNS 0 + +/* Define if you don't want extended real, but do want to use the + software floating point emulator for REAL_ARITHMETIC and + decimal <-> binary conversion. */ +/* See comment above */ +#define REAL_ARITHMETIC + +/* Define this if most significant bit is lowest numbered + in instructions that operate on numbered bit-fields. */ +#define BITS_BIG_ENDIAN 0 + +/* Define this if most significant byte of a word is the lowest numbered. + Most ARM processors are run in little endian mode, so that is the default. 
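PROMOTE_MODE widens sub-word integer scalars to SImode and decides the extension kind per mode: chars are always zero-extended because that is cheaper on the ARM, while the treatment of shorts follows -mshort-load-bytes. A standalone sketch of that decision is below; the enum and the flag are stand-ins for GCC's machinery.

  #include <stdio.h>

  /* Illustrative sketch of the PROMOTE_MODE decision above.  */
  enum machine_mode { QImode, HImode, SImode };

  static int target_short_by_bytes = 0;    /* -mshort-load-bytes not given */

  static void
  promote_mode (enum machine_mode *mode, int *unsignedp)
  {
    if (*mode == QImode)
      *unsignedp = 1;                       /* zero extension is faster */
    else if (*mode == HImode)
      *unsignedp = (target_short_by_bytes != 0);
    *mode = SImode;                         /* compute in full registers */
  }

  int
  main (void)
  {
    enum machine_mode m = QImode;
    int uns = 0;                            /* declared as signed char */

    promote_mode (&m, &uns);
    printf ("promoted to %s, %s\n",
            m == SImode ? "SImode" : "narrow",
            uns ? "zero-extended" : "sign-extended");
    return 0;
  }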
+ If you want to have it run-time selectable, change the definition in a + cover file to be TARGET_BIG_ENDIAN. */ +#define BYTES_BIG_ENDIAN 0 + +/* Define this if most significant word of a multiword number is the lowest + numbered. */ +#define WORDS_BIG_ENDIAN 0 + +/* Define this if most significant word of doubles is the lowest numbered */ +#define FLOAT_WORDS_BIG_ENDIAN 1 + +/* Number of bits in an addressable storage unit */ +#define BITS_PER_UNIT 8 + +#define BITS_PER_WORD 32 + +#define UNITS_PER_WORD 4 + +#define POINTER_SIZE 32 + +#define PARM_BOUNDARY 32 + +#define STACK_BOUNDARY 32 + +#define FUNCTION_BOUNDARY 32 + +#define EMPTY_FIELD_BOUNDARY 32 + +#define BIGGEST_ALIGNMENT 32 + +/* Make strings word-aligned so strcpy from constants will be faster. */ +#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ + (TREE_CODE (EXP) == STRING_CST \ + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) + +/* Every structures size must be a multiple of 32 bits. */ +#define STRUCTURE_SIZE_BOUNDARY 32 + +/* Non-zero if move instructions will actually fail to work + when given unaligned data. */ +#define STRICT_ALIGNMENT 1 + +#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT + +/* Define number of bits in most basic integer type. + (If undefined, default is BITS_PER_WORD). */ +/* #define INT_TYPE_SIZE */ + +/* Standard register usage. */ + +/* Register allocation in ARM Procedure Call Standard (as used on RISCiX): + (S - saved over call). + + r0 * argument word/integer result + r1-r3 argument word + + r4-r8 S register variable + r9 S (rfp) register variable (real frame pointer) + + r10 F S (sl) stack limit (not currently used) + r11 F S (fp) argument pointer + r12 (ip) temp workspace + r13 F S (sp) lower end of current stack frame + r14 (lr) link address/workspace + r15 F (pc) program counter + + f0 floating point result + f1-f3 floating point scratch + + f4-f7 S floating point variable + + cc This is NOT a real register, but is used internally + to represent things that use or set the condition + codes. + sfp This isn't either. It is used during rtl generation + since the offset between the frame pointer and the + auto's isn't known until after register allocation. + afp Nor this, we only need this because of non-local + goto. Without it fp appears to be used and the + elimination code won't get rid of sfp. It tracks + fp exactly at all times. + + *: See CONDITIONAL_REGISTER_USAGE */ + +/* The stack backtrace structure is as follows: + fp points to here: | save code pointer | [fp] + | return link value | [fp, #-4] + | return sp value | [fp, #-8] + | return fp value | [fp, #-12] + [| saved r10 value |] + [| saved r9 value |] + [| saved r8 value |] + [| saved r7 value |] + [| saved r6 value |] + [| saved r5 value |] + [| saved r4 value |] + [| saved r3 value |] + [| saved r2 value |] + [| saved r1 value |] + [| saved r0 value |] + [| saved f7 value |] three words + [| saved f6 value |] three words + [| saved f5 value |] three words + [| saved f4 value |] three words + r0-r3 are not normally saved in a C function. */ + +/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */ +#define FIRST_PSEUDO_REGISTER 27 + +/* 1 for registers that have pervasive standard uses + and are not available for the register allocator. */ +#define FIXED_REGISTERS \ +{ \ + 0,0,0,0,0,0,0,0, \ + 0,0,1,1,0,1,0,1, \ + 0,0,0,0,0,0,0,0, \ + 1,1,1 \ +} + +/* 1 for registers not available across function calls. + These must include the FIXED_REGISTERS and also any + registers that can be used without being saved. 
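Putting the register comment and FIXED_REGISTERS together: the port sees 27 hard registers, r0-r15, the eight FPA registers, the fake condition-code register and the two fake frame/argument pointers, with sl, fp, sp, pc and the fakes withheld from allocation. The sketch below just prints the registers the allocator may actually use; the names follow the comment above and this is an illustration only.

  #include <stdio.h>

  /* Illustrative sketch: the fixed[] row mirrors FIXED_REGISTERS above.  */
  int
  main (void)
  {
    static const char *names[27] =
      { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc",
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "cc", "sfp", "afp" };
    static const int fixed[27] =
      { 0,0,0,0,0,0,0,0,
        0,0,1,1,0,1,0,1,
        0,0,0,0,0,0,0,0,
        1,1,1 };
    int regno;

    printf ("registers available to the allocator:\n");
    for (regno = 0; regno < 27; regno++)
      if (!fixed[regno])
        printf (" %s", names[regno]);
    printf ("\n");
    return 0;
  }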
+ The latter must include the registers where values are returned + and the register where structure-value addresses are passed. + Aside from that, you can include as many other registers as you like. + The CC is not preserved over function calls on the ARM 6, so it is + easier to assume this for all. SFP is preserved, since FP is. */ +#define CALL_USED_REGISTERS \ +{ \ + 1,1,1,1,0,0,0,0, \ + 0,0,1,1,1,1,1,1, \ + 1,1,1,1,0,0,0,0, \ + 1,1,1 \ +} + +/* If doing stupid life analysis, avoid a bug causing a return value r0 to be + trampled. This effectively reduces the number of available registers by 1. + XXX It is a hack, I know. + XXX Is this still needed? */ +#define CONDITIONAL_REGISTER_USAGE \ +{ \ + if (obey_regdecls) \ + fixed_regs[0] = 1; \ + if (TARGET_SOFT_FLOAT) \ + { \ + int regno; \ + for (regno = 16; regno < 24; ++regno) \ + fixed_regs[regno] = call_used_regs[regno] = 1; \ + } \ +} + +/* Return number of consecutive hard regs needed starting at reg REGNO + to hold something of mode MODE. + This is ordinarily the length in words of a value of mode MODE + but can be less for certain modes in special long registers. + + On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP + mode. */ +#define HARD_REGNO_NREGS(REGNO, MODE) \ + (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \ + && (REGNO) != ARG_POINTER_REGNUM) ? 1 \ + : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) + +/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. + This is TRUE for ARM regs since they can hold anything, and TRUE for FPU + regs holding FP. */ +#define HARD_REGNO_MODE_OK(REGNO, MODE) \ + ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \ + ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \ + || REGNO == ARG_POINTER_REGNUM \ + || GET_MODE_CLASS (MODE) == MODE_FLOAT)) + +/* Value is 1 if it is a good idea to tie two pseudo registers + when one has mode MODE1 and one has mode MODE2. + If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, + for any hard reg, then this must be 0 for correct output. */ +#define MODES_TIEABLE_P(MODE1, MODE2) \ + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) + +/* Specify the registers used for certain standard purposes. + The values of these macros are register numbers. */ + +/* Define this if the program counter is overloaded on a register. */ +#define PC_REGNUM 15 + +/* Register to use for pushing function arguments. */ +#define STACK_POINTER_REGNUM 13 + +/* Base register for access to local variables of the function. */ +#define FRAME_POINTER_REGNUM 25 + +/* Define this to be where the real frame pointer is if it is not possible to + work out the offset between the frame pointer and the automatic variables + until after register allocation has taken place. FRAME_POINTER_REGNUM + should point to a special register that we will make sure is eliminated. */ +#define HARD_FRAME_POINTER_REGNUM 11 + +/* Value should be nonzero if functions must have frame pointers. + Zero means the frame pointer need not be set up (and parms may be accessed + via the stack pointer) in functions that seem suitable. + If we have to have a frame pointer we might as well make use of it. + APCS says that the frame pointer does not need to be pushed in leaf + functions. */ +#define FRAME_POINTER_REQUIRED \ + (current_function_has_nonlocal_label || (TARGET_APCS && !leaf_function_p ())) + +/* Base register for access to arguments of the function. 
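HARD_REGNO_NREGS encodes the asymmetry between the two register files: an FPA register holds any floating-point value by itself, while ARM registers are four bytes wide and wider values occupy a run of consecutive registers. The sketch below redoes that computation with mode sizes passed in bytes; register numbering follows the layout above and this is not GCC code.

  #include <stdio.h>

  /* Illustrative sketch of the HARD_REGNO_NREGS rule.  */
  #define UNITS_PER_WORD 4

  static int
  hard_regno_nregs (int regno, int mode_size)
  {
    if (regno >= 16 && regno <= 23)               /* FPA register */
      return 1;
    return (mode_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  }

  int
  main (void)
  {
    printf ("DFmode (8 bytes) in r0: %d regs\n", hard_regno_nregs (0, 8));
    printf ("DFmode (8 bytes) in f0: %d regs\n", hard_regno_nregs (16, 8));
    printf ("DImode (8 bytes) in r2: %d regs\n", hard_regno_nregs (2, 8));
    return 0;
  }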
*/ +#define ARG_POINTER_REGNUM 26 + +/* The native (Norcroft) Pascal compiler for the ARM passes the static chain + as an invisible last argument (possible since varargs don't exist in + Pascal), so the following is not true. */ +#define STATIC_CHAIN_REGNUM 8 + +/* Register in which address to store a structure value + is passed to a function. */ +#define STRUCT_VALUE_REGNUM 0 + +/* Internal, so that we don't need to refer to a raw number */ +#define CC_REGNUM 24 + +/* The order in which register should be allocated. It is good to use ip + since no saving is required (though calls clobber it) and it never contains + function parameters. It is quite good to use lr since other calls may + clobber it anyway. Allocate r0 through r3 in reverse order since r3 is + least likely to contain a function parameter; in addition results are + returned in r0. + */ +#define REG_ALLOC_ORDER \ +{ \ + 3, 2, 1, 0, 12, 14, 4, 5, \ + 6, 7, 8, 10, 9, 11, 13, 15, \ + 16, 17, 18, 19, 20, 21, 22, 23, \ + 24, 25 \ +} + +/* Register and constant classes. */ + +/* Register classes: all ARM regs or all FPU regs---simple! */ +enum reg_class +{ + NO_REGS, + FPU_REGS, + GENERAL_REGS, + ALL_REGS, + LIM_REG_CLASSES +}; + +#define N_REG_CLASSES (int) LIM_REG_CLASSES + +/* Give names of register classes as strings for dump file. */ +#define REG_CLASS_NAMES \ +{ \ + "NO_REGS", \ + "FPU_REGS", \ + "GENERAL_REGS", \ + "ALL_REGS", \ +} + +/* Define which registers fit in which classes. + This is an initializer for a vector of HARD_REG_SET + of length N_REG_CLASSES. */ +#define REG_CLASS_CONTENTS \ +{ \ + 0x0000000, /* NO_REGS */ \ + 0x0FF0000, /* FPU_REGS */ \ + 0x200FFFF, /* GENERAL_REGS */ \ + 0x2FFFFFF /* ALL_REGS */ \ +} + +/* The same information, inverted: + Return the class number of the smallest class containing + reg number REGNO. This could be a conditional expression + or could index an array. */ +#define REGNO_REG_CLASS(REGNO) \ + (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \ + || REGNO == ARG_POINTER_REGNUM) \ + ? GENERAL_REGS : (REGNO) == CC_REGNUM \ + ? NO_REGS : FPU_REGS) + +/* The class value for index registers, and the one for base regs. */ +#define INDEX_REG_CLASS GENERAL_REGS +#define BASE_REG_CLASS GENERAL_REGS + +/* Get reg_class from a letter such as appears in the machine description. + We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */ +#define REG_CLASS_FROM_LETTER(C) \ + ((C)=='f' ? FPU_REGS : NO_REGS) + +/* The letters I, J, K, L and M in a register constraint string + can be used to stand for particular ranges of immediate operands. + This macro defines what the ranges are. + C is the letter, and VALUE is a constant value. + Return 1 if VALUE is in the range specified by C. + I: immediate arithmetic operand (i.e. 8 bits shifted as required). + J: valid indexing constants. + K: ~value ok in rhs argument of data operand. + L: -value ok in rhs argument of data operand. + M: 0..32, or a power of 2 (for shifts, or mult done by shift). */ +#define CONST_OK_FOR_LETTER_P(VALUE, C) \ + ((C) == 'I' ? const_ok_for_arm (VALUE) : \ + (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \ + (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \ + (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \ + (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \ + || (((VALUE) & ((VALUE) - 1)) == 0)) \ + : 0) + +/* For the ARM, `Q' means that this is a memory operand that is just + an offset from a register. + `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL + address. 
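The constraint letters I, J, K, L and M above all reduce to simple numeric tests once const_ok_for_arm is available. Below is a standalone sketch, with a plain reimplementation of the rotated-8-bit immediate test standing in for the compiler's routine; it is an illustration only, not the port's code.

  #include <stdio.h>

  /* Illustrative stand-in for const_ok_for_arm: a value is encodable when
     some even left-rotation of it fits in 8 bits.  */
  static int
  ok_for_arm (long value)
  {
    unsigned long v = (unsigned long) value & 0xffffffff;
    int rot;

    for (rot = 0; rot < 32; rot += 2)
      {
        unsigned long r = rot ? (((v << rot) | (v >> (32 - rot))) & 0xffffffff)
                              : v;
        if (r < 256)
          return 1;
      }
    return 0;
  }

  #define CONST_OK_I(v) (ok_for_arm (v))                 /* data-op immediate */
  #define CONST_OK_J(v) ((v) < 4096 && (v) > -4096)      /* indexing constant */
  #define CONST_OK_K(v) (ok_for_arm (~(v)))              /* ~value works (MVN/BIC) */
  #define CONST_OK_L(v) (ok_for_arm (-(v)))              /* -value works instead */
  #define CONST_OK_M(v) (((v) >= 0 && (v) <= 32) \
                         || (((v) & ((v) - 1)) == 0))    /* shift counts */

  int
  main (void)
  {
    printf ("I(0xff000000)=%d J(4095)=%d K(0xffffff00)=%d L(-255)=%d M(64)=%d\n",
            CONST_OK_I (0xff000000), CONST_OK_J (4095),
            CONST_OK_K (0xffffff00), CONST_OK_L (-255), CONST_OK_M (64));
    return 0;
  }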
This means that the symbol is in the text segment and can be + accessed without using a load. */ + +#define EXTRA_CONSTRAINT(OP, C) \ + ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \ + : (C) == 'R' ? (GET_CODE (OP) == MEM \ + && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \ + && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \ + : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) : 0) + +/* Constant letter 'G' for the FPU immediate constants. + 'H' means the same constant negated. */ +#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \ + ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \ + : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0) + +/* Given an rtx X being reloaded into a reg required to be + in class CLASS, return the class of reg to actually use. + In general this is just CLASS; but on some machines + in some cases it is preferable to use a more restrictive class. */ +#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS) + +/* Return the register class of a scratch register needed to copy IN into + or out of a register in CLASS in MODE. If it can be done directly, + NO_REGS is returned. */ +#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \ + (((MODE) == DFmode && (CLASS) == GENERAL_REGS \ + && true_regnum (X) == -1 && TARGET_HARD_FLOAT) \ + ? GENERAL_REGS \ + : ((MODE) == HImode && true_regnum (X) == -1) ? GENERAL_REGS : NO_REGS) + +/* If we need to load shorts byte-at-a-time, then we need a scratch. */ +#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \ + (((MODE) == HImode && TARGET_SHORT_BY_BYTES && true_regnum (X) == -1) \ + ? GENERAL_REGS : NO_REGS) + +/* Return the maximum number of consecutive registers + needed to represent mode MODE in a register of class CLASS. + ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */ +#define CLASS_MAX_NREGS(CLASS, MODE) \ + ((CLASS) == FPU_REGS ? 1 \ + : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) + +/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */ +#define REGISTER_MOVE_COST(CLASS1, CLASS2) \ + ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \ + || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \ + ? 20 : 2) + +/* Stack layout; function entry, exit and calling. */ + +/* Define this if pushing a word on the stack + makes the stack pointer a smaller address. */ +#define STACK_GROWS_DOWNWARD 1 + +/* Define this if the nominal address of the stack frame + is at the high-address end of the local variables; + that is, each additional local variable allocated + goes at a more negative offset in the frame. */ +#define FRAME_GROWS_DOWNWARD 1 + +/* Offset within stack frame to start allocating local variables at. + If FRAME_GROWS_DOWNWARD, this is the offset to the END of the + first local allocated. Otherwise, it is the offset to the BEGINNING + of the first local allocated. */ +#define STARTING_FRAME_OFFSET 0 + +/* If we generate an insn to push BYTES bytes, + this says how many the stack pointer really advances by. */ +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) + +/* Offset of first parameter from the argument pointer register value. */ +#define FIRST_PARM_OFFSET(FNDECL) 4 + +/* Value is the number of byte of arguments automatically + popped when returning from a subroutine call. + FUNDECL is the declaration node of the function (as a tree), + FUNTYPE is the data type of the function (as a tree), + or for a library call it is an identifier node for the subroutine name. + SIZE is the number of bytes of arguments passed on the stack. 
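PUSH_ROUNDING keeps the stack word-aligned: whatever the argument's size, a push moves sp by a multiple of four bytes. A small standalone check of the rounding, with arbitrary sizes:

  #include <stdio.h>

  /* Illustrative check of the word-rounding done by PUSH_ROUNDING.  */
  #define PUSH_ROUNDING(nbytes) (((nbytes) + 3) & ~3)

  int
  main (void)
  {
    static const int sizes[] = { 1, 2, 3, 4, 5, 8 };   /* sample sizes */
    int i;

    for (i = 0; i < 6; i++)
      printf ("push %d byte(s) -> sp moves by %d\n",
              sizes[i], PUSH_ROUNDING (sizes[i]));
    return 0;
  }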
+ + On the ARM, the caller does not pop any of its arguments that were passed + on the stack. */ +#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0 + +/* Define how to find the value returned by a function. + VALTYPE is the data type of the value (as a tree). + If the precise function being called is known, FUNC is its FUNCTION_DECL; + otherwise, FUNC is 0. */ +#define FUNCTION_VALUE(VALTYPE, FUNC) \ + (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \ + ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \ + : gen_rtx (REG, TYPE_MODE (VALTYPE), 0)) + +/* Define how to find the value returned by a library function + assuming the value has mode MODE. */ +#define LIBCALL_VALUE(MODE) \ + (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \ + ? gen_rtx (REG, MODE, 16) \ + : gen_rtx (REG, MODE, 0)) + +/* 1 if N is a possible register number for a function value. + On the ARM, only r0 and f0 can return results. */ +#define FUNCTION_VALUE_REGNO_P(REGNO) \ + ((REGNO) == 0 || ((REGNO) == 16) && TARGET_HARD_FLOAT) + +/* Define where to put the arguments to a function. + Value is zero to push the argument on the stack, + or a hard register in which to store the argument. + + MODE is the argument's machine mode. + TYPE is the data type of the argument (as a tree). + This is null for libcalls where that information may + not be available. + CUM is a variable of type CUMULATIVE_ARGS which gives info about + the preceding args and about the function being called. + NAMED is nonzero if this argument is a named parameter + (otherwise it is an extra parameter matching an ellipsis). + + On the ARM, normally the first 16 bytes are passed in registers r0-r3; all + other arguments are passed on the stack. If (NAMED == 0) (which happens + only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is + passed in the stack (function_prologue will indeed make it pass in the + stack if necessary). */ +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ + ((NAMED) \ + ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \ + : 0) + +/* For an arg passed partly in registers and partly in memory, + this is the number of registers used. + For args passed entirely in registers or entirely in memory, zero. */ +#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \ + ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \ + ? GET_MODE_SIZE (MODE) \ + : int_size_in_bytes (TYPE)) \ + ? 4 - (CUM) / 4 : 0) + +/* A C type for declaring a variable that is used as the first argument of + `FUNCTION_ARG' and other related values. For some target machines, the + type `int' suffices and can hold the number of bytes of argument so far. + + On the ARM, this is the number of bytes of arguments scanned so far. */ +#define CUMULATIVE_ARGS int + +/* Initialize a variable CUM of type CUMULATIVE_ARGS + for a call to a function whose data type is FNTYPE. + For a library call, FNTYPE is 0. + On the ARM, the offset starts at 0. */ +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME) \ + ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0)) + +/* Update the data in CUM to advance over an argument + of mode MODE and data type TYPE. + (TYPE is null for libcalls where that information may not be available.) */ +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ + (CUM) += ((MODE) != BLKmode \ + ? (GET_MODE_SIZE (MODE) + 3) & ~3 \ + : (int_size_in_bytes (TYPE) + 3) & ~3) \ + +/* 1 if N is a possible register number for function argument passing. + On the ARM, r0-r3 are used to pass args. 
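The bookkeeping performed by FUNCTION_ARG, FUNCTION_ARG_PARTIAL_NREGS and
FUNCTION_ARG_ADVANCE above is easier to see worked through in plain C.  The
sketch below only mimics that arithmetic and is not code from the port; the
helpers `word_size' and `describe_arg' are invented for the illustration.

/* Sketch of the CUMULATIVE_ARGS arithmetic: CUM counts bytes, the first
   16 bytes of named arguments travel in r0-r3, each argument is rounded
   up to a whole word, and an argument straddling the 16-byte boundary is
   split between registers and the stack.  */
#include <stdio.h>

static int
word_size (int bytes)                   /* round up to a multiple of 4 */
{
  return (bytes + 3) & ~3;
}

static void
describe_arg (int *cum, int bytes)
{
  int rounded = word_size (bytes);

  if (*cum >= 16)
    printf ("%2d byte(s): entirely on the stack\n", bytes);
  else if (*cum + rounded > 16)
    printf ("%2d byte(s): %d word(s) in registers from r%d, rest on the stack\n",
            bytes, 4 - *cum / 4, *cum / 4);
  else
    printf ("%2d byte(s): in registers starting at r%d\n", bytes, *cum / 4);

  *cum += rounded;                      /* FUNCTION_ARG_ADVANCE */
}

int
main (void)
{
  /* INIT_CUMULATIVE_ARGS starts CUM at 4 rather than 0 when the callee
     returns an aggregate in memory, since r0 (STRUCT_VALUE_REGNUM) then
     carries the address of the return buffer.  */
  int cum = 0;

  describe_arg (&cum, 4);       /* an int: r0                            */
  describe_arg (&cum, 8);       /* an 8-byte argument: r1 and r2         */
  describe_arg (&cum, 8);       /* straddles the boundary: r3 + stack    */
  describe_arg (&cum, 4);       /* everything after that: stack          */
  return 0;
}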
*/ +#define FUNCTION_ARG_REGNO_P(REGNO) \ + ((REGNO) >= 0 && (REGNO) <= 3) + +/* Perform any actions needed for a function that is receiving a variable + number of arguments. CUM is as above. MODE and TYPE are the mode and type + of the current parameter. PRETEND_SIZE is a variable that should be set to + the amount of stack that must be pushed by the prolog to pretend that our + caller pushed it. + + Normally, this macro will push all remaining incoming registers on the + stack and set PRETEND_SIZE to the length of the registers pushed. + + On the ARM, PRETEND_SIZE is set in order to have the prologue push the last + named arg and all anonymous args onto the stack. + XXX I know the prologue shouldn't be pushing registers, but it is faster + that way. */ +#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \ +{ \ + extern int current_function_anonymous_args; \ + current_function_anonymous_args = 1; \ + if ((CUM) < 16) \ + (PRETEND_SIZE) = 16 - (CUM); \ +} + +/* Generate assembly output for the start of a function. */ +#define FUNCTION_PROLOGUE(STREAM, SIZE) \ + output_func_prologue ((STREAM), (SIZE)) + +/* Call the function profiler with a given profile label. The Acorn compiler + puts this BEFORE the prolog but gcc pust it afterwards. The ``mov ip,lr'' + seems like a good idea to stick with cc convention. ``prof'' doesn't seem + to mind about this! */ +#define FUNCTION_PROFILER(STREAM,LABELNO) \ +{ \ + fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf(STREAM, "\tbl\tmcount\n"); \ + fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \ +} + +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. + No definition is equivalent to always zero. + + On the ARM, the function epilogue recovers the stack pointer from the + frame. */ +#define EXIT_IGNORE_STACK 1 + +/* Generate the assembly code for function exit. */ +#define FUNCTION_EPILOGUE(STREAM, SIZE) \ + output_func_epilogue ((STREAM), (SIZE)) + +/* Determine if the epilogue should be output as RTL. + You should override this if you define FUNCTION_EXTRA_EPILOGUE. */ +#define USE_RETURN_INSN use_return_insn () + +/* Definitions for register eliminations. + + This is an array of structures. Each structure initializes one pair + of eliminable registers. The "from" register number is given first, + followed by "to". Eliminations of the same "from" register are listed + in order of preference. + + We have two registers that can be eliminated on the ARM. First, the + arg pointer register can often be eliminated in favor of the stack + pointer register. Secondly, the pseudo frame pointer register can always + be eliminated; it is replaced with either the stack or the real frame + pointer. */ + +#define ELIMINABLE_REGS \ +{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} + +/* Given FROM and TO register numbers, say whether this elimination is allowed. + Frame pointer elimination is automatically handled. + + All eliminations are permissible. Note that ARG_POINTER_REGNUM and + HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame + pointer, we must eliminate FRAME_POINTER_REGNUM into + HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. 
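To make the eliminations concrete: the pseudo frame pointer (printed as
`sfp') and the arg pointer (register 26, `afp') never reach the assembler;
every RTL reference to them is rewritten in terms of sp or the real frame
pointer r11, using the constants computed by INITIAL_ELIMINATION_OFFSET
below.  A rough sketch of the effect (the function name is invented and
the offsets in the comment are purely illustrative):

/* In RTL a local lives at a pseudo-frame-pointer-relative address such as
   (mem:SI (plus:SI (reg:SI sfp) (const_int -4))).  After elimination the
   same slot is addressed off sp or fp instead, say [sp, #4] or [fp, #-16],
   the constant being supplied by INITIAL_ELIMINATION_OFFSET.  */
int
keep_one_local (int a)
{
  volatile int slot = a;        /* volatile forces a real stack slot */
  return slot + 1;
}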
*/ +#define CAN_ELIMINATE(FROM, TO) \ + (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1) + +/* Define the offset between two registers, one to be eliminated, and the other + its replacement, at the start of a routine. */ +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ +{ \ + int volatile_func = arm_volatile_func (); \ + if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\ + (OFFSET) = 0; \ + else if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM)\ + (OFFSET) = (get_frame_size () + 3 & ~3); \ + else \ + { \ + int regno; \ + int offset = 12; \ + int saved_hard_reg = 0; \ + \ + if (! volatile_func) \ + { \ + for (regno = 0; regno <= 10; regno++) \ + if (regs_ever_live[regno] && ! call_used_regs[regno]) \ + saved_hard_reg = 1, offset += 4; \ + for (regno = 16; regno <=23; regno++) \ + if (regs_ever_live[regno] && ! call_used_regs[regno]) \ + offset += 12; \ + } \ + if ((FROM) == FRAME_POINTER_REGNUM) \ + (OFFSET) = -offset; \ + else \ + { \ + if (! frame_pointer_needed) \ + offset -= 16; \ + if (! volatile_func && (regs_ever_live[14] || saved_hard_reg)) \ + offset += 4; \ + (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \ + } \ + } \ +} + +/* Output assembler code for a block containing the constant parts + of a trampoline, leaving space for the variable parts. + + On the ARM, (if r8 is the static chain regnum, and remembering that + referencing pc adds an offset of 8) the trampoline looks like: + ldr r8, [pc, #0] + ldr pc, [pc] + .word static chain value + .word function's address */ +#define TRAMPOLINE_TEMPLATE(FILE) \ +{ \ + fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \ + REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \ + REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf ((FILE), "\t.word\t0\n"); \ + fprintf ((FILE), "\t.word\t0\n"); \ +} + +/* Length in units of the trampoline for entering a nested function. */ +#define TRAMPOLINE_SIZE 16 + +/* Alignment required for a trampoline in units. */ +#define TRAMPOLINE_ALIGN 4 + +/* Emit RTL insns to initialize the variable parts of a trampoline. + FNADDR is an RTX for the address of the function's pure code. + CXT is an RTX for the static chain value for the function. */ +#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \ +{ \ + emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \ + (CXT)); \ + emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \ + (FNADDR)); \ +} + + +/* Addressing modes, and classification of registers for them. */ + +#define HAVE_POST_INCREMENT 1 +#define HAVE_PRE_INCREMENT 1 +#define HAVE_POST_DECREMENT 1 +#define HAVE_PRE_DECREMENT 1 + +/* Macros to check register numbers against specific register classes. */ + +/* These assume that REGNO is a hard or pseudo reg number. + They give nonzero only if REGNO is a hard reg of the suitable class + or a pseudo reg currently allocated to a suitable hard reg. + Since they use reg_renumber, they are safe only once reg_renumber + has been allocated, which happens in local-alloc.c. + + On the ARM, don't allow the pc to be used. 
*/ +#define REGNO_OK_FOR_BASE_P(REGNO) \ + ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \ + || (REGNO) == ARG_POINTER_REGNUM \ + || (unsigned) reg_renumber[(REGNO)] < 15 \ + || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \ + || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM) +#define REGNO_OK_FOR_INDEX_P(REGNO) \ + REGNO_OK_FOR_BASE_P(REGNO) + +/* Maximum number of registers that can appear in a valid memory address. + Shifts in addresses can't be by a register. */ + +#define MAX_REGS_PER_ADDRESS 2 + +/* Recognize any constant value that is a valid address. */ +/* XXX We can address any constant, eventually... */ +#if 0 +#define CONSTANT_ADDRESS_P(X) \ + ( GET_CODE(X) == LABEL_REF \ + || GET_CODE(X) == SYMBOL_REF \ + || GET_CODE(X) == CONST_INT \ + || GET_CODE(X) == CONST ) +#endif + +#define CONSTANT_ADDRESS_P(X) \ + (GET_CODE (X) == SYMBOL_REF \ + && (CONSTANT_POOL_ADDRESS_P (X) \ + || (optimize > 0 && SYMBOL_REF_FLAG (X)))) + +/* Nonzero if the constant value X is a legitimate general operand. + It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. + + On the ARM, allow any integer (invalid ones are removed later by insn + patterns), nice doubles and symbol_refs which refer to the function's + constant pool XXX. */ +#define LEGITIMATE_CONSTANT_P(X) \ + (GET_CODE (X) == CONST_INT \ + || (GET_CODE (X) == CONST_DOUBLE \ + && (const_double_rtx_ok_for_fpu (X) \ + || neg_const_double_rtx_ok_for_fpu (X))) \ + || CONSTANT_ADDRESS_P (X)) + +/* Symbols in the text segment can be accessed without indirecting via the + constant pool; it may take an extra binary operation, but this is still + faster than indirecting via memory. Don't do this when not optimizing, + since we won't be calculating al of the offsets necessary to do this + simplification. */ + +#define ENCODE_SECTION_INFO(decl) \ +{ \ + if (optimize > 0 && TREE_CONSTANT (decl) \ + && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \ + { \ + rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \ + ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \ + SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \ + } \ +} + +/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx + and check its validity for a certain class. + We have two alternate definitions for each of them. + The usual definition accepts all pseudo regs; the other rejects + them unless they have been allocated suitable hard regs. + The symbol REG_OK_STRICT causes the latter definition to be used. */ +#ifndef REG_OK_STRICT + +/* Nonzero if X is a hard reg that can be used as a base reg + or if it is a pseudo reg. */ +#define REG_OK_FOR_BASE_P(X) \ + (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM) + +/* Nonzero if X is a hard reg that can be used as an index + or if it is a pseudo reg. */ +#define REG_OK_FOR_INDEX_P(X) \ + REG_OK_FOR_BASE_P(X) + +#define REG_OK_FOR_PRE_POST_P(X) \ + (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM) + +#else + +/* Nonzero if X is a hard reg that can be used as a base reg. */ +#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) + +/* Nonzero if X is a hard reg that can be used as an index. 
*/ +#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) + +#define REG_OK_FOR_PRE_POST_P(X) \ + (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM) + +#endif + +/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression + that is a valid memory address for an instruction. + The MODE argument is the machine mode for the MEM expression + that wants to use this address. + + The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */ +#define BASE_REGISTER_RTX_P(X) \ + (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) + +#define INDEX_REGISTER_RTX_P(X) \ + (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) + +/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs + used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can + only be small constants. */ +#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \ +do \ +{ \ + HOST_WIDE_INT range; \ + enum rtx_code code = GET_CODE (INDEX); \ + \ + if (GET_MODE_CLASS (MODE) == MODE_FLOAT) \ + { \ + if (code == CONST_INT && INTVAL (INDEX) < 1024 \ + && INTVAL (INDEX) > -1024 \ + && (INTVAL (INDEX) & 3) == 0) \ + goto LABEL; \ + } \ + else \ + { \ + if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \ + goto LABEL; \ + if (GET_MODE_SIZE (MODE) <= 4 && code == MULT) \ + { \ + rtx xiop0 = XEXP (INDEX, 0); \ + rtx xiop1 = XEXP (INDEX, 1); \ + if (INDEX_REGISTER_RTX_P (xiop0) \ + && power_of_two_operand (xiop1, SImode)) \ + goto LABEL; \ + if (INDEX_REGISTER_RTX_P (xiop1) \ + && power_of_two_operand (xiop0, SImode)) \ + goto LABEL; \ + } \ + if (GET_MODE_SIZE (MODE) <= 4 \ + && (code == LSHIFTRT || code == ASHIFTRT \ + || code == ASHIFT || code == ROTATERT)) \ + { \ + rtx op = XEXP (INDEX, 1); \ + if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \ + && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \ + && INTVAL (op) <= 31) \ + goto LABEL; \ + } \ + range = (MODE) == HImode ? 4095 : 4096; \ + if (code == CONST_INT && INTVAL (INDEX) < range \ + && INTVAL (INDEX) > -range) \ + goto LABEL; \ + } \ +} while (0) + +/* Jump to LABEL if X is a valid address RTX. This must also take + REG_OK_STRICT into account when deciding about valid registers, but it uses + the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX, + INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool. + Allow REG-only and AUTINC-REG if handling TImode or HImode. Other symbol + refs must be forced though a static cell to ensure addressability. 
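The address shapes listed above map onto ordinary C idioms; the sketch
below pairs them up, with the ARM addressing one would typically expect
shown in the comments (actual output depends on optimisation and register
allocation, and all identifiers are invented for the illustration).  An
offset outside the roughly +/-4K range accepted here is not a legitimate
address at all; LEGITIMIZE_ADDRESS further on splits such a reference into
an add of the high part followed by a load with a small remainder.

/* Typical C sources of the accepted address forms.  */
struct rec { int a; int b[4]; };

int
addr_examples (int *p, int *q, int i, struct rec *s)
{
  int x;

  x  = *p;        /* plain register:          ldr rX, [rP, #0]         */
  x += s->b[2];   /* register + small offset: ldr rX, [rS, #12]        */
  x += p[i];      /* register + scaled index: ldr rX, [rP, rI, lsl #2] */
  x += *q++;      /* post-increment:          ldr rX, [rQ], #4         */
  return x;
}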
*/ +#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ +{ \ + if (BASE_REGISTER_RTX_P (X)) \ + goto LABEL; \ + else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \ + && GET_CODE (XEXP (X, 0)) == REG \ + && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \ + goto LABEL; \ + else if ((MODE) == TImode) \ + ; \ + else if (GET_CODE (X) == PLUS) \ + { \ + rtx xop0 = XEXP(X,0); \ + rtx xop1 = XEXP(X,1); \ + \ + if (BASE_REGISTER_RTX_P (xop0)) \ + GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \ + else if (BASE_REGISTER_RTX_P (xop1)) \ + GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \ + } \ + else if (GET_CODE (X) == MINUS) \ + { \ + rtx xop0 = XEXP (X,0); \ + rtx xop1 = XEXP (X,1); \ + \ + if (BASE_REGISTER_RTX_P (xop0)) \ + GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \ + } \ + else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \ + && GET_CODE (X) == SYMBOL_REF \ + && CONSTANT_POOL_ADDRESS_P (X)) \ + goto LABEL; \ + else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \ + && GET_CODE (XEXP (X, 0)) == REG \ + && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \ + goto LABEL; \ +} + +/* Try machine-dependent ways of modifying an illegitimate address + to be legitimate. If we find one, return the new, valid address. + This macro is used in only one place: `memory_address' in explow.c. + + OLDX is the address as it was before break_out_memory_refs was called. + In some cases it is useful to look at this to decide what needs to be done. + + MODE and WIN are passed so that this macro can use + GO_IF_LEGITIMATE_ADDRESS. + + It is always safe for this macro to do nothing. It exists to recognize + opportunities to optimize the output. + + On the ARM, try to convert [REG, #BIGCONST] + into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST], + where VALIDCONST == 0 in case of TImode. */ +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ +{ \ + if (GET_CODE (X) == PLUS) \ + { \ + rtx xop0 = XEXP (X, 0); \ + rtx xop1 = XEXP (X, 1); \ + \ + if (CONSTANT_P (xop0) && ! LEGITIMATE_CONSTANT_P (xop0)) \ + xop0 = force_reg (SImode, xop0); \ + if (CONSTANT_P (xop1) && ! LEGITIMATE_CONSTANT_P (xop1)) \ + xop1 = force_reg (SImode, xop1); \ + if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \ + { \ + HOST_WIDE_INT n, low_n; \ + rtx base_reg, val; \ + n = INTVAL (xop1); \ + \ + if (MODE == DImode) \ + { \ + low_n = n & 0x0f; \ + n &= ~0x0f; \ + if (low_n > 4) \ + { \ + n += 16; \ + low_n -= 16; \ + } \ + } \ + else \ + { \ + low_n = ((MODE) == TImode ? 0 \ + : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \ + n -= low_n; \ + } \ + base_reg = gen_reg_rtx (SImode); \ + val = force_operand (gen_rtx (PLUS, SImode, xop0, \ + GEN_INT (n)), NULL_RTX); \ + emit_move_insn (base_reg, val); \ + (X) = (low_n == 0 ? base_reg \ + : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \ + } \ + else if (xop0 != XEXP (X, 0) || xop1 != XEXP (x, 1)) \ + (X) = gen_rtx (PLUS, SImode, xop0, xop1); \ + } \ + else if (GET_CODE (X) == MINUS) \ + { \ + rtx xop0 = XEXP (X, 0); \ + rtx xop1 = XEXP (X, 1); \ + \ + if (CONSTANT_P (xop0)) \ + xop0 = force_reg (SImode, xop0); \ + if (CONSTANT_P (xop1) && ! LEGITIMATE_CONSTANT_P (xop1)) \ + xop1 = force_reg (SImode, xop1); \ + if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \ + (X) = gen_rtx (MINUS, SImode, xop0, xop1); \ + } \ + if (memory_address_p (MODE, X)) \ + goto WIN; \ +} + + +/* Go to LABEL if ADDR (a legitimate address expression) + has an effect that depends on the machine mode it is used for. 
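The classic mode-dependent address is auto-increment: the base register
advances by the size of the access, so the same (post_inc (reg)) form
means different things in different modes, which is why all four
auto-modify forms are reported as mode dependent below.  A minimal
illustration (function names invented):

/* The same post-increment address advances the pointer by the access
   size, so its effect depends on the machine mode of the reference:
       ldr   rX, [rP], #4      @ SImode access
       ldrb  rX, [rP], #1      @ QImode access                          */
int           next_word (int **p)           { return *(*p)++; }
unsigned char next_byte (unsigned char **p) { return *(*p)++; }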
*/ +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \ +{ \ + if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \ + || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \ + goto LABEL; \ +} + +/* Specify the machine mode that this machine uses + for the index in the tablejump instruction. */ +#define CASE_VECTOR_MODE SImode + +/* Define this if the tablejump instruction expects the table + to contain offsets from the address of the table. + Do not define this if the table should contain absolute addresses. */ +/* #define CASE_VECTOR_PC_RELATIVE */ + +/* Specify the tree operation to be used to convert reals to integers. */ +#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR + +/* This is the kind of divide that is easiest to do in the general case. */ +#define EASY_DIV_EXPR TRUNC_DIV_EXPR + +/* signed 'char' is most compatible, but RISC OS wants it unsigned. + unsigned is probably best, but may break some code. */ +#ifndef DEFAULT_SIGNED_CHAR +#define DEFAULT_SIGNED_CHAR 0 +#endif + +/* Don't cse the address of the function being compiled. */ +#define NO_RECURSIVE_FUNCTION_CSE 1 + +/* Max number of bytes we can move from memory to memory + in one reasonably fast instruction. */ +#define MOVE_MAX 4 + +/* Define if operations between registers always perform the operation + on the full register even if a narrower mode is specified. */ +#define WORD_REGISTER_OPERATIONS + +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD + will either zero-extend or sign-extend. The value of this macro should + be the code that says which one of the two operations is implicitly + done, NIL if none. */ +#define LOAD_EXTEND_OP(MODE) \ + ((MODE) == QImode ? ZERO_EXTEND \ + : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL)) + +/* Define this if zero-extension is slow (more than one real instruction). + On the ARM, it is more than one instruction only if not fetching from + memory. */ +/* #define SLOW_ZERO_EXTEND */ + +/* Nonzero if access to memory by bytes is slow and undesirable. */ +#define SLOW_BYTE_ACCESS 0 + +/* Immediate shift counts are truncated by the output routines (or was it + the assembler?). Shift counts in a register are truncated by ARM. Note + that the native compiler puts too large (> 32) immediate shift counts + into a register and shifts by the register, letting the ARM decide what + to do instead of doing that itself. */ +/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that + code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y). + On the arm, Y in a register is used modulo 256 for the shift. Only for + rotates is modulo 32 used. */ +/* #define SHIFT_COUNT_TRUNCATED 1 */ + +/* XX This is not true, is it? */ +/* All integers have the same format so truncation is easy. */ +#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1 + +/* Calling from registers is a massive pain. */ +#define NO_FUNCTION_CSE 1 + +/* Chars and shorts should be passed as ints. */ +#define PROMOTE_PROTOTYPES 1 + +/* The machine modes of pointers and functions */ +#define Pmode SImode +#define FUNCTION_MODE Pmode + +/* The structure type of the machine dependent info field of insns + No uses for this yet. */ +/* #define INSN_MACHINE_INFO struct machine_info */ + +/* The relative costs of various types of constants. Note that cse.c defines + REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */ +#define CONST_COSTS(RTX, CODE, OUTER_CODE) \ + case CONST_INT: \ + if (const_ok_for_arm (INTVAL (RTX))) \ + return (OUTER_CODE) == SET ? 
2 : -1; \ + else if (OUTER_CODE == AND \ + && const_ok_for_arm (~INTVAL (RTX))) \ + return -1; \ + else if ((OUTER_CODE == COMPARE \ + || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \ + && const_ok_for_arm (-INTVAL (RTX))) \ + return -1; \ + else \ + return 5; \ + case CONST: \ + case LABEL_REF: \ + case SYMBOL_REF: \ + return 6; \ + case CONST_DOUBLE: \ + if (const_double_rtx_ok_for_fpu (RTX)) \ + return (OUTER_CODE) == SET ? 2 : -1; \ + else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \ + && neg_const_double_rtx_ok_for_fpu (RTX)) \ + return -1; \ + return(7); + +#define ARM_FRAME_RTX(X) \ + ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \ + || (X) == arg_pointer_rtx) + +#define RTX_COSTS(X,CODE,OUTER_CODE) \ + default: \ + return arm_rtx_costs (X, CODE, OUTER_CODE); + +/* Moves to and from memory are quite expensive */ +#define MEMORY_MOVE_COST(MODE) 10 + +/* All address computations that can be done are free, but rtx cost returns + the same for practically all of them. So we weight the different types + of address here in the order (most pref first): + PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */ +#define ADDRESS_COST(X) \ + (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \ + || GET_CODE (X) == SYMBOL_REF) \ + ? 0 \ + : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \ + || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \ + ? 10 \ + : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \ + ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \ + : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \ + || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \ + || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \ + || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \ + ? 1 : 0)) \ + : 4))))) + + + +/* Try to generate sequences that don't involve branches, we can then use + conditional instructions */ +#define BRANCH_COST 4 + +/* Condition code information. */ +/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, + return the mode to be used for the comparison. + CCFPEmode should be used with floating inequalities, + CCFPmode should be used with floating equalities. + CC_NOOVmode should be used with SImode integer equalities. + CCmode should be used otherwise. */ + +#define EXTRA_CC_MODES CC_NOOVmode, CCFPmode, CCFPEmode + +#define EXTRA_CC_NAMES "CC_NOOV", "CCFP", "CCFPE" + +#define SELECT_CC_MODE(OP,X,Y) \ + (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \ + ? ((OP == EQ || OP == NE) ? CCFPmode : CCFPEmode) \ + : ((GET_MODE (X) == SImode) \ + && ((OP) == EQ || (OP) == NE) \ + && (GET_CODE (X) == PLUS || GET_CODE (X) == MINUS \ + || GET_CODE (X) == AND || GET_CODE (X) == IOR \ + || GET_CODE (X) == XOR || GET_CODE (X) == MULT \ + || GET_CODE (X) == NOT || GET_CODE (X) == NEG \ + || GET_CODE (X) == LSHIFTRT \ + || GET_CODE (X) == ASHIFT || GET_CODE (X) == ASHIFTRT \ + || GET_CODE (X) == ROTATERT || GET_CODE (X) == ZERO_EXTRACT) \ + ? CC_NOOVmode \ + : GET_MODE (X) == QImode ? CC_NOOVmode : CCmode)) + +#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode) + +#define STORE_FLAG_VALUE 1 + +/* Define the information needed to generate branch insns. This is + stored from the compare operation. Note that we can't use "rtx" here + since it hasn't been defined! 
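The practical payoff of CC_NOOVmode in SELECT_CC_MODE above is that the
flag-setting form of a data-processing instruction can stand in for a
separate compare whenever only equality with zero is tested; the mode
records that the overflow flag cannot be trusted, so only tests that
ignore it may reuse the flags.  A rough illustration follows (the function
name is invented and the exact output of course depends on optimisation):

/* With CC_NOOVmode the AND itself can set the flags, so no cmp is needed:
       ands   r0, r0, r1       @ Z says whether (a & b) == 0
       moveq  r0, #1
       movne  r0, #0
   A signed ordering test such as a < b needs a full CCmode compare,
   because it reads the N and V flags as well.  */
int
mask_is_clear (int a, int b)
{
  return (a & b) == 0;
}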
*/ + +extern struct rtx_def *arm_compare_op0, *arm_compare_op1; +extern int arm_compare_fp; + +/* Define the codes that are matched by predicates in arm.c */ +#define PREDICATE_CODES \ + {"s_register_operand", {SUBREG, REG}}, \ + {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \ + {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \ + {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \ + {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \ + {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \ + {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \ + {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \ + {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \ + {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \ + {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \ + {"load_multiple_operation", {PARALLEL}}, \ + {"store_multiple_operation", {PARALLEL}}, \ + {"equality_operator", {EQ, NE}}, \ + {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \ + {"const_shift_operand", {CONST_INT}}, \ + {"index_operand", {SUBREG, REG, CONST_INT}}, \ + {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \ + {"multi_register_push", {PARALLEL}}, \ + {"cc_register", {REG}}, \ + {"reversible_cc_register", {REG}}, + + +/* Assembler output control */ + +#ifndef ARM_OS_NAME +#define ARM_OS_NAME "(generic)" +#endif + +/* The text to go at the start of the assembler file */ +#define ASM_FILE_START(STREAM) \ +{ \ + extern char *version_string; \ + fprintf (STREAM,"%s Generated by gcc %s for ARM/%s\n", \ + ASM_COMMENT_START, version_string, ARM_OS_NAME); \ + fprintf (STREAM,"%srfp\t.req\t%sr9\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%ssl\t.req\t%sr10\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%sfp\t.req\t%sr11\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%sip\t.req\t%sr12\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%ssp\t.req\t%sr13\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%slr\t.req\t%sr14\n", REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf (STREAM,"%spc\t.req\t%sr15\n", REGISTER_PREFIX, REGISTER_PREFIX); \ +} + +#define ASM_APP_ON "" +#define ASM_APP_OFF "" + +/* Switch to the text or data segment. */ +#define TEXT_SECTION_ASM_OP ".text" +#define DATA_SECTION_ASM_OP ".data" + +#define REGISTER_PREFIX "" +#define USER_LABEL_PREFIX "_" +#define LOCAL_LABEL_PREFIX "" + +/* The assembler's names for the registers. */ +#ifndef REGISTER_NAMES +#define REGISTER_NAMES \ +{ \ + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ + "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ + "cc", "sfp", "afp" \ +} +#endif + +#ifndef ADDITIONAL_REGISTER_NAMES +#define ADDITIONAL_REGISTER_NAMES \ +{ \ + {"a1", 0}, \ + {"a2", 1}, \ + {"a3", 2}, \ + {"a4", 3}, \ + {"v1", 4}, \ + {"v2", 5}, \ + {"v3", 6}, \ + {"v4", 7}, \ + {"v5", 8}, \ + {"v6", 9}, \ + {"rfp", 9}, /* Gcc used to call it this */ \ + {"sb", 9}, \ + {"v7", 10}, \ + {"r10", 10}, \ + {"r11", 11}, /* fp */ \ + {"r12", 12}, /* ip */ \ + {"r13", 13}, /* sp */ \ + {"r14", 14}, /* lr */ \ + {"r15", 15} /* pc */ \ +} +#endif + +/* Arm Assembler barfs on dollars */ +#define DOLLARS_IN_IDENTIFIERS 0 + +#define NO_DOLLAR_IN_LABEL + +/* DBX register number for a given compiler register number */ +#define DBX_REGISTER_NUMBER(REGNO) (REGNO) + +/* Generate DBX debugging information. riscix.h will undefine this because + the native assembler does not support stabs. 
*/ +#define DBX_DEBUGGING_INFO 1 + +/* Acorn dbx moans about continuation chars, so don't use any. */ +#ifndef DBX_CONTIN_LENGTH +#define DBX_CONTIN_LENGTH 0 +#endif + +/* Output a source filename for the debugger. RISCiX dbx insists that the + ``desc'' field is set to compiler version number >= 315 (sic). */ +#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(STREAM,NAME) \ +do { \ + fprintf (STREAM, ".stabs \"%s\",%d,0,315,%s\n", (NAME), N_SO, \ + <ext_label_name[1]); \ + text_section (); \ + ASM_OUTPUT_INTERNAL_LABEL (STREAM, "Ltext", 0); \ +} while (0) + +/* Output a label definition. */ +#define ASM_OUTPUT_LABEL(STREAM,NAME) \ + arm_asm_output_label ((STREAM), (NAME)) + +/* Output a function label definition. */ +#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \ + ASM_OUTPUT_LABEL(STREAM, NAME) + +/* Output a globalising directive for a label. */ +#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \ + (fprintf (STREAM, "\t.global\t"), \ + assemble_name (STREAM, NAME), \ + fputc ('\n',STREAM)) \ + +/* Output a reference to a label. */ +#define ASM_OUTPUT_LABELREF(STREAM,NAME) \ + fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, NAME) + +/* Make an internal label into a string. */ +#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \ + sprintf (STRING, "*%s%d", PREFIX, NUM) + +/* Output an internal label definition. */ +#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \ + do \ + { \ + char *s = (char *) alloca (11 + strlen (PREFIX)); \ + extern int arm_target_label, arm_ccfsm_state; \ + extern rtx arm_target_insn; \ + \ + if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \ + && !strcmp (PREFIX, "L")) \ + { \ + arm_ccfsm_state = 0; \ + arm_target_insn = NULL; \ + } \ + strcpy (s, "*"); \ + sprintf (&s[strlen (s)], "%s%d", (PREFIX), (NUM)); \ + arm_asm_output_label (STREAM, s); \ + } while (0) + +/* Nothing special is done about jump tables */ +/* #define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) */ +/* #define ASM_OUTPUT_CASE_END(STREAM,NUM,TABLE) */ + +/* Construct a private name. */ +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \ + ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \ + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER))) + +/* Output a push or a pop instruction (only used when profiling). */ +#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ + fprintf(STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \ + REGISTER_PREFIX, REGISTER_PREFIX, reg_names[REGNO]) + +#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ + fprintf(STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \ + REGISTER_PREFIX, REGISTER_PREFIX, reg_names[REGNO]) + +/* Output a relative address. Not needed since jump tables are absolute + but we must define it anyway. */ +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,VALUE,REL) \ + fputs ("- - - ASM_OUTPUT_ADDR_DIFF_ELT called!\n", STREAM) + +/* Output an element of a dispatch table. */ +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \ + fprintf (STREAM, "\t.word\tL%d\n", VALUE) + +/* Output various types of constants. For real numbers we output hex, with + a comment containing the "human" value, this allows us to pass NaN's which + the riscix assembler doesn't understand (it also makes cross-assembling + less likely to fail). 
*/ + +#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \ +do { char dstr[30]; \ + long l[3]; \ + arm_increase_location (12); \ + REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \ + if (sizeof (int) == sizeof (long)) \ + fprintf (STREAM, "\t.long 0x%x,0x%x,0x%x\t%s long double %s\n", \ + l[2], l[1], l[0], ASM_COMMENT_START, dstr); \ + else \ + fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n",\ + l[0], l[1], l[2], ASM_COMMENT_START, dstr); \ + } while (0) + + +#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \ +do { char dstr[30]; \ + long l[2]; \ + arm_increase_location (8); \ + REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \ + if (sizeof (int) == sizeof (long)) \ + fprintf (STREAM, "\t.long 0x%x, 0x%x\t%s double %s\n", l[0], \ + l[1], ASM_COMMENT_START, dstr); \ + else \ + fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \ + l[1], ASM_COMMENT_START, dstr); \ + } while (0) + +#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \ +do { char dstr[30]; \ + long l; \ + arm_increase_location (4); \ + REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \ + if (sizeof (int) == sizeof (long)) \ + fprintf (STREAM, "\t.word 0x%x\t%s float %s\n", l, \ + ASM_COMMENT_START, dstr); \ + else \ + fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \ + ASM_COMMENT_START, dstr); \ + } while (0); + +#define ASM_OUTPUT_INT(STREAM, EXP) \ + (fprintf (STREAM, "\t.word\t"), \ + output_addr_const (STREAM, (EXP)), \ + arm_increase_location (4), \ + fputc ('\n', STREAM)) + +#define ASM_OUTPUT_SHORT(STREAM, EXP) \ + (fprintf (STREAM, "\t.short\t"), \ + output_addr_const (STREAM, (EXP)), \ + arm_increase_location (2), \ + fputc ('\n', STREAM)) + +#define ASM_OUTPUT_CHAR(STREAM, EXP) \ + (fprintf (STREAM, "\t.byte\t"), \ + output_addr_const (STREAM, (EXP)), \ + arm_increase_location (1), \ + fputc ('\n', STREAM)) + +#define ASM_OUTPUT_BYTE(STREAM, VALUE) \ + (fprintf (STREAM, "\t.byte\t%d\n", VALUE), \ + arm_increase_location (1)) + +#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \ + output_ascii_pseudo_op ((STREAM), (unsigned char *)(PTR), (LEN)) + +/* Output a gap. In fact we fill it with nulls. */ +#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \ + (arm_increase_location (NBYTES), \ + fprintf (STREAM, "\t.space\t%d\n", NBYTES)) + +/* Align output to a power of two. Horrible /bin/as. */ +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \ + do \ + { \ + register int amount = 1 << (POWER); \ + extern int arm_text_location; \ + \ + if (amount == 2) \ + fprintf (STREAM, "\t.even\n"); \ + else \ + fprintf (STREAM, "\t.align\t%d\n", amount - 4); \ + \ + if (in_text_section ()) \ + arm_text_location = ((arm_text_location + amount - 1) \ + & ~(amount - 1)); \ + } while (0) + +/* Output a common block */ +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \ + (fprintf (STREAM, "\t.comm\t"), \ + assemble_name ((STREAM), (NAME)), \ + fprintf(STREAM, ", %d\t%s %d\n", ROUNDED, ASM_COMMENT_START, SIZE)) + +/* Output a local common block. /bin/as can't do this, so hack a `.space' into + the bss segment. Note that this is *bad* practice. */ +#define ASM_OUTPUT_LOCAL(STREAM,NAME,SIZE,ROUNDED) \ + output_lcomm_directive (STREAM, NAME, SIZE, ROUNDED) + +/* Output a source line for the debugger. */ +/* #define ASM_OUTPUT_SOURCE_LINE(STREAM,LINE) */ + +/* Output a #ident directive. */ +#define ASM_OUTPUT_IDENT(STREAM,STRING) \ + fprintf (STREAM,"- - - ident %s\n",STRING) + +/* The assembler's parentheses characters. 
*/ +#define ASM_OPEN_PAREN "(" +#define ASM_CLOSE_PAREN ")" + +/* Target characters. */ +#define TARGET_BELL 007 +#define TARGET_BS 010 +#define TARGET_TAB 011 +#define TARGET_NEWLINE 012 +#define TARGET_VT 013 +#define TARGET_FF 014 +#define TARGET_CR 015 + +/* Only perform branch elimination (by making instructions conditional) if + we're optimising. Otherwise it's of no use anyway. */ +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \ + if (optimize) \ + final_prescan_insn (INSN, OPVEC, NOPERANDS) + +#ifndef ASM_COMMENT_START +#define ASM_COMMENT_START "@" +#endif + +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \ + ((CODE) == '?' || (CODE) == '|' || (CODE) == '@') +/* Output an operand of an instruction. */ +#define PRINT_OPERAND(STREAM, X, CODE) \ + arm_print_operand (STREAM, X, CODE) + +#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \ + (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \ + : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \ + (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \ + ? ((~ (HOST_WIDE_INT) 0) \ + & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \ + : 0)))) + +/* Output the address of an operand. */ +#define PRINT_OPERAND_ADDRESS(STREAM,X) \ +{ \ + int is_minus = GET_CODE (X) == MINUS; \ + \ + if (GET_CODE (X) == REG) \ + fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \ + reg_names[REGNO (X)]); \ + else if (GET_CODE (X) == PLUS || is_minus) \ + { \ + rtx base = XEXP (X, 0); \ + rtx index = XEXP (X, 1); \ + char *base_reg_name; \ + HOST_WIDE_INT offset = 0; \ + if (GET_CODE (base) != REG) \ + { \ + /* Ensure that BASE is a register (one of them must be). */ \ + rtx temp = base; \ + base = index; \ + index = temp; \ + } \ + base_reg_name = reg_names[REGNO (base)]; \ + switch (GET_CODE (index)) \ + { \ + case CONST_INT: \ + offset = INTVAL (index); \ + if (is_minus) \ + offset = -offset; \ + fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \ + base_reg_name, offset); \ + break; \ + \ + case REG: \ + fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \ + base_reg_name, is_minus ? "-" : "", \ + REGISTER_PREFIX, reg_names[REGNO (index)] ); \ + break; \ + \ + case MULT: \ + case ASHIFTRT: \ + case LSHIFTRT: \ + case ASHIFT: \ + case ROTATERT: \ + { \ + fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \ + base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\ + reg_names[REGNO (XEXP (index, 0))]); \ + arm_print_operand (STREAM, index, 'S'); \ + fputs ("]", STREAM); \ + break; \ + } \ + \ + default: \ + abort(); \ + } \ + } \ + else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \ + || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \ + { \ + extern int output_memory_reference_mode; \ + \ + if (GET_CODE (XEXP (X, 0)) != REG) \ + abort (); \ + \ + if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \ + fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \ + reg_names[REGNO (XEXP (X, 0))], \ + GET_CODE (X) == PRE_DEC ? "-" : "", \ + GET_MODE_SIZE (output_memory_reference_mode)); \ + else \ + fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \ + reg_names[REGNO (XEXP (X, 0))], \ + GET_CODE (X) == POST_DEC ? "-" : "", \ + GET_MODE_SIZE (output_memory_reference_mode)); \ + } \ + else output_addr_const(STREAM, X); \ +} diff --git a/gnu/usr.bin/gcc/arch/arm32/arm32.md b/gnu/usr.bin/gcc/arch/arm32/arm32.md new file mode 100644 index 000000000000..5c06fb64bf3c --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/arm32.md @@ -0,0 +1,5697 @@ +;;- Machine description for Advanced RISC Machines' ARM for GNU compiler +;; Copyright (C) 1991, 1993, 1994, 1995 Free Software Foundation, Inc. 
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) +;; and Martin Simmons (@harleqn.co.uk). +;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) + +;; This file is part of GNU CC. + +;; GNU CC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2, or (at your option) +;; any later version. + +;; GNU CC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GNU CC; see the file COPYING. If not, write to +;; the Free Software Foundation, 59 Temple Place - Suite 330, +;; Boston, MA 02111-1307, USA. + +;;- See file "rtl.def" for documentation on define_insn, match_*, et. al. + +;; There are patterns in this file to support XFmode arithmetic. +;; Unfortunately RISC iX doesn't work well with these so they are disabled. +;; (See arm.h) + +;; UNSPEC Usage: +;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter, +;; the mode is MODE_FLOAT +;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter, +;; the mode is MODE_FLOAT +;; 2 `push multiple' operation: operand 0 is the first register. Subsequent +;; registers are in parallel (use...) expressions. + +;; Attributes + +; condition codes: this one is used by final_prescan_insn to speed up +; conditionalizing instructions. It saves having to scan the rtl to see if +; it uses or alters the condition codes. + +; USE means that the condition codes are used by the insn in the process of +; outputting code, this means (at present) that we can't use the insn in +; inlined branches + +; SET means that the purpose of the insn is to set the condition codes in a +; well defined manner. + +; CLOB means that the condition codes are altered in an undefined manner, if +; they are altered at all + +; JUMP_CLOB is used when the conditions are not defined if a branch is taken, +; but are if the branch wasn't taken; the effect is to limit the branch +; elimination scanning. + +; NOCOND means that the condition codes are neither altered nor affect the +; output of this insn + +(define_attr "conds" "use,set,clob,jump_clob,nocond" + (const_string "nocond")) + +; CPU attribute is used to determine whether condition codes are clobbered +; by a call insn: on the arm6 they are if in 32-bit addressing mode; on the +; arm2 and arm3 the condition codes are restored by the return. + +(define_attr "cpu" "arm2,arm3,arm6" (const (symbol_ref "arm_cpu_attr"))) + +; Floating Point Unit. If we only have floating point emulation, then there +; is no point in scheduling the floating point insns. (Well, for best +; performance we should try and group them together). + +(define_attr "fpu" "fpa,fpe" (const (symbol_ref "arm_fpu_attr"))) + +; LENGTH of an instruction (in bytes) +(define_attr "length" "" (const_int 4)) + +; An assembler sequence may clobber the condition codes without us knowing +(define_asm_attributes + [(set_attr "conds" "clob") + (set_attr "length" "4")]) + +; TYPE attribute is used to detect floating point instructions which, if +; running on a co-processor can run in parallel with other, basic instructions +; If write-buffer scheduling is enabled then it can also be used in the +; scheduling of writes. 
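Before the insn type classification that follows, it is worth spelling out
what the `conds' tracking above buys: when a short forward branch skips
only instructions that are safe to predicate, final_prescan_insn can drop
the branch and execute those instructions conditionally, which is why
insns that read or scramble the flags must be marked.  A small sketch of
the intended effect (the function name is invented and the code shown is
only what one would hope for, not a guaranteed translation):

/* Instead of branching around the assignment, the comparison can guard a
   conditionally executed instruction:
       cmp    r0, r1
       movgt  r0, r1           @ executed only when a > b
   Patterns marked "use", "clob" or "jump_clob" constrain how far this
   conditionalisation may reach.  */
int
clamp_to (int a, int b)
{
  if (a > b)
    a = b;
  return a;
}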
+ +; Classification of each insn +; normal any data instruction that doesn't hit memory or fp regs +; block blockage insn, this blocks all functional units +; float a floating point arithmetic operation (subject to expansion) +; fdivx XFmode floating point division +; fdivd DFmode floating point division +; fdivs SFmode floating point division +; fmul Floating point multiply +; ffmul Fast floating point multiply +; farith Floating point arithmetic (4 cycle) +; ffarith Fast floating point arithmetic (2 cycle) +; float_em a floating point arithmetic operation that is normally emulated +; even on a machine with an fpa. +; f_load a floating point load from memory +; f_store a floating point store to memory +; f_mem_r a transfer of a floating point register to a real reg via mem +; r_mem_f the reverse of f_mem_r +; f_2_r fast transfer float to arm (no memory needed) +; r_2_f fast transfer arm to float +; call a subroutine call +; load any load from memory +; store1 store 1 word to memory from arm registers +; store2 store 2 words +; store3 store 3 words +; store4 store 4 words +; +(define_attr "type" + "normal,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4" + (const_string "normal")) + +(define_attr "write_conflict" "no,yes" + (if_then_else (eq_attr "type" + "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load") + (const_string "yes") + (const_string "no"))) + +; The write buffer on some of the arm6 processors is hard to model exactly. +; There is room in the buffer for up to two addresses and up to eight words +; of memory, but the two needn't be split evenly. When writing the two +; addresses are fully pipelined. However, a read from memory that is not +; currently in the cache will block until the writes have completed. +; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so +; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous +; (they aren't allowed to be at present) then there is a startup cost of 1MCLK +; cycle to add as well. + +;; (define_function_unit {name} {num-units} {n-users} {test} +;; {ready-delay} {issue-delay} [{conflict-list}]) +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "fdivx")) 71 69) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "fdivd")) 59 57) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "fdivs")) 31 29) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "fmul")) 9 7) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "ffmul")) 6 4) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "farith")) 4 2) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "ffarith")) 2 2) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "r_2_f")) 5 3) + +(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "f_2_r")) 1 2) + +;; The fpa10 doesn't really have a memory read unit, but it can start to +;; speculatively execute the instruction in the pipeline, provided the data +;; is already loaded, so pretend reads have a delay of 2 (and that the +;; pipeline is infinite. 
+ +(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa") + (eq_attr "type" "f_load")) 3 1) + +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store1") 3 3 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store2") 5 5 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store3") 7 7 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store4") 9 9 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "r_mem_f") 3 3 + [(eq_attr "write_conflict" "yes")]) + +;; Note: For DImode insns, there is normally no reason why operands should +;; not be in the same register, what we don't want is for something being +;; written to partially overlap something that is an input. + +;; Addition insns. + +(define_insn "adddi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] + "" + "adds\\t%0, %1, %2\;adc\\t%R0, %R1, %R2" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (sign_extend:DI + (match_operand:SI 1 "s_register_operand" "r,r")) + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] + "" + "adds\\t%0, %2, %1\;adc\\t%R0, %R2, %1, asr #31" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (zero_extend:DI + (match_operand:SI 1 "s_register_operand" "r,r")) + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] + "" + "adds\\t%0, %2, %1\;adc\\t%R0, %R2, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_expand "addsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (plus:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "reg_or_int_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT) + { + arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0], + operands[1], + (reload_in_progress || reload_completed ? 0 + : preserve_subexpressions_p ())); + DONE; + } +") + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (plus:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "const_int_operand" "")))] + "! 
(const_ok_for_arm (INTVAL (operands[2])) + || const_ok_for_arm (-INTVAL (operands[2])))" + [(clobber (const_int 0))] + " + arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0], + operands[1], 0); + DONE; +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r") + (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))] + "" + "@ + add%?\\t%0, %1, %2 + sub%?\\t%0, %1, #%n2 + #" +[(set_attr "length" "4,4,16")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (plus:SI (match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_add_operand" "rI,L")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") + (plus:SI (match_dup 1) (match_dup 2)))] + "" + "@ + add%?s\\t%0, %1, %2 + sub%?s\\t%0, %1, #%n2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC 24) + (compare:CC (match_operand:SI 1 "s_register_operand" "r,r") + (neg:SI (match_operand:SI 2 "arm_add_operand" "rI,L")))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") + (plus:SI (match_dup 1) (match_dup 2)))] + "" + "@ + add%?s\\t%0, %1, %2 + sub%?s\\t%0, %1, #%n2" +[(set_attr "conds" "set")]) + +(define_insn "incscc" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (plus:SI (match_operator:SI 2 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 1 "s_register_operand" "0,?r")))] + "" + "@ + add%d2\\t%0, %1, #1 + mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1" +[(set_attr "conds" "use") + (set_attr "length" "4,8")]) + +; If a constant is too big to fit in a single instruction then the constant +; will be pre-loaded into a register taking at least two insns, we might be +; able to merge it with an add, but it depends on the exact value. + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "immediate_operand" "n")))] + "!(const_ok_for_arm (INTVAL (operands[2])) + || const_ok_for_arm (-INTVAL (operands[2])))" + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))] + " +{ + unsigned int val = (unsigned) INTVAL (operands[2]); + int i; + unsigned int temp; + + /* this code is similar to the approach followed in movsi, but it must + generate exactly two insns */ + + for (i = 30; i >= 0; i -= 2) + { + if (val & (3 << i)) + { + i -= 6; + if (i < 0) i = 0; + if (const_ok_for_arm (temp = (val & ~(255 << i)))) + { + val &= 255 << i; + break; + } + /* we might be able to do this as (larger number - small number) */ + temp = ((val >> i) & 255) + 1; + if (temp > 255 && i < 24) + { + i += 2; + temp = ((val >> i) & 255) + 1; + } + if (const_ok_for_arm ((temp << i) - val)) + { + i = temp << i; + temp = (unsigned) - (int) (i - val); + val = i; + break; + } + FAIL; + } + } + /* if we got here, we have found a way of doing it in two instructions. 
+ the two constants are in val and temp */ + operands[2] = GEN_INT ((int)val); + operands[3] = GEN_INT ((int)temp); +} +") + +(define_insn "addsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f,f") + (plus:SF (match_operand:SF 1 "s_register_operand" "f,f") + (match_operand:SF 2 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + adf%?s\\t%0, %1, %2 + suf%?s\\t%0, %1, #%N2" +[(set_attr "type" "farith")]) + +(define_insn "adddf3" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (plus:DF (match_operand:DF 1 "s_register_operand" "f,f") + (match_operand:DF 2 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + adf%?d\\t%0, %1, %2 + suf%?d\\t%0, %1, #%N2" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f,f")) + (match_operand:DF 2 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + adf%?d\\t%0, %1, %2 + suf%?d\\t%0, %1, #%N2" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (plus:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "adf%?d\\t%0, %1, %2" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "adf%?d\\t%0, %1, %2" +[(set_attr "type" "farith")]) + +(define_insn "addxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (plus:XF (match_operand:XF 1 "s_register_operand" "f,f") + (match_operand:XF 2 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "@ + adf%?e\\t%0, %1, %2 + suf%?e\\t%0, %1, #%N2" +[(set_attr "type" "farith")]) + +(define_insn "subdi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r") + (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0") + (match_operand:DI 2 "s_register_operand" "r,0,0"))) + (clobber (reg:CC 24))] + "" + "subs\\t%0, %1, %2\;sbc\\t%R0, %R1, %R2" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0") + (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC 24))] + "" + "subs\\t%0, %1, %2\;sbc\\t%R0, %R1, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (match_operand:DI 1 "s_register_operand" "r,0") + (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC 24))] + "" + "subs\\t%0, %1, %2\;sbc\\t%R0, %R1, %2, asr #31" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0"))) + (clobber (reg:CC 24))] + "" + "rsbs\\t%0, %1, %2\;rsc\\t%R0, %R1, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0"))) + (clobber (reg:CC 24))] + "" + "rsbs\\t%0, %1, %2\;rsc\\t%R0, 
%R1, %2, asr #31" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (minus:DI (zero_extend:DI + (match_operand:SI 1 "s_register_operand" "r")) + (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r")))) + (clobber (reg:CC 24))] + "" + "subs\\t%0, %1, %2\;rsc\\t%R0, %1, %1" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_expand "subsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (minus:SI (match_operand:SI 1 "reg_or_int_operand" "") + (match_operand:SI 2 "s_register_operand" "")))] + "" + " + if (GET_CODE (operands[1]) == CONST_INT) + { + arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0], + operands[2], + (reload_in_progress || reload_completed ? 0 + : preserve_subexpressions_p ())); + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n") + (match_operand:SI 2 "s_register_operand" "r,r")))] + "" + "@ + rsb%?\\t%0, %2, %1 + #" +[(set_attr "length" "4,16")]) + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (minus:SI (match_operand:SI 1 "const_int_operand" "") + (match_operand:SI 2 "s_register_operand" "")))] + "! const_ok_for_arm (INTVAL (operands[1]))" + [(clobber (const_int 0))] + " + arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0], + operands[2], 0); + DONE; +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I") + (match_operand:SI 2 "arm_rhs_operand" "rI,r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_dup 1) (match_dup 2)))] + "" + "@ + sub%?s\\t%0, %1, %2 + rsb%?s\\t%0, %2, %1" +[(set_attr "conds" "set")]) + +(define_insn "decscc" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r") + (match_operator:SI 2 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "@ + sub%d2\\t%0, %1, #1 + mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1" +[(set_attr "conds" "use") + (set_attr "length" "*,8")]) + +(define_insn "subsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f,f") + (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G") + (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))] + "TARGET_HARD_FLOAT" + "@ + suf%?s\\t%0, %1, %2 + rsf%?s\\t%0, %2, %1" +[(set_attr "type" "farith")]) + +(define_insn "subdf3" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") + (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))] + "TARGET_HARD_FLOAT" + "@ + suf%?d\\t%0, %1, %2 + rsf%?d\\t%0, %2, %1" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "suf%?d\\t%0, %1, %2" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f,f"))))] + "TARGET_HARD_FLOAT" + "@ + suf%?d\\t%0, %1, %2 + rsf%?d\\t%0, %2, %1" +[(set_attr "type" "farith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 
"s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "suf%?d\\t%0, %1, %2" +[(set_attr "type" "farith")]) + +(define_insn "subxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G") + (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "@ + suf%?e\\t%0, %1, %2 + rsf%?e\\t%0, %2, %1" +[(set_attr "type" "farith")]) + +;; Multiplication insns + +;; Use `&' and then `0' to prevent the operands 0 and 1 being the same +(define_insn "mulsi3" + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r") + (mult:SI (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")))] + "" + "mul%?\\t%0, %2, %1") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=&r,&r") + (mult:SI (match_dup 2) (match_dup 1)))] + "" + "mul%?s\\t%0, %2, %1" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r,&r"))] + "" + "mul%?s\\t%0, %2, %1" +[(set_attr "conds" "set")]) + +;; Unnamed templates to match MLA instruction. + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r") + (plus:SI + (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))] + "" + "mla%?\\t%0, %2, %1, %3") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r") + (plus:SI (mult:SI (match_dup 2) (match_dup 1)) + (match_dup 3)))] + "" + "mla%?s\\t%0, %2, %1, %3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))] + "" + "mla%?s\\t%0, %2, %1, %3" +[(set_attr "conds" "set")]) + +(define_insn "mulsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (mult:SF (match_operand:SF 1 "s_register_operand" "f") + (match_operand:SF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "fml%?s\\t%0, %1, %2" +[(set_attr "type" "ffmul")]) + +(define_insn "muldf3" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (match_operand:DF 1 "s_register_operand" "f") + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "muf%?d\\t%0, %1, %2" +[(set_attr "type" "fmul")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "muf%?d\\t%0, %1, %2" +[(set_attr "type" "fmul")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 
2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "muf%?d\\t%0, %1, %2" +[(set_attr "type" "fmul")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "muf%?d\\t%0, %1, %2" +[(set_attr "type" "fmul")]) + +(define_insn "mulxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (mult:XF (match_operand:XF 1 "s_register_operand" "f") + (match_operand:XF 2 "fpu_rhs_operand" "fG")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "muf%?e\\t%0, %1, %2" +[(set_attr "type" "fmul")]) + +;; Division insns + +(define_insn "divsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f,f") + (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G") + (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))] + "TARGET_HARD_FLOAT" + "@ + fdv%?s\\t%0, %1, %2 + frd%?s\\t%0, %2, %1" +[(set_attr "type" "fdivs")]) + +(define_insn "divdf3" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") + (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))] + "TARGET_HARD_FLOAT" + "@ + dvf%?d\\t%0, %1, %2 + rdf%?d\\t%0, %2, %1" +[(set_attr "type" "fdivd")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "dvf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "rdf%?d\\t%0, %2, %1" +[(set_attr "type" "fdivd")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "dvf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + +(define_insn "divxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G") + (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "@ + dvf%?e\\t%0, %1, %2 + rdf%?e\\t%0, %2, %1" +[(set_attr "type" "fdivx")]) + +;; Modulo insns + +(define_insn "modsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (mod:SF (match_operand:SF 1 "s_register_operand" "f") + (match_operand:SF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "rmf%?s\\t%0, %1, %2" +[(set_attr "type" "fdivs")]) + +(define_insn "moddf3" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (match_operand:DF 1 "s_register_operand" "f") + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "rmf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "TARGET_HARD_FLOAT" + "rmf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "rmf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + 
+(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "rmf%?d\\t%0, %1, %2" +[(set_attr "type" "fdivd")]) + +(define_insn "modxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (mod:XF (match_operand:XF 1 "s_register_operand" "f") + (match_operand:XF 2 "fpu_rhs_operand" "fG")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "rmf%?e\\t%0, %1, %2" +[(set_attr "type" "fdivx")]) + +;; Boolean and,ior,xor insns + +(define_insn "anddi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0")))] + "" + "and%?\\t%0, %1, %2\;and%?\\t%R0, %R1, %R2" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "and%?\\t%0, %1, %2\;mov%?\\t%R0, #0" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "and%?\\t%0, %1, %2\;and%?\\t%R0, %R1, %2, asr #31" +[(set_attr "length" "8")]) + +(define_expand "andsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (and:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "reg_or_int_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT) + { + arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0], + operands[1], + (reload_in_progress || reload_completed + ? 0 : preserve_subexpressions_p ())); + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r") + (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))] + "" + "@ + and%?\\t%0, %1, %2 + bic%?\\t%0, %1, #%B2 + #" +[(set_attr "length" "4,4,16")]) + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (and:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "const_int_operand" "")))] + "! 
(const_ok_for_arm (INTVAL (operands[2])) + || const_ok_for_arm (~ INTVAL (operands[2])))" + [(clobber (const_int 0))] + " + arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0], + operands[1], 0); + DONE; +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (and:SI (match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_not_operand" "rI,K")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") + (and:SI (match_dup 1) (match_dup 2)))] + "" + "@ + and%?s\\t%0, %1, %2 + bic%?s\\t%0, %1, #%B2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (and:SI (match_operand:SI 0 "s_register_operand" "r,r") + (match_operand:SI 1 "arm_not_operand" "rI,K")) + (const_int 0))) + (clobber (match_scratch:SI 3 "=X,r"))] + "" + "@ + tst%?\\t%0, %1 + bic%?s\\t%3, %0, #%B1" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (zero_extract:SI + (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "immediate_operand" "n") + (match_operand:SI 2 "immediate_operand" "n")) + (const_int 0)))] + "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32 + && INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8 + && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32" + "* +{ + unsigned int mask = 0; + int cnt = INTVAL (operands[1]); + + while (cnt--) + mask = (mask << 1) | 1; + operands[1] = GEN_INT (mask << INTVAL (operands[2])); + output_asm_insn (\"tst%?\\t%0, %1\", operands); + return \"\"; +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (zero_extract:SI + (match_operand:QI 0 "memory_operand" "m") + (match_operand 1 "immediate_operand" "n") + (match_operand 2 "immediate_operand" "n")) + (const_int 0))) + (clobber (match_scratch:QI 3 "=r"))] + "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8 + && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8" + "* +{ + unsigned int mask = 0; + int cnt = INTVAL (operands[1]); + + while (cnt--) + mask = (mask << 1) | 1; + operands[1] = GEN_INT (mask << INTVAL (operands[2])); + output_asm_insn (\"ldr%?b\\t%3, %0\", operands); + output_asm_insn (\"tst%?\\t%3, %1\", operands); + return \"\"; +} +" +[(set_attr "conds" "set") + (set_attr "length" "8")]) + +;; constants for op 2 will never be given to these patterns. 
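+;; For example, the patterns below match the RTL form (and X (not Y)),
+;; i.e. the C expression  x & ~y , which the ARM does in one instruction:
+;;     bic     rd, rx, ry      @ rd = rx AND NOT ry
+;; The DImode variants simply repeat it for the high word (using an
+;; "asr #31" copy of the sign bit in the sign-extended case).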
+(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0")) + (match_operand:DI 1 "s_register_operand" "0,r")))] + "" + "bic%?\\t%0, %1, %2\;bic%?\\t%R0, %R1, %R2" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r"))) + (match_operand:DI 1 "s_register_operand" "0,?r")))] + "" + "@ + bic%?\\t%0, %1, %2 + bic%?\\t%0, %1, %2\;mov%?\\t%R0, %R1" +[(set_attr "length" "4,8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r"))) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "bic%?\\t%0, %1, %2\;bic%?\\t%R0, %R1, %2, asr #31" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")))] + "" + "bic%?\\t%0, %1, %2") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (not:SI (match_dup 2)) (match_dup 1)))] + "" + "bic%?s\\t%0, %1, %2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "bic%?s\\t%0, %1, %2" +[(set_attr "conds" "set")]) + +(define_insn "iordi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r") + (ior:DI (match_operand:DI 1 "s_register_operand" "%0") + (match_operand:DI 2 "s_register_operand" "r")))] + "" + "orr%?\\t%0, %1, %2\;orr%?\\t%R0, %R1, %R2" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (ior:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "0,?r")))] + "" + "@ + orr%?\\t%0, %1, %2 + orr%?\\t%0, %1, %2\;mov%?\\t%R0, %R1" +[(set_attr "length" "4,8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (ior:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "orr%?\\t%0, %1, %2\;orr%?\\t%R0, %R1, %2, asr #31" +[(set_attr "length" "8")]) + +(define_expand "iorsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (ior:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "reg_or_int_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT) + { + arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0], + operands[1], + (reload_in_progress || reload_completed + ? 0 : preserve_subexpressions_p ())); + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (ior:SI (match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))] + "" + "@ + orr%?\\t%0, %1, %2 + #" +[(set_attr "length" "4,16")]) + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (ior:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "const_int_operand" "")))] + "! 
const_ok_for_arm (INTVAL (operands[2]))" + [(clobber (const_int 0))] + " + arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0], + operands[1], 0); + DONE; +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (ior:SI (match_dup 1) (match_dup 2)))] + "" + "orr%?s\\t%0, %1, %2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "orr%?s\\t%0, %1, %2" +[(set_attr "conds" "set")]) + +(define_insn "xordi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0")))] + "" + "eor%?\\t%0, %1, %2\;eor%?\\t%R0, %R1, %R2" +[(set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "0,?r")))] + "" + "@ + eor%?\\t%0, %1, %2 + eor%?\\t%0, %1, %2\;mov%?\\t%R0, %R1" +[(set_attr "length" "4,8")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "eor%?\\t%0, %1, %2\;eor%?\\t%R0, %R1, %2, asr #31" +[(set_attr "length" "8")]) + +(define_insn "xorsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (xor:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")))] + "" + "eor%?\\t%0, %1, %2") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (xor:SI (match_dup 1) (match_dup 2)))] + "" + "eor%?s\\t%0, %1, %2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_rhs_operand" "rI")) + (const_int 0)))] + "" + "teq%?\\t%0, %1" +[(set_attr "conds" "set")]) + +;; by splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C), +;; (NOT D) we can sometimes merge the final NOT into one of the following +;; insns + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI"))) + (match_operand:SI 3 "arm_rhs_operand" "rI"))) + (clobber (match_operand:SI 4 "s_register_operand" "=r"))] + "" + [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2)) + (not:SI (match_dup 3)))) + (set (match_dup 0) (not:SI (match_dup 4)))] + "" +) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r") + (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")) + (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))] + "" + "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3" +[(set_attr "length" "8")]) + + + +;; Minimum and maximum insns + +(define_insn "smaxsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (smax:SI 
(match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%1, %2\;movlt\\t%0, %2 + cmp\\t%1, %2\;movge\\t%0, %1 + cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12")]) + +(define_insn "sminsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%1, %2\;movge\\t%0, %2 + cmp\\t%1, %2\;movlt\\t%0, %1 + cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12")]) + +(define_insn "umaxsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%1, %2\;movcc\\t%0, %2 + cmp\\t%1, %2\;movcs\\t%0, %1 + cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12")]) + +(define_insn "uminsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%1, %2\;movcs\\t%0, %2 + cmp\\t%1, %2\;movcc\\t%0, %1 + cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "memory_operand" "=m") + (match_operator:SI 3 "minmax_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "s_register_operand" "r")])) + (clobber (reg:CC 24))] + "" + "* + operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1], + operands[2]); + output_asm_insn (\"cmp\\t%1, %2\", operands); + output_asm_insn (\"str%d3\\t%1, %0\", operands); + output_asm_insn (\"str%D3\\t%2, %0\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "12") + (set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (match_operator:SI 4 "shiftable_operator" + [(match_operator:SI 5 "minmax_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "s_register_operand" "0,?r")])) + (clobber (reg:CC 24))] + "" + "* +{ + enum rtx_code code = GET_CODE (operands[4]); + + operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2], + operands[3]); + output_asm_insn (\"cmp\\t%2, %3\", operands); + output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands); + if (which_alternative != 0 || operands[3] != const0_rtx + || (code != PLUS && code != MINUS && code != IOR && code != XOR)) + output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands); + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + + +;; Shift and rotation insns + +(define_expand "ashlsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (ashift:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT + && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31) + { + emit_insn (gen_movsi (operands[0], const0_rtx)); + DONE; + } +") + +(define_expand "ashrsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" 
"")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT + && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31) + operands[2] = GEN_INT (31); +") + +(define_expand "lshrsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT + && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31) + { + emit_insn (gen_movsi (operands[0], const0_rtx)); + DONE; + } +") + +(define_expand "rotlsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (rotatert:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "reg_or_int_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT) + operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32); + else + { + rtx reg = gen_reg_rtx (SImode); + emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2])); + operands[2] = reg; + } +") + +(define_expand "rotrsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (rotatert:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" "")))] + "" + " + if (GET_CODE (operands[2]) == CONST_INT + && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31) + operands[2] = GEN_INT (INTVAL (operands[2]) % 32); +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "reg_or_int_operand" "rM")]))] + "" + "mov%?\\t%0, %1%S3") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rM")]) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (match_op_dup 3 [(match_dup 1) (match_dup 2)]))] + "" + "mov%?s\\t%0, %1%S3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rM")]) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "mov%?s\\t%0, %1%S3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rM")])))] + "" + "mvn%?\\t%0, %1%S3") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rM")])) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))] + "" + "mvn%?s\\t%0, %1%S3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rM")])) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "mvn%?s\\t%0, %1%S3" +[(set_attr "conds" "set")]) + + +;; Unary arithmetic insns + +(define_insn "negdi2" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "rsbs\\t%0, %1, #0\;rsc\\t%R0, %R1, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "negsi2" + [(set (match_operand:SI 0 
"s_register_operand" "=r") + (neg:SI (match_operand:SI 1 "s_register_operand" "r")))] + "" + "rsb%?\\t%0, %1, #0") + +(define_insn "negsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (neg:SF (match_operand:SF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "mnf%?s\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "negdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (neg:DF (match_operand:DF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "mnf%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (neg:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "mnf%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "negxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (neg:XF (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "mnf%?e\\t%0, %1" +[(set_attr "type" "ffarith")]) + +;; abssi2 doesn't really clobber the condition codes if a different register +;; is being set. To keep things simple, assume during rtl manipulations that +;; it does, but tell the final scan operator the truth. Similarly for +;; (neg (abs...)) + +(define_insn "abssi2" + [(set (match_operand:SI 0 "s_register_operand" "=r,&r") + (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))) + (clobber (reg 24))] + "" + "@ + cmp\\t%0, #0\;rsblt\\t%0, %0, #0 + eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31" +[(set_attr "conds" "clob,*") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,&r") + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))) + (clobber (reg 24))] + "" + "@ + cmp\\t%0, #0\;rsbgt\\t%0, %0, #0 + eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31" +[(set_attr "conds" "clob,*") + (set_attr "length" "8")]) + +(define_insn "abssf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (abs:SF (match_operand:SF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "abs%?s\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "absdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (abs:DF (match_operand:DF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "abs%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (abs:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "abs%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "absxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (abs:XF (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "abs%?e\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "sqrtsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "sqt%?s\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "sqrtdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "sqt%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (sqrt:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "sqt%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "sqrtxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (sqrt:XF 
(match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "sqt%?e\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "sinsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))] + "TARGET_HARD_FLOAT" + "sin%?s\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "sindf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))] + "TARGET_HARD_FLOAT" + "sin%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))] 0))] + "TARGET_HARD_FLOAT" + "sin%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "sinxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "sin%?e\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "cossf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))] + "TARGET_HARD_FLOAT" + "cos%?s\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "cosdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))] + "TARGET_HARD_FLOAT" + "cos%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))] 1))] + "TARGET_HARD_FLOAT" + "cos%?d\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "cosxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "cos%?e\\t%0, %1" +[(set_attr "type" "float_em")]) + +(define_insn "one_cmpldi2" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "mvn%?\\t%0, %1\;mvn%?\\t%R0, %R1" +[(set_attr "length" "8")]) + +(define_insn "one_cmplsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operand:SI 1 "s_register_operand" "r")))] + "" + "mvn%?\\t%0, %1") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_dup 1)))] + "" + "mvn%?s\\t%0, %1" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "mvn%?s\\t%0, %1" +[(set_attr "conds" "set")]) + +;; Fixed <--> Floating conversion insns + +(define_insn "floatsisf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (float:SF (match_operand:SI 1 "s_register_operand" "r")))] + "TARGET_HARD_FLOAT" + "flt%?s\\t%0, %1" +[(set_attr "type" "r_2_f")]) + +(define_insn "floatsidf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float:DF (match_operand:SI 1 "s_register_operand" "r")))] + "TARGET_HARD_FLOAT" + "flt%?d\\t%0, %1" +[(set_attr "type" "r_2_f")]) + +(define_insn "floatsixf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float:XF (match_operand:SI 1 "s_register_operand" "r")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + 
"flt%?e\\t%0, %1" +[(set_attr "type" "r_2_f")]) + +(define_insn "fix_truncsfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:SF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "fix%?z\\t%0, %1" +[(set_attr "type" "f_2_r")]) + +(define_insn "fix_truncdfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:DF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "fix%?z\\t%0, %1" +[(set_attr "type" "f_2_r")]) + +(define_insn "fix_truncxfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "fix%?z\\t%0, %1" +[(set_attr "type" "f_2_r")]) + +;; Truncation insns + +(define_insn "truncdfsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (float_truncate:SF + (match_operand:DF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "mvf%?s\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "truncxfsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (float_truncate:SF + (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "mvf%?s\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "truncxfdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float_truncate:DF + (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "mvf%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + +;; Zero and sign extension instructions. + +(define_insn "zero_extendsidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))] + "" + "* + if (REGNO (operands[1]) != REGNO (operands[0])) + output_asm_insn (\"mov%?\\t%0, %1\", operands); + return \"mov%?\\t%R0, #0\"; +" +[(set_attr "length" "8")]) + +(define_insn "zero_extendqidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r,r") + (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] + "" + "@ + and%?\\t%0, %1, #255\;mov%?\\t%R0, #0 + ldr%?b\\t%0, %1\;mov%?\\t%R0, #0" +[(set_attr "length" "8") + (set_attr "type" "*,load")]) + +(define_insn "extendsidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))] + "" + "* + if (REGNO (operands[1]) != REGNO (operands[0])) + output_asm_insn (\"mov%?\\t%0, %1\", operands); + return \"mov%?\\t%R0, %0, asr #31\"; +" +[(set_attr "length" "8")]) + +(define_expand "zero_extendhisi2" + [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "") + (const_int 16))) + (set (match_operand:SI 0 "s_register_operand" "") + (lshiftrt:SI (match_dup 2) (const_int 16)))] + "" + " +{ + if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM) + { + emit_insn (gen_movhi_bytes (operands[0], operands[1])); + DONE; + } + if (! 
s_register_operand (operands[1], HImode)) + operands[1] = copy_to_mode_reg (HImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + operands[2] = gen_reg_rtx (SImode); +}") + +(define_expand "zero_extendqisi2" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (zero_extend:SI + (match_operand:QI 1 "nonimmediate_operand" "r,m")))] + "" + " + if (GET_CODE (operands[1]) != MEM) + { + emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]), + GEN_INT (255))); + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))] + "" + "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2" +[(set_attr "type" "load")]) + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0))) + (clobber (match_operand:SI 2 "s_register_operand" ""))] + "GET_CODE (operands[1]) != MEM" + [(set (match_dup 2) (match_dup 1)) + (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))] + "") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operand:QI 0 "s_register_operand" "r") + (const_int 0)))] + "" + "tst\\t%0, #255" +[(set_attr "conds" "set")]) + +(define_expand "extendhisi2" + [(set (match_dup 2) + (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "") + (const_int 16))) + (set (match_operand:SI 0 "s_register_operand" "") + (ashiftrt:SI (match_dup 2) + (const_int 16)))] + "" + " +{ + if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM) + { + emit_insn (gen_extendhisi2_mem (operands[0], operands[1])); + DONE; + } + if (! s_register_operand (operands[1], HImode)) + operands[1] = copy_to_mode_reg (HImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + operands[2] = gen_reg_rtx (SImode); +}") + +(define_expand "extendhisi2_mem" + [(set (match_dup 2) (zero_extend:SI (mem:QI (match_operand:HI 1 "" "")))) + (set (match_dup 3) + (zero_extend:SI (mem:QI (plus:SI (match_dup 1) (const_int 1))))) + (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24))) + (set (match_operand:SI 0 "" "") + (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))] + "" + " + operands[0] = gen_lowpart (SImode, operands[0]); + operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0)); + operands[2] = gen_reg_rtx (SImode); + operands[3] = gen_reg_rtx (SImode); + operands[6] = gen_reg_rtx (SImode); + + if (BYTES_BIG_ENDIAN) + { + operands[4] = operands[2]; + operands[5] = operands[3]; + } + else + { + operands[4] = operands[3]; + operands[5] = operands[2]; + } +") + +(define_expand "extendqihi2" + [(set (match_dup 2) + (ashift:SI (match_operand:QI 1 "s_register_operand" "") + (const_int 24))) + (set (match_operand:HI 0 "s_register_operand" "") + (ashiftrt:SI (match_dup 2) + (const_int 24)))] + "" + " +{ operands[0] = gen_lowpart (SImode, operands[0]); + operands[1] = gen_lowpart (SImode, operands[1]); + operands[2] = gen_reg_rtx (SImode); }") + +(define_expand "extendqisi2" + [(set (match_dup 2) + (ashift:SI (match_operand:QI 1 "s_register_operand" "") + (const_int 24))) + (set (match_operand:SI 0 "s_register_operand" "") + (ashiftrt:SI (match_dup 2) + (const_int 24)))] + "" + " +{ operands[1] = gen_lowpart (SImode, operands[1]); + operands[2] = gen_reg_rtx (SImode); }") + +(define_insn "extendsfdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))] + "TARGET_HARD_FLOAT" + "mvf%?d\\t%0, %1" +[(set_attr "type" "ffarith")]) + 
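+;; A rough sketch of the HImode extensions above, for illustration: this
+;; port never uses a halfword load, so a 16-bit value already in a
+;; register is zero extended with
+;;     mov     r0, r0, lsl #16
+;;     mov     r0, r0, lsr #16
+;; and sign extended by using "asr #16" for the second shift; only the
+;; TARGET_SHORT_BY_BYTES memory case falls back to byte loads.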
+(define_insn "extendsfxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "mvf%?e\\t%0, %1" +[(set_attr "type" "ffarith")]) + +(define_insn "extenddfxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "mvf%?e\\t%0, %1" +[(set_attr "type" "ffarith")]) + + +;; Move insns (including loads and stores) + +;; XXX Just some ideas about movti. +;; I don't think these are a good idea on the arm, there just aren't enough +;; registers +;;(define_expand "loadti" +;; [(set (match_operand:TI 0 "s_register_operand" "") +;; (mem:TI (match_operand:SI 1 "address_operand" "")))] +;; "" "") + +;;(define_expand "storeti" +;; [(set (mem:TI (match_operand:TI 0 "address_operand" "")) +;; (match_operand:TI 1 "s_register_operand" ""))] +;; "" "") + +;;(define_expand "movti" +;; [(set (match_operand:TI 0 "general_operand" "") +;; (match_operand:TI 1 "general_operand" ""))] +;; "" +;; " +;;{ +;; rtx insn; +;; +;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) +;; operands[1] = copy_to_reg (operands[1]); +;; if (GET_CODE (operands[0]) == MEM) +;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]); +;; else if (GET_CODE (operands[1]) == MEM) +;; insn = gen_loadti (operands[0], XEXP (operands[1], 0)); +;; else +;; FAIL; +;; +;; emit_insn (insn); +;; DONE; +;;}") + +;; Recognise garbage generated above. + +;;(define_insn "" +;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m") +;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))] +;; "" +;; "* +;; { +;; register mem = (which_alternative < 3); +;; register char *template; +;; +;; operands[mem] = XEXP (operands[mem], 0); +;; switch (which_alternative) +;; { +;; case 0: template = \"ldmdb\\t%1!, %M0\"; break; +;; case 1: template = \"ldmia\\t%1!, %M0\"; break; +;; case 2: template = \"ldmia\\t%1, %M0\"; break; +;; case 3: template = \"stmdb\\t%0!, %M1\"; break; +;; case 4: template = \"stmia\\t%0!, %M1\"; break; +;; case 5: template = \"stmia\\t%0, %M1\"; break; +;; } +;; output_asm_insn (template, operands); +;; return \"\"; +;; }") + + +(define_insn "movdi" + [(set (match_operand:DI 0 "di_operand" "=r,r,r,o<>,r") + (match_operand:DI 1 "di_operand" "rIK,n,o<>,r,F"))] + "" + "* + return (output_move_double (operands)); +" +[(set_attr "length" "8,32,8,8,32") + (set_attr "type" "*,*,load,store2,*")]) + +(define_expand "movsi" + [(set (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "" + " + /* Everything except mem = const or mem = mem can be done easily */ + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (SImode, operands[1]); + if (GET_CODE (operands[1]) == CONST_INT + && !(const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))) + { + arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0], + NULL_RTX, + (reload_in_progress || reload_completed ? 
0 + : preserve_subexpressions_p ())); + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,m,r,r") + (match_operand:SI 1 "general_operand" "R,m,K,rI,r,S,?n"))] + "(register_operand (operands[0], SImode) + && (GET_CODE (operands[1]) != SYMBOL_REF + || CONSTANT_ADDRESS_P (operands[1]))) + || register_operand (operands[1], SImode)" + "* + switch (which_alternative) + { + case 0: + /* NB Calling get_attr_length may cause the insn to be re-extracted... */ + if (get_attr_length (insn) == 8) + { + /* ... so modify the operands here. */ + operands[1] = XEXP (operands[1], 0); + output_asm_insn (\"sub%?\\t%0, %|pc, #(8 + . - %a1) & ~4095\", + operands); + output_asm_insn (\"ldr%?\\t%0, [%0, #- ((4 + . - %a1) & 4095)]\", + operands); + } + else + { + /* ... and here. */ + operands[1] = XEXP (operands[1], 0); + output_asm_insn (\"ldr%?\\t%0, [%|pc, %1 - . - 8]\", operands); + } + return \"\"; + + case 1: + if (GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0))) + abort (); + return \"ldr%?\\t%0, %1\"; + + case 3: + return \"mov%?\\t%0, %1\"; + case 2: + return \"mvn%?\\t%0, #%B1\"; + case 4: + return \"str%?\\t%1, %0\"; + case 5: + return output_load_symbol (insn, operands); + case 6: + return \"#\"; + } +" +[(set (attr "length") + (cond [(eq_attr "alternative" "0") + (if_then_else + (gt (minus + (pc) + (symbol_ref "const_pool_offset (XEXP (operands[1], 0))")) + (const_int 4087)) + (const_int 8) + (const_int 4)) + (ior (eq_attr "alternative" "5") + (eq_attr "alternative" "6")) (const_int 16)] + (const_int 4))) + (set_attr "type" "load,load,*,*,store1,*,*")]) + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "const_int_operand" ""))] + "! (const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))" + [(clobber (const_int 0))] + " + arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0], + NULL_RTX, 0); + DONE; +") + +;; If copying one reg to another we can set the condition codes according to +;; its value. Such a move is common after a return from subroutine and the +;; result is being tested against zero. + +(define_insn "" + [(set (reg:CC 24) (compare (match_operand:SI 1 "s_register_operand" "0,r") + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))] + "" + "@ + cmp%?\\t%0, #0 + sub%?s\\t%0, %1, #0" +[(set_attr "conds" "set")]) + +;; Subroutine to store a half word from a register into memory. +;; Operand 0 is the source register (HImode) +;; Operand 1 is the destination address in a register (SImode) + +;; In both this routine and the next, we must be careful not to spill +;; a memory address of reg+large_const into a separate PLUS insn, since this +;; can generate unrecognizable rtl. 
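+;; Roughly, for a little-endian store of HImode register R at address A
+;; this expands to something like
+;;     strb    R, [A]          @ low byte
+;;     mov     T, R, asr #8
+;;     strb    T, [A, #1]      @ high byte
+;; with T a fresh SImode scratch; the big-endian variant swaps the two
+;; byte addresses.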
+ +(define_expand "storehi" + [;; store the low byte + (set (mem:QI (match_operand:SI 1 "" "")) (match_dup 3)) + ;; extract the high byte + (set (match_dup 2) + (ashiftrt:SI (match_operand 0 "" "") (const_int 8))) + ;; store the high byte + (set (mem:QI (match_dup 4)) + (subreg:QI (match_dup 2) 0))] ;explicit subreg safe + "" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + + if ((code == PLUS || code == MINUS) + && (GET_CODE (XEXP (operands[1], 1)) == REG + || GET_CODE (XEXP (operands[1], 0)) != REG)) + operands[1] = force_reg (SImode, operands[1]); + operands[4] = plus_constant (operands[1], 1); + operands[3] = gen_lowpart (QImode, operands[0]); + operands[0] = gen_lowpart (SImode, operands[0]); + operands[2] = gen_reg_rtx (SImode); +} +") + +(define_expand "storehi_bigend" + [(set (mem:QI (match_dup 4)) (match_dup 3)) + (set (match_dup 2) + (ashiftrt:SI (match_operand 0 "" "") (const_int 8))) + (set (mem:QI (match_operand 1 "" "")) + (subreg:QI (match_dup 2) 0))] + "" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + if ((code == PLUS || code == MINUS) + && (GET_CODE (XEXP (operands[1], 1)) == REG + || GET_CODE (XEXP (operands[1], 0)) != REG)) + operands[1] = force_reg (SImode, operands[1]); + + operands[4] = plus_constant (operands[1], 1); + operands[3] = gen_lowpart (QImode, operands[0]); + operands[0] = gen_lowpart (SImode, operands[0]); + operands[2] = gen_reg_rtx (SImode); +} +") + +;; Subroutine to store a half word integer constant into memory. +(define_expand "storeinthi" + [(set (mem:QI (match_operand:SI 0 "" "")) + (subreg:QI (match_operand 1 "" "") 0)) + (set (mem:QI (match_dup 3)) (subreg:QI (match_dup 2) 0))] + "" + " +{ + HOST_WIDE_INT value = INTVAL (operands[1]); + enum rtx_code code = GET_CODE (operands[0]); + + if ((code == PLUS || code == MINUS) + && (GET_CODE (XEXP (operands[0], 1)) == REG + || GET_CODE (XEXP (operands[0], 0)) != REG)) + operands[0] = force_reg (SImode, operands[0]); + + operands[1] = gen_reg_rtx (SImode); + if (BYTES_BIG_ENDIAN) + { + emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255))); + if ((value & 255) == ((value >> 8) & 255)) + operands[2] = operands[1]; + else + { + operands[2] = gen_reg_rtx (SImode); + emit_insn (gen_movsi (operands[2], GEN_INT (value & 255))); + } + } + else + { + emit_insn (gen_movsi (operands[1], GEN_INT (value & 255))); + if ((value & 255) == ((value >> 8) & 255)) + operands[2] = operands[1]; + else + { + operands[2] = gen_reg_rtx (SImode); + emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255))); + } + } + + operands[3] = plus_constant (operands[0], 1); +} +") + +(define_expand "movhi" + [(set (match_operand:HI 0 "general_operand" "") + (match_operand:HI 1 "general_operand" ""))] + "" + " +{ + rtx insn; + + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) == MEM) + { + if (GET_CODE (operands[1]) == CONST_INT) + emit_insn (gen_storeinthi (XEXP (operands[0], 0), operands[1])); + else + { + if (GET_CODE (operands[1]) == MEM) + operands[1] = force_reg (HImode, operands[1]); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_storehi_bigend (operands[1], + XEXP (operands[0], 0))); + else + emit_insn (gen_storehi (operands[1], XEXP (operands[0], 0))); + } + DONE; + } + /* Sign extend a constant, and keep it in an SImode reg. */ + else if (GET_CODE (operands[1]) == CONST_INT) + { + rtx reg = gen_reg_rtx (SImode); + HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff; + + /* If the constant is already valid, leave it alone. */ + if (! 
const_ok_for_arm (val)) + { + /* If setting all the top bits will make the constant + loadable in a single instruction, then set them. + Otherwise, sign extend the number. */ + + if (const_ok_for_arm (~ (val | ~0xffff))) + val |= ~0xffff; + else if (val & 0x8000) + val |= ~0xffff; + } + + emit_insn (gen_movsi (reg, GEN_INT (val))); + operands[1] = gen_rtx (SUBREG, HImode, reg, 0); + } + else if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM) + { + rtx reg = gen_reg_rtx (SImode); + emit_insn (gen_movhi_bytes (reg, operands[1])); + operands[1] = gen_lowpart (HImode, reg); + } + else if (BYTES_BIG_ENDIAN && GET_CODE (operands[1]) == MEM) + { + emit_insn (gen_movhi_bigend (operands[0], operands[1])); + DONE; + } + } +} +") + +(define_expand "movhi_bytes" + [(set (match_dup 2) (zero_extend:SI (mem:QI (match_operand:HI 1 "" "")))) + (set (match_dup 3) + (zero_extend:SI (mem:QI (plus:SI (match_dup 1) (const_int 1))))) + (set (match_operand:SI 0 "" "") + (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))] + "" + " + operands[0] = gen_lowpart (SImode, operands[0]); + operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0)); + operands[2] = gen_reg_rtx (SImode); + operands[3] = gen_reg_rtx (SImode); + + if (BYTES_BIG_ENDIAN) + { + operands[4] = operands[2]; + operands[5] = operands[3]; + } + else + { + operands[4] = operands[3]; + operands[5] = operands[2]; + } +") + +(define_expand "movhi_bigend" + [(set (match_dup 2) + (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0) + (const_int 16))) + (set (match_dup 3) + (ashiftrt:SI (match_dup 2) (const_int 16))) + (set (match_operand:HI 0 "s_register_operand" "") + (subreg:HI (match_dup 3) 0))] + "" + " + operands[2] = gen_reg_rtx (SImode); + operands[3] = gen_reg_rtx (SImode); +") + +;; Pattern to recognise insn generated default case above + +(define_insn "" + [(set (match_operand:HI 0 "general_operand" "=r,r,r") + (match_operand:HI 1 "general_operand" "rI,K,m"))] + "! BYTES_BIG_ENDIAN + && ! TARGET_SHORT_BY_BYTES + && (GET_CODE (operands[1]) != CONST_INT + || const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))" + "@ + mov%?\\t%0, %1\\t%@ movhi + mvn%?\\t%0, #%B1\\t%@ movhi + ldr%?\\t%0, %1\\t%@ movhi" +[(set_attr "type" "*,*,load")]) + +(define_insn "" + [(set (match_operand:HI 0 "s_register_operand" "=r,r,r") + (match_operand:HI 1 "general_operand" "rI,K,m"))] + "BYTES_BIG_ENDIAN + && ! TARGET_SHORT_BY_BYTES + && (GET_CODE (operands[1]) != CONST_INT + || const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))" + "@ + mov%?\\t%0, %1\\t%@ movhi + mvn%?\\t%0, #%B1\\t%@ movhi + ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16" +[(set_attr "type" "*,*,load") + (set_attr "length" "4,4,8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0) + (const_int 16)))] + "BYTES_BIG_ENDIAN + && ! 
TARGET_SHORT_BY_BYTES" + "ldr%?\\t%0, %1\\t%@ movhi_bigend" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 0 "s_register_operand" "=r,r") + (match_operand:HI 1 "arm_rhs_operand" "rI,K"))] + "TARGET_SHORT_BY_BYTES" + "@ + mov%?\\t%0, %1\\t%@ movhi + mvn%?\\t%0, #%B1\\t%@ movhi") + + +(define_expand "reload_outhi" + [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o") + (match_operand:HI 1 "s_register_operand" "r") + (match_operand:SI 2 "s_register_operand" "=&r")])] + "" + " + arm_reload_out_hi (operands); + DONE; +") + +(define_expand "reload_inhi" + [(parallel [(match_operand:HI 0 "s_register_operand" "=r") + (match_operand:HI 1 "reload_memory_operand" "o") + (match_operand:SI 2 "s_register_operand" "=&r")])] + "TARGET_SHORT_BY_BYTES" + " + arm_reload_in_hi (operands); + DONE; +") + +(define_expand "movqi" + [(set (match_operand:QI 0 "general_operand" "") + (match_operand:QI 1 "general_operand" ""))] + "" + " + /* Everything except mem = const or mem = mem can be done easily */ + + if (!(reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[1]) == CONST_INT) + { + rtx reg = gen_reg_rtx (SImode); + + emit_insn (gen_movsi (reg, operands[1])); + operands[1] = gen_rtx (SUBREG, QImode, reg, 0); + } + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (QImode, operands[1]); + } +") + + +(define_insn "" + [(set (match_operand:QI 0 "general_operand" "=r,r,r,m") + (match_operand:QI 1 "general_operand" "rI,K,m,r"))] + "register_operand (operands[0], QImode) + || register_operand (operands[1], QImode)" + "@ + mov%?\\t%0, %1 + mvn%?\\t%0, #%B1 + ldr%?b\\t%0, %1 + str%?b\\t%1, %0" +[(set_attr "type" "*,*,load,store1")]) + +(define_expand "movsf" + [(set (match_operand:SF 0 "general_operand" "") + (match_operand:SF 1 "general_operand" ""))] + "" + " + if (GET_CODE (operands[1]) == CONST_DOUBLE + && (TARGET_SOFT_FLOAT + || (GET_CODE (operands[0]) == REG + && REGNO (operands[0]) < 16) + || ! (const_double_rtx_ok_for_fpu (operands[1]) + || neg_const_double_rtx_ok_for_fpu (operands[1])))) + { + extern int optimize; + rtx mem = force_const_mem (SFmode, operands[1]); + rtx addr; + + if (reload_in_progress || reload_completed) + addr = gen_rtx (REG, SImode, REGNO (operands[0])); + else + addr = gen_reg_rtx (SImode); + if (optimize == 0) + { + rtx ptr = force_const_mem (SImode, XEXP (mem, 0)); + emit_insn (gen_movsi (addr, ptr)); + } + else + emit_insn (gen_movsi (addr, XEXP (mem, 0))); + operands[1] = gen_rtx (MEM, SFmode, addr); + } + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (SFmode, operands[1]); +") + +(define_insn "" + [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m") + (match_operand:SF 1 "general_operand" "fG,H,m,f,r,f,r,m,r"))] + "TARGET_HARD_FLOAT + && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))" + "@ + mvf%?s\\t%0, %1 + mnf%?s\\t%0, #%N1 + ldf%?s\\t%0, %1 + stf%?s\\t%1, %0 + str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4 + stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4 + mov%?\\t%0, %1 + ldr%?\\t%0, %1\\t%@ float + str%?\\t%1, %0\\t%@ float" +[(set_attr "length" "4,4,4,4,8,8,4,4,4") + (set_attr "type" + "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")]) + +;; Exactly the same as above, except that all `f' cases are deleted. +;; This is necessary to prevent reload from ever trying to use a `f' reg +;; when -msoft-float. 
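+;; As an illustration (assuming -msoft-float): an SFmode value is just a
+;; 32-bit bit pattern in a core register, so copies and spills need only
+;;     mov     r0, r1
+;;     ldr     r0, [sp, #4]
+;;     str     r1, [sp, #4]
+;; which is exactly the three alternatives given below.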
+ +(define_insn "*movsf_soft_insn" + [(set (match_operand:SF 0 "general_operand" "=r,r,m") + (match_operand:SF 1 "general_operand" "r,m,r"))] + "TARGET_SOFT_FLOAT + && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))" + "@ + mov%?\\t%0, %1 + ldr%?\\t%0, %1\\t%@ float + str%?\\t%1, %0\\t%@ float" +[(set_attr "length" "4,4,4") + (set_attr "type" "*,load,store1")]) + +(define_expand "movdf" + [(set (match_operand:DF 0 "general_operand" "") + (match_operand:DF 1 "general_operand" ""))] + "" + " + if (GET_CODE (operands[1]) == CONST_DOUBLE + && (TARGET_SOFT_FLOAT + || (GET_CODE (operands[0]) == REG + && REGNO (operands[0]) < 16) + || ! (const_double_rtx_ok_for_fpu (operands[1]) + || neg_const_double_rtx_ok_for_fpu (operands[1])))) + { + extern int optimize; + rtx mem = force_const_mem (DFmode, operands[1]); + rtx addr; + + if (reload_in_progress || reload_completed) + addr = gen_rtx (REG, SImode, REGNO (operands[0])); + else + addr = gen_reg_rtx (SImode); + if (optimize == 0) + { + rtx ptr = force_const_mem (SImode, XEXP (mem, 0)); + emit_insn (gen_movsi (addr, ptr)); + } + else + emit_insn (gen_movsi (addr, XEXP (mem, 0))); + operands[1] = gen_rtx (MEM, DFmode, addr); + } + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (DFmode, operands[1]); +") + +;; Reloading a df mode value stored in integer regs to memory can require a +;; scratch reg. +(define_expand "reload_outdf" + [(match_operand:DF 0 "reload_memory_operand" "=o") + (match_operand:DF 1 "s_register_operand" "r") + (match_operand:SI 2 "s_register_operand" "=&r")] + "" + " + if (GET_CODE (XEXP (operands[0], 0)) == REG) + operands[2] = XEXP (operands[0], 0); + else + emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0), + XEXP (XEXP (operands[0], 0), 1))); + emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]), + operands[1])); + DONE; +") + +(define_insn "" + [(set (match_operand:DF 0 "general_operand" "=r,Q#m,r,f,f,f,f,m,!f,!r,r") + (match_operand:DF 1 "general_operand" + "Q,r,?o,?f,!G,!H,m,f,r,f,??r"))] + "TARGET_HARD_FLOAT + && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], DFmode))" + "* +{ + rtx ops[3]; + + switch (which_alternative) + { + case 0: + return \"ldm%?ia\\t%m1, {%0, %R0}\\t%@ double\"; + + case 1: + return \"stm%?ia\\t%m0, {%1, %R1}\\t%@ double\"; + + case 2: + ops[0] = operands[0]; + ops[1] = XEXP (XEXP (operands[1], 0), 0); + ops[2] = XEXP (XEXP (operands[1], 0), 1); + if (!INTVAL (ops[2]) || const_ok_for_arm (INTVAL (ops[2]))) + output_asm_insn (\"add%?\\t%0, %1, %2\", ops); + else + output_asm_insn (\"sub%?\\t%0, %1, #%n2\", ops); + return \"ldm%?ia\\t%0, {%0, %R0}\\t%@ double\"; + + case 3: + case 4: + return \"mvf%?d\\t%0, %1\"; + + case 5: return \"mnf%?d\\t%0, #%N1\"; + case 6: return \"ldf%?d\\t%0, %1\"; + case 7: return \"stf%?d\\t%1, %0\"; + case 8: return output_mov_double_fpu_from_arm (operands); + case 9: return output_mov_double_arm_from_fpu (operands); + case 10: return output_move_double (operands); + } +} +" +[(set_attr "length" "4,4,8,4,4,4,4,4,8,8,8") + (set_attr "type" +"load,store2,load,ffarith,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")]) + +;; Software floating point version. This is essentially the same as movdi. +;; Do not use `f' as a constraint to prevent reload from ever trying to use +;; an `f' reg. 
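+;; For example (again assuming -msoft-float): a DFmode value occupies a
+;; pair of core registers, so output_move_double emits two-instruction
+;; sequences such as
+;;     mov     r0, r2
+;;     mov     r1, r3
+;; for a register copy, or ldm/stm style sequences for the memory
+;; alternatives; the lengths of 8 and 32 below reflect that, with 32
+;; covering the worst-case constant load.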
+ +(define_insn "*movdf_soft_insn" + [(set (match_operand:DF 0 "soft_df_operand" "=r,r,o<>,r") + (match_operand:DF 1 "soft_df_operand" "r,o<>,r,F"))] + "TARGET_SOFT_FLOAT" + "* return output_move_double (operands);" +[(set_attr "length" "8,8,8,32") + (set_attr "type" "*,load,store2,*")]) + +(define_expand "movxf" + [(set (match_operand:XF 0 "general_operand" "") + (match_operand:XF 1 "general_operand" ""))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "") + +;; Even when the XFmode patterns aren't enabled, we enable this after +;; reloading so that we can push floating point registers in the prologue. + +(define_insn "" + [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r") + (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))] + "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)" + "* + switch (which_alternative) + { + case 0: return \"mvf%?e\\t%0, %1\"; + case 1: return \"mnf%?e\\t%0, #%N1\"; + case 2: return \"ldf%?e\\t%0, %1\"; + case 3: return \"stf%?e\\t%1, %0\"; + case 4: return output_mov_long_double_fpu_from_arm (operands); + case 5: return output_mov_long_double_arm_from_fpu (operands); + case 6: return output_mov_long_double_arm_from_arm (operands); + } +" +[(set_attr "length" "4,4,4,4,8,8,12") + (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")]) + + +;; load- and store-multiple insns +;; The arm can load/store any set of registers, provided that they are in +;; ascending order; but that is beyond GCC so stick with what it knows. + +(define_expand "load_multiple" + [(match_par_dup 3 [(set (match_operand:SI 0 "" "") + (match_operand:SI 1 "" "")) + (use (match_operand:SI 2 "" ""))])] + "" + " + /* Support only fixed point registers */ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) > 14 + || INTVAL (operands[2]) < 2 + || GET_CODE (operands[1]) != MEM + || GET_CODE (operands[0]) != REG + || REGNO (operands[0]) > 14 + || REGNO (operands[0]) + INTVAL (operands[2]) > 15) + FAIL; + + operands[3] + = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]), + force_reg (SImode, XEXP (operands[1], 0)), + TRUE, FALSE); +") + +;; Load multiple with write-back + +(define_insn "" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "+r") + (plus:SI (match_dup 1) + (match_operand:SI 2 "immediate_operand" "n"))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (match_dup 1)))])] + "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))" + "* +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1)); + ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2)); + + output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops); + return \"\"; +} +" +[(set_attr "type" "load")]) + +;; Ordinary load multiple + +(define_insn "" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "=r") + (match_operand:SI 2 "indirect_operand" "Q"))])] + "" + "* +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0)); + ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1)); + + output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops); + return \"\"; +} +" +[(set_attr "type" "load")]) + +(define_expand "store_multiple" + [(match_par_dup 3 [(set (match_operand:SI 0 "" "") + (match_operand:SI 1 "" "")) + 
(use (match_operand:SI 2 "" ""))])] + "" + " + /* Support only fixed point registers */ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) > 14 + || INTVAL (operands[2]) < 2 + || GET_CODE (operands[1]) != REG + || GET_CODE (operands[0]) != MEM + || REGNO (operands[1]) > 14 + || REGNO (operands[1]) + INTVAL (operands[2]) > 15) + FAIL; + + operands[3] + = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]), + force_reg (SImode, XEXP (operands[0], 0)), + TRUE, FALSE); +") + +;; Store multiple with write-back + +(define_insn "" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "+r") + (plus:SI (match_dup 1) + (match_operand:SI 2 "immediate_operand" "n"))) + (set (mem:SI (match_dup 1)) + (match_operand:SI 3 "s_register_operand" "r"))])] + "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))" + "* +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1)); + ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2)); + + output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops); + return \"\"; +} +" +[(set (attr "type") + (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4)) + (const_string "store2") + (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5)) + (const_string "store3")] + (const_string "store4")))]) + +;; Ordinary store multiple + +(define_insn "" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:SI 2 "indirect_operand" "=Q") + (match_operand:SI 1 "s_register_operand" "r"))])] + "" + "* +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0)); + ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1)); + + output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops); + return \"\"; +} +" +[(set (attr "type") + (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3)) + (const_string "store2") + (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4)) + (const_string "store3")] + (const_string "store4")))]) + +;; Move a block of memory if it is word aligned and MORE than 2 words long. +;; We could let this apply for blocks of less than this, but it clobbers so +;; many registers that there is then probably a better way. 
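+;;
+;; For illustration only (the struct and function are hypothetical): a
+;; word-aligned, fixed-size copy such as
+;;
+;;	struct four_words { int w[4]; };
+;;	void copy4 (struct four_words *d, struct four_words *s) { *d = *s; }
+;;
+;; is the kind of block movstrqi below is intended to catch; when
+;; arm_gen_movstrqi accepts the operands it emits paired ldmia/stmia
+;; instructions rather than a byte-at-a-time loop.  Whether a particular
+;; copy qualifies depends on the length and alignment operands, so treat
+;; this only as a sketch.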
+ +(define_expand "movstrqi" + [(match_operand:BLK 0 "general_operand" "") + (match_operand:BLK 1 "general_operand" "") + (match_operand:SI 2 "const_int_operand" "") + (match_operand:SI 3 "const_int_operand" "")] + "" + " + if (arm_gen_movstrqi (operands)) + DONE; + FAIL; +") + + +;; Comparison and test insns + +(define_expand "cmpsi" + [(set (reg:CC 24) + (compare:CC (match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "arm_add_operand" "")))] + "" + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 0; + DONE; +} +") + +(define_expand "cmpsf" + [(set (reg:CC 24) + (compare:CC (match_operand:SF 0 "s_register_operand" "") + (match_operand:SF 1 "fpu_rhs_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} +") + +(define_expand "cmpdf" + [(set (reg:CC 24) + (compare:CC (match_operand:DF 0 "s_register_operand" "") + (match_operand:DF 1 "fpu_rhs_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} +") + +(define_expand "cmpxf" + [(set (reg:CC 24) + (compare:CC (match_operand:XF 0 "s_register_operand" "") + (match_operand:XF 1 "fpu_rhs_operand" "")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} +") + +(define_insn "" + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_add_operand" "rI,L")))] + "" + "@ + cmp%?\\t%1, %2 + cmn%?\\t%1, #%n2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (neg:SI (match_operand:SI 2 "s_register_operand" "r"))))] + "" + "cmn%?\\t%1, %2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rM")])))] + "" + "cmp%?\\t%1, %3%S2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (neg:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rM")]))))] + "" + "cmn%?\\t%1, %3%S2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f") + (match_operand:SF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?\\t%0, %1 + cnf%?\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f") + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?\\t%0, %1 + cnf%?\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFP 24) + (compare:CCFP (float_extend:DF + (match_operand:SF 0 "s_register_operand" "f,f")) + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?\\t%0, %1 + cnf%?\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:DF 0 "s_register_operand" "f") + (float_extend:DF + 
(match_operand:SF 1 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "cmf%?\\t%0, %1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f") + (match_operand:XF 1 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "@ + cmf%?\\t%0, %1 + cnf%?\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f") + (match_operand:SF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?e\\t%0, %1 + cnf%?e\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f") + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?e\\t%0, %1 + cnf%?e\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (float_extend:DF + (match_operand:SF 0 "s_register_operand" "f,f")) + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] + "TARGET_HARD_FLOAT" + "@ + cmf%?e\\t%0, %1 + cnf%?e\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "TARGET_HARD_FLOAT" + "cmf%?e\\t%0, %1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f") + (match_operand:XF 1 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT" + "@ + cmf%?e\\t%0, %1 + cnf%?e\\t%0, #%N1" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +; This insn allows redundant compares to be removed by cse, nothing should +; ever appear in the output file since (set (reg x) (reg x)) is a no-op that +; is deleted later on. The match_dup will match the mode here, so that +; mode changes of the condition codes aren't lost by this even though we don't +; specify what they are. 
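+;
+; A sketch of the intent (the RTL below is illustrative, not from a dump):
+; once cse sees the second of two identical compares, e.g.
+;	(set (reg:CC 24) (compare:CC (reg:SI 0) (const_int 0)))
+;	...
+;	(set (reg:CC 24) (compare:CC (reg:SI 0) (const_int 0)))
+; it can replace the redundant one with (set (reg:CC 24) (reg:CC 24)), which
+; matches the pattern below, emits nothing but an assembler comment, and is
+; later deleted as a no-op move.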
+ +(define_insn "" + [(set (match_operand 0 "cc_register" "") (match_dup 0))] + "" + "\\t%@ deleted compare" +[(set_attr "conds" "set") + (set_attr "length" "0")]) + + +;; Conditional branch insns + +(define_expand "beq" + [(set (pc) + (if_then_else (eq (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bne" + [(set (pc) + (if_then_else (ne (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bgt" + [(set (pc) + (if_then_else (gt (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "ble" + [(set (pc) + (if_then_else (le (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bge" + [(set (pc) + (if_then_else (ge (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "blt" + [(set (pc) + (if_then_else (lt (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bgtu" + [(set (pc) + (if_then_else (gtu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bleu" + [(set (pc) + (if_then_else (leu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bgeu" + [(set (pc) + (if_then_else (geu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "bltu" + [(set (pc) + (if_then_else (ltu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +;; patterns to match conditional branch insns + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return \"b%d1\\t%l0\"; +}" +[(set_attr "conds" "use")]) + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return \"b%D1\\t%l0\"; +}" +[(set_attr "conds" "use")]) + + +; scc insns + +(define_expand "seq" + [(set (match_operand:SI 0 
"s_register_operand" "=r") + (eq:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sne" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ne:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgt" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (gt:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sle" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (le:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sge" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ge:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "slt" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (lt:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgtu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (gtu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sleu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (leu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgeu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (geu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sltu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ltu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "comparison_operator" [(reg 24) (const_int 0)]))] + "" + "mov%D1\\t%0, #0\;mov%d1\\t%0, #1" +[(set_attr "conds" "use") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (neg:SI (match_operator:SI 1 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0" +[(set_attr "conds" "use") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operator:SI 1 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1" +[(set_attr "conds" "use") + (set_attr "length" "8")]) + + +;; Conditional move insns + +(define_expand "movsicc" + [(set (match_operand:SI 0 "register_operand" "") + (if_then_else (match_operand 1 "comparison_operator" "") + (match_operand:SI 2 "arm_not_operand" "") + (match_operand:SI 3 "register_operand" "")))] + "0" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1, + arm_compare_fp); + + operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx); +}") + +(define_expand "movsfcc" + [(set (match_operand:SF 0 "register_operand" "") + 
(if_then_else (match_operand 1 "comparison_operator" "") + (match_operand:SF 2 "nonmemory_operand" "") + (match_operand:SF 3 "register_operand" "")))] + "" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1, + arm_compare_fp); + + operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx); +}") + +(define_expand "movdfcc" + [(set (match_operand:DF 0 "register_operand" "") + (if_then_else (match_operand 1 "comparison_operator" "") + (match_operand:DF 2 "nonmemory_operand" "") + (match_operand:DF 3 "register_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1, + arm_compare_fp); + + operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx); +}") + +(define_insn "*movsicc_insn" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 2 "arm_not_operand" "rI,K") + (match_operand:SI 3 "register_operand" "0,0")))] + "" + "@ + mov%d1\\t%0, %2 + mvn%d1\\t%0, #%B2" + [(set_attr "type" "*,*") + (set_attr "conds" "use,use")]) + +(define_insn "*movsfcc_hard_insn" + [(set (match_operand:SF 0 "register_operand" "=f") + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SF 2 "register_operand" "f") + (match_operand:SF 3 "register_operand" "0")))] + "TARGET_HARD_FLOAT" + "mvf%d1s\\t%0, %2" + [(set_attr "type" "ffarith") + (set_attr "conds" "use")]) + +(define_insn "*movsfcc_soft_insn" + [(set (match_operand:SF 0 "register_operand" "=r") + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SF 2 "register_operand" "r") + (match_operand:SF 3 "register_operand" "0")))] + "TARGET_SOFT_FLOAT" + "mov%d1\\t%0, %2" + [(set_attr "type" "*") + (set_attr "conds" "use")]) + +(define_insn "*movdfcc_insn" + [(set (match_operand:DF 0 "register_operand" "=f") + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:DF 2 "register_operand" "f") + (match_operand:DF 3 "register_operand" "0")))] + "TARGET_HARD_FLOAT" + "mvf%d1d\\t%0, %2" + [(set_attr "type" "ffarith") + (set_attr "conds" "use")]) + +;; Jump and linkage insns + +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))] + "" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return \"b%?\\t%l0\"; +}") + +(define_expand "call" + [(parallel [(call (match_operand 0 "memory_operand" "") + (match_operand 1 "general_operand" "")) + (clobber (reg:SI 14))])] + "" + "") + +(define_insn "" + [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r")) + (match_operand 1 "" "g")) + (clobber (reg:SI 14))] + "" + "* + return output_call (operands); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) +;; length is worst case, normally it is only two + (set_attr "length" "12") + (set_attr "type" "call")]) + +(define_insn "" + [(call (mem:SI (match_operand 0 "memory_operand" "m")) + (match_operand 1 "general_operand" "g")) + (clobber (reg:SI 14))] + "" + "* + return output_call_mem (operands); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "12") + (set_attr "type" "call")]) + +(define_expand "call_value" + 
[(parallel [(set (match_operand 0 "" "=rf") + (call (match_operand 1 "memory_operand" "m") + (match_operand 2 "general_operand" "g"))) + (clobber (reg:SI 14))])] + "" + "") + +(define_insn "" + [(set (match_operand 0 "" "=rf") + (call (mem:SI (match_operand:SI 1 "s_register_operand" "r")) + (match_operand 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "" + "* + return output_call (&operands[1]); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "12") + (set_attr "type" "call")]) + +(define_insn "" + [(set (match_operand 0 "" "=rf") + (call (mem:SI (match_operand 1 "memory_operand" "m")) + (match_operand 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))" + "* + return output_call_mem (&operands[1]); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "12") + (set_attr "type" "call")]) + +;; Allow calls to SYMBOL_REFs specially as they are not valid general addresses +;; The 'a' causes the operand to be treated as an address, i.e. no '#' output. + +(define_insn "" + [(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))] + "GET_CODE (operands[0]) == SYMBOL_REF" + "bl%?\\t%a0" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "type" "call")]) + +(define_insn "" + [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "GET_CODE(operands[1]) == SYMBOL_REF" + "bl%?\\t%a1" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "type" "call")]) + +;; Often the return insn will be the same as loading from memory, so set attr +(define_insn "return" + [(return)] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction (NULL, TRUE); +}" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(reg 24) (const_int 0)]) + (return) + (pc)))] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction (operands[0], TRUE); +}" +[(set_attr "conds" "use") + (set_attr "type" "load")]) + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(reg 24) (const_int 0)]) + (pc) + (return)))] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction + (gen_rtx (reverse_condition (GET_CODE (operands[0])), + GET_MODE (operands[0]), XEXP (operands[0], 0), + XEXP (operands[0], 1)), + TRUE); +}" +[(set_attr "conds" "use") + (set_attr "type" "load")]) + +;; Call subroutine returning any type. 
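+;;
+;; Background sketch (the C fragment is only an illustration): untyped_call
+;; below is what a __builtin_apply call turns into, e.g.
+;;
+;;	void *apply_it (void (*fn) (), void *args, int size)
+;;	{
+;;	  return __builtin_apply (fn, args, size);
+;;	}
+;;
+;; The expander emits the call with gen_call, copies each (set ...) out of
+;; the result block in operand 2, and then emits a blockage insn so the
+;; optimizer cannot move anything across the point where the value
+;; registers are saved.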
+ +(define_expand "untyped_call" + [(parallel [(call (match_operand 0 "" "") + (const_int 0)) + (match_operand 1 "" "") + (match_operand 2 "" "")])] + "" + " +{ + int i; + + emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. */ + emit_insn (gen_blockage ()); + + DONE; +}") + +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and +;; all of memory. This blocks insns from being moved across this point. + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] 0)] + "" + "" +[(set_attr "length" "0") + (set_attr "type" "block")]) + +(define_insn "tablejump" + [(set (pc) + (match_operand:SI 0 "s_register_operand" "r")) + (use (label_ref (match_operand 1 "" "")))] + "" + "mov%?\\t%|pc, %0\\t%@ table jump, label %l1") + +(define_insn "" + [(set (pc) + (match_operand:SI 0 "memory_operand" "m")) + (use (label_ref (match_operand 1 "" "")))] + "" + "ldr%?\\t%|pc, %0\\t%@ table jump, label %l1" +[(set_attr "type" "load")]) + +(define_insn "indirect_jump" + [(set (pc) + (match_operand:SI 0 "s_register_operand" "r"))] + "" + "mov%?\\t%|pc, %0\\t%@ indirect jump") + +(define_insn "" + [(set (pc) + (match_operand:SI 0 "memory_operand" "m"))] + "" + "ldr%?\\t%|pc, %0\\t%@ indirect jump" +[(set_attr "type" "load")]) + +;; Misc insns + +(define_insn "nop" + [(const_int 0)] + "" + "mov%?\\tr0, r0\\t%@ nop") + +;; Patterns to allow combination of arithmetic, cond code and shifts + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "reg_or_int_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]))] + "" + "%i1%?\\t%0, %2, %4%S3") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "reg_or_int_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)]) + (match_dup 2)]))] + "" + "%i1%?s\\t%0, %2, %4%S3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "reg_or_int_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "%i1%?s\\t%0, %2, %4%S3" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "reg_or_int_operand" "rM")])))] + "" + "sub%?\\t%0, %1, %3%S2") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 
"s_register_operand" "r") + (match_operand:SI 4 "reg_or_int_operand" "rM")])) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "" + "sub%?s\\t%0, %1, %3%S2" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "reg_or_int_operand" "rM")])) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "sub%?s\\t%0, %1, %3%S2" +[(set_attr "conds" "set")]) + +;; These variants of the above insns can occur if the first operand is the +;; frame pointer and we eliminate that. This is a kludge, but there doesn't +;; seem to be a way around it. Most of the predicates have to be null +;; because the format can be generated part way through reload, so +;; if we don't match it as soon as it becomes available, reload doesn't know +;; how to reload pseudos that haven't got hard registers; the constraints will +;; sort everything out. + +(define_insn "" + [(set (match_operand:SI 0 "" "=&r") + (plus:SI (plus:SI (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rM")]) + (match_operand:SI 2 "" "r")) + (match_operand:SI 1 "const_int_operand" "n")))] + "reload_in_progress" + "* + output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands); + operands[2] = operands[1]; + operands[1] = operands[0]; + return output_add_immediate (operands); +" +; we have no idea how long the add_immediate is, it could be up to 4. +[(set_attr "length" "20")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (plus:SI + (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rM")]) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (set (match_operand:SI 0 "" "=&r") + (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)]) + (match_dup 1)) + (match_dup 2)))] + "reload_in_progress" + "* + output_add_immediate (operands); + return \"add%?s\\t%0, %0, %3%S5\"; +" +[(set_attr "conds" "set") + (set_attr "length" "20")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (plus:SI + (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rM")]) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r"))] + "reload_in_progress" + "* + output_add_immediate (operands); + return \"add%?s\\t%0, %0, %3%S5\"; +" +[(set_attr "conds" "set") + (set_attr "length" "20")]) + +;; These are similar, but are needed when the mla pattern contains the +;; eliminated register as operand 3. 
+ +(define_insn "" + [(set (match_operand:SI 0 "" "=&r,&r") + (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r") + (match_operand:SI 2 "" "r,r")) + (match_operand:SI 3 "" "r,r")) + (match_operand:SI 4 "const_int_operand" "n,n")))] + "reload_in_progress" + "* + output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands); + operands[2] = operands[4]; + operands[1] = operands[0]; + return output_add_immediate (operands); +" +[(set_attr "length" "20")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI (plus:SI (mult:SI + (match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "r")) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (set (match_operand:SI 0 "" "=&r") + (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1)) + (match_dup 2)))] + "reload_in_progress" + "* + output_add_immediate (operands); + output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands); + return \"\"; +" +[(set_attr "length" "20") + (set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI (plus:SI (mult:SI + (match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "r")) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r"))] + "reload_in_progress" + "* + output_add_immediate (operands); + return \"mla%?s\\t%0, %3, %4, %0\"; +" +[(set_attr "length" "20") + (set_attr "conds" "set")]) + + + + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (match_operator 1 "comparison_operator" + [(match_operand 3 "reversible_cc_register" "") (const_int 0)]) + (match_operand:SI 2 "s_register_operand" "r")))] + "" + "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1" +[(set_attr "conds" "use") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (ior:SI (match_operator 2 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 1 "s_register_operand" "0,?r")))] + "" + "@ + orr%d2\\t%0, %1, #1 + mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1" +[(set_attr "conds" "use") + (set_attr "length" "4,8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rI,L")])) + (clobber (reg 24))] + "" + "* + if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx) + return \"mov\\t%0, %2, lsr #31\"; + + if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx) + return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\"; + + if (GET_CODE (operands[1]) == NE) + { + if (which_alternative == 1) + return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\"; + return \"subs\\t%0, %2, %3\;movne\\t%0, #1\"; + } + if (which_alternative == 1) + output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + output_asm_insn (\"cmp\\t%2, %3\", operands); + return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r") + (ior:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + int dominant = comparison_dominates_p (GET_CODE (operands[4]), + GET_CODE (operands[1])); + + 
output_asm_insn (dominant ? \"cmp\\t%5, %6\" : \"cmp\\t%2, %3\", + operands); + output_asm_insn (\"mov\\t%0, #0\", operands); + if (GET_CODE (operands[1]) == GET_CODE (operands[4]) + || comparison_dominates_p (GET_CODE (operands[1]), + GET_CODE (operands[4])) + || dominant) + output_asm_insn (dominant ? \"cmp%D4\\t%2, %3\" : \"cmp%D1\\t%5,%6\", + operands); + else + output_asm_insn (\"mov%d1\\t%0, #1\;cmp\\t%5, %6\", operands); + return dominant ? \"mov%d1\\t%0, #1\" : \"mov%d4\\t%0, #1\"; +} +" +[(set_attr "conds" "clob") +; worst case length + (set_attr "length" "20")]) + +(define_split + [(set (pc) + (if_then_else + (match_operator 5 "equality_operator" + [(ior:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "arm_add_operand" "")]) + (match_operator 7 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "") + (match_operand:SI 3 "arm_add_operand" "")])) + (const_int 0)]) + (label_ref (match_operand 4 "" "")) + (pc))) + (clobber (reg 24))] + "(GET_CODE (operands[6]) == GET_CODE (operands[7]) + || comparison_dominates_p (GET_CODE (operands[6]), GET_CODE (operands[7])) + || comparison_dominates_p (GET_CODE (operands[7]), GET_CODE (operands[6])))" + [(set (reg:CC 24) + (compare:CC (ior:CC (match_op_dup 6 + [(match_dup 0) (match_dup 1)]) + (match_op_dup 7 + [(match_dup 2) (match_dup 3)])) + (const_int 0))) + (set (pc) + (if_then_else (match_op_dup 5 [(reg:CC 24) (const_int 0)]) + (label_ref (match_dup 4)) + (pc)))] + " +{ + enum rtx_code code = comparison_dominates_p (GET_CODE (operands[6]), + GET_CODE (operands[7])) + ? GET_CODE (operands[7]) : GET_CODE (operands[6]); + + if (GET_CODE (operands[5]) == NE) + operands[5] = gen_rtx (code, CCmode, + XEXP (operands[5], 0), XEXP (operands[5], 1)); + else + operands[5] = gen_rtx (reverse_condition (code), CCmode, + XEXP (operands[5], 0), XEXP (operands[5], 1)); +} +") + +;; Don't match these patterns if we can use a conditional compare, since they +;; tell the final prescan branch eliminator code that full branch inlining +;; can't be done. 
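+;;
+;; Rough illustration (source fragment and register names are invented):
+;; for a test like
+;;	if (a == 0 || b == 4) goto out;
+;; the two conditions are compatible, so the conditional-compare patterns
+;; can produce
+;;	cmp	a, #0
+;;	cmpne	b, #4
+;;	beq	out
+;; The jump_clob pattern below is only for pairs whose condition codes
+;; cannot be merged that way (say an EQ test combined with a signed LT);
+;; it has to fall back on two separate compares and branches, which is why
+;; matching it tells the final prescan pass that full branch inlining
+;; cannot be done.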
+ +(define_insn "" + [(set (pc) + (if_then_else + (ne (ior:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")]) + (match_operator 6 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI,L,L")])) + (const_int 0)) + (label_ref (match_operand 4 "" "")) + (pc))) + (clobber (reg 24))] + "!(GET_CODE (operands[5]) == GET_CODE (operands[6]) + || comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[6])) + || comparison_dominates_p (GET_CODE (operands[6]), GET_CODE (operands[5])))" + "* +{ + extern int arm_ccfsm_state; + + if (which_alternative & 1) + output_asm_insn (\"cmn\\t%0, #%n1\;b%d5\\t%l4\", operands); + else + output_asm_insn (\"cmp\\t%0, %1\;b%d5\\t%l4\", operands); + + if (which_alternative >= 2) + output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + output_asm_insn (\"cmp\\t%2, %3\", operands); + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return \"b%d6\\t%l4\"; +}" +[(set_attr "conds" "jump_clob") + (set_attr "length" "16")]) + +(define_insn "" + [(set (reg:CC 24) + (compare:CC + (ior:CC (match_operator 4 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")]) + (match_operator 5 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])) + (const_int 0)))] + "(GET_CODE (operands[4]) == GET_CODE (operands[5]) + || comparison_dominates_p (GET_CODE (operands[4]), GET_CODE (operands[5])) + || comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4])))" + "* + if (comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]))) + { + if (which_alternative >= 2) + output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + output_asm_insn (\"cmp\\t%2, %3\", operands); + + if (which_alternative & 1) + return \"cmn%D5\\t%0, #%n1\"; + return \"cmp%D5\\t%0, %1\"; + } + + if (which_alternative & 1) + output_asm_insn (\"cmn\\t%0, #%n1\", operands); + else + output_asm_insn (\"cmp\\t%0, %1\", operands); + + if (which_alternative >= 2) + return \"cmn%D4\\t%2, #%n3\"; + return \"cmp%D4\\t%2, %3\"; +" +[(set_attr "conds" "set") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (if_then_else (match_operator 3 "equality_operator" + [(match_operator 4 "comparison_operator" + [(reg 24) (const_int 0)]) + (const_int 0)]) + (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))] + "" + "* + if (GET_CODE (operands[3]) == NE) + { + if (which_alternative != 1) + output_asm_insn (\"mov%D4\\t%0, %2\", operands); + if (which_alternative != 0) + output_asm_insn (\"mov%d4\\t%0, %1\", operands); + return \"\"; + } + if (which_alternative != 0) + output_asm_insn (\"mov%D4\\t%0, %1\", operands); + if (which_alternative != 1) + output_asm_insn (\"mov%d4\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "use") + (set_attr "length" "4,4,8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (match_operator:SI 5 "shiftable_operator" + [(match_operator:SI 4 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "s_register_operand" "0,?r")])) + (clobber (reg 24))] + "" + "* + if 
(GET_CODE (operands[4]) == LT && operands[3] == const0_rtx) + return \"%i5\\t%0, %1, %2, lsr #31\"; + + output_asm_insn (\"cmp\\t%2, %3\", operands); + if (GET_CODE (operands[5]) == AND) + output_asm_insn (\"mov%D4\\t%0, #0\", operands); + else if (GET_CODE (operands[5]) == MINUS) + output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands); + else if (which_alternative != 0) + output_asm_insn (\"mov%D4\\t%0, %1\", operands); + return \"%i5%d4\\t%0, %1, #1\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r") + (match_operator:SI 4 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "* + output_asm_insn (\"cmp\\t%2, %3\", operands); + if (which_alternative != 0) + output_asm_insn (\"mov%D4\\t%0, %1\", operands); + return \"sub%d4\\t%0, %1, #1\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r") + (and:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + int dominant = + comparison_dominates_p (reverse_condition (GET_CODE (operands[1])), + reverse_condition (GET_CODE (operands[4]))) + ? 1 + : comparison_dominates_p (reverse_condition (GET_CODE (operands[4])), + reverse_condition (GET_CODE (operands[1]))) + ? 2 : 0; + output_asm_insn (dominant == 2 ? \"cmp\\t%5, %6\" : \"cmp\\t%2, %3\", + operands); + output_asm_insn (\"mov\\t%0, #1\", operands); + if (GET_CODE (operands[1]) == GET_CODE (operands[4]) || dominant) + { + output_asm_insn (dominant == 2 ? \"cmp%d4\\t%2, %3\" + : \"cmp%d1\\t%5, %6\", operands); + } + else + { + output_asm_insn (\"mov%D1\\t%0, #0\", operands); + output_asm_insn (\"cmp\\t%5, %6\", operands); + } + return dominant == 2 ? \"mov%D1\\t%0, #0\" : \"mov%D4\\t%0, #0\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "20")]) + +(define_split + [(set (pc) + (if_then_else (match_operator 1 "equality_operator" + [(and:SI (match_operator 2 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "") + (match_operand:SI 4 "arm_add_operand" "")]) + (match_operator 0 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "") + (match_operand:SI 6 "arm_add_operand" "")])) + (const_int 0)]) + (label_ref (match_operand 7 "" "")) + (pc))) + (clobber (reg 24))] + "(GET_CODE (operands[2]) == GET_CODE (operands[0]) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[2])), + reverse_condition (GET_CODE (operands[0]))) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[0])), + reverse_condition (GET_CODE (operands[2]))))" + [(set (reg:CC 24) + (compare:CC (ior:CC (match_op_dup 2 + [(match_dup 3) (match_dup 4)]) + (match_op_dup 0 + [(match_dup 5) (match_dup 6)])) + (const_int 0))) + (set (pc) + (if_then_else (match_op_dup 1 [(reg:CC 24) (const_int 0)]) + (label_ref (match_dup 7)) + (pc)))] + " +{ + /* Use DeMorgans law to convert this into an IOR of the inverse conditions + This is safe since we only do it for integer comparisons. 
*/ + enum rtx_code code = + comparison_dominates_p (reverse_condition (GET_CODE (operands[2])), + reverse_condition (GET_CODE (operands[0]))) + ? GET_CODE (operands[0]) : GET_CODE (operands[2]); + + operands[2] = gen_rtx (reverse_condition (GET_CODE (operands[2])), + GET_MODE (operands[2]), operands[3], operands[4]); + operands[0] = gen_rtx (reverse_condition (GET_CODE (operands[0])), + GET_MODE (operands[0]), operands[5], operands[6]); + if (GET_CODE (operands[1]) == NE) + operands[1] = gen_rtx (code, CCmode, + XEXP (operands[1], 0), XEXP (operands[1], 1)); + else + operands[1] = gen_rtx (reverse_condition (code), CCmode, + XEXP (operands[1], 0), XEXP (operands[1], 1)); +} +") + +;; Don't match these patterns if we can use a conditional compare, since they +;; tell the final prescan branch eliminator code that full branch inlining +;; can't be done. + +(define_insn "" + [(set (pc) + (if_then_else + (eq (and:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r,r,r,r") + (match_operand:SI 6 "arm_rhs_operand" "rI,rI,L,L")])) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc))) + (clobber (reg 24))] + "!(GET_CODE (operands[1]) == GET_CODE (operands[4]) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[1])), + reverse_condition (GET_CODE (operands[4]))) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[4])), + reverse_condition (GET_CODE (operands[1]))))" + "* +{ + extern int arm_ccfsm_state; + + if (which_alternative & 1) + output_asm_insn (\"cmn\\t%2, #%n3\;b%D1\\t%l0\", operands); + else + output_asm_insn (\"cmp\\t%2, %3\;b%D1\\t%l0\", operands); + + if (which_alternative >= 2) + output_asm_insn (\"cmn\\t%5, #%n6\", operands); + else + output_asm_insn (\"cmp\\t%5, %6\", operands); + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return \"b%D4\\t%l0\"; +}" +[(set_attr "conds" "jump_clob") + (set_attr "length" "16")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (neg:SI (match_operator 3 "comparison_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* + if (GET_CODE (operands[3]) == LT && operands[3] == const0_rtx) + return \"mov\\t%0, %1, asr #31\"; + + if (GET_CODE (operands[3]) == NE) + return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\"; + + if (GET_CODE (operands[3]) == GT) + return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\"; + + output_asm_insn (\"cmp\\t%1, %2\", operands); + output_asm_insn (\"mov%D3\\t%0, #0\", operands); + return \"mvn%d3\\t%0, #0\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "movcond" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (if_then_else:SI + (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r,r") + (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")]) + (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg 24))] + "" + "* + if (GET_CODE (operands[5]) == LT + && (operands[4] == const0_rtx)) + { + if (which_alternative != 1 && GET_CODE (operands[1]) == REG) + { + if (operands[2] == const0_rtx) + return \"and\\t%0, %1, %3, asr #31\"; + return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\"; + } + else if 
(which_alternative != 0 && GET_CODE (operands[2]) == REG) + { + if (operands[1] == const0_rtx) + return \"bic\\t%0, %2, %3, asr #31\"; + return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\"; + } + /* The only case that falls through to here is when both ops 1 & 2 + are constants */ + } + + if (GET_CODE (operands[5]) == GE + && (operands[4] == const0_rtx)) + { + if (which_alternative != 1 && GET_CODE (operands[1]) == REG) + { + if (operands[2] == const0_rtx) + return \"bic\\t%0, %1, %3, asr #31\"; + return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\"; + } + else if (which_alternative != 0 && GET_CODE (operands[2]) == REG) + { + if (operands[1] == const0_rtx) + return \"and\\t%0, %2, %3, asr #31\"; + return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\"; + } + /* The only case that falls through to here is when both ops 1 & 2 + are constants */ + } + if (GET_CODE (operands[4]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[4]))) + output_asm_insn (\"cmn\\t%3, #%n4\", operands); + else + output_asm_insn (\"cmp\\t%3, %4\", operands); + if (which_alternative != 0) + output_asm_insn (\"mov%d5\\t%0, %1\", operands); + if (which_alternative != 1) + output_asm_insn (\"mov%D5\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 9 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r,r") + (match_operand:SI 6 "arm_add_operand" "rI,L")]) + (match_operator:SI 8 "shiftable_operator" + [(match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_rhs_operand" "rI,rI")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "@ + cmp\\t%5, %6\;%I8%d9\\t%0, %1, %2\;%I7%D9\\t%0, %3, %4 + cmn\\t%5, #%n6\;%I8%d9\\t%0, %1, %2\;%I7%D9\\t%0, %3, %4" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rIL,rIL")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm"))) + (clobber (reg 24))] + "" + "* + /* If we have an operation where (op x 0) is the identity operation and + the conditional operator is LT or GE and we are comparing against zero and + everything is in registers then we can do this in two instructions */ + if (operands[3] == const0_rtx + && GET_CODE (operands[7]) != AND + && GET_CODE (operands[5]) == REG + && GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[4]) + && REGNO (operands[4]) != REGNO (operands[0])) + { + if (GET_CODE (operands[6]) == LT) + return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\"; + else if (GET_CODE (operands[6]) == GE) + return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\"; + } + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + output_asm_insn (\"cmp\\t%2, %3\", operands); + output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + return \"ldr%D6\\t%0, %1\"; + else + return \"mov%D6\\t%0, %1\"; + } + 
return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rIL,rIL")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm") + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "* + /* If we have an operation where (op x 0) is the identity operation and + the conditional operator is LT or GE and we are comparing against zero and + everything is in registers then we can do this in two instructions */ + if (operands[5] == const0_rtx + && GET_CODE (operands[7]) != AND + && GET_CODE (operands[3]) == REG + && GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[2]) + && REGNO (operands[2]) != REGNO (operands[0])) + { + if (GET_CODE (operands[6]) == GE) + return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\"; + else if (GET_CODE (operands[6]) == LT) + return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\"; + } + + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + output_asm_insn (\"cmp\\t%4, %5\", operands); + + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + output_asm_insn (\"ldr%d6\\t%0, %1\", operands); + else + output_asm_insn (\"mov%d6\\t%0, %1\", operands); + } + return \"%I7%D6\\t%0, %2, %3\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rIL,rIL")]) + (plus:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rL,rL")) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm"))) + (clobber (reg 24))] + "" + "* +{ + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + output_asm_insn (\"cmp\\t%4, %5\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + output_asm_insn (\"sub%d6\\t%0, %2, #%n3\", operands); + else + output_asm_insn (\"add%d6\\t%0, %2, %3\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + output_asm_insn (\"ldr%D6\\t%0, %1\", operands); + else + output_asm_insn (\"mov%D6\\t%0, %1\", operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rIL,rIL")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm") + (plus:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rIL,rIL")))) + (clobber (reg 24))] + "" + "* +{ + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + output_asm_insn (\"cmp\\t%4, %5\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + output_asm_insn (\"sub%D6\\t%0, %2, #%n3\", 
operands); + else + output_asm_insn (\"add%D6\\t%0, %2, %3\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[6]) == MEM) + output_asm_insn (\"ldr%d6\\t%0, %1\", operands); + else + output_asm_insn (\"mov%d6\\t%0, %1\", operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_add_operand" "rIL,rIL")]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg 24))] + "" + "#" +[(set_attr "conds" "clob") + (set_attr "length" "8,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r") + (if_then_else:SI + (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r,r,r") + (match_operand:SI 4 "arm_add_operand" "rI,L,rI,L")]) + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r,r,r")) + (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI"))) + (clobber (reg 24))] + "" + "@ + cmp\\t%3, %4\;mvn%d5\\t%0, %2 + cmn\\t%3, #%n4\;mvn%d5\\t%0, %2 + cmp\\t%3, %4\;mov%D5\\t%0, %1\;mvn%d5\\t%0, %2 + cmn\\t%3, #%n4\;mov%D5\\t%0, %1\;mvn%d5\\t%0, %2" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r") + (if_then_else:SI + (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r,r,r") + (match_operand:SI 5 "arm_add_operand" "rI,L,rI,L")]) + (match_operator:SI 7 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM,rM")]) + (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI"))) + (clobber (reg 24))] + "" + "@ + cmp\\t%4, %5\;mov%d6\\t%0, %2%S7 + cmn\\t%4, #%n5\;mov%d6\\t%0, %2%S7 + cmp\\t%4, %5\;mov%D6\\t%0, %1\;mov%d6\\t%0, %2%S7 + cmn\\t%4, #%n5\;mov%D6\\t%0, %1\;mov%d6\\t%0, %2%S7" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r") + (if_then_else:SI + (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r,r,r") + (match_operand:SI 5 "arm_add_operand" "rI,L,rI,L")]) + (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI") + (match_operator:SI 7 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM,rM")]))) + (clobber (reg 24))] + "" + "@ + cmp\\t%4, %5\;mov%D6\\t%0, %2%S7 + cmn\\t%4, #%n5\;mov%D6\\t%0, %2%S7 + cmp\\t%4, %5\;mov%d6\\t%0, %1\;mov%D6\\t%0, %2%S7 + cmn\\t%4, #%n5\;mov%d6\\t%0, %1\;mov%D6\\t%0, %2%S7" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI + (match_operator 7 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r,r") + (match_operand:SI 6 "arm_add_operand" "rI,L")]) + (match_operator:SI 8 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_rhs_operand" "rM,rM")]) + (match_operator:SI 9 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "@ + cmp\\t%5, %6\;mov%d7\\t%0, %1%S8\;mov%D7\\t%0, %3%S9 + cmn\\t%5, #%n6\;mov%d7\\t%0, %1%S8\;mov%D7\\t%0, %3%S9" 
+[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI + (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rI,L")]) + (not:SI (match_operand:SI 1 "s_register_operand" "r,r")) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "@ + cmp\\t%4, %5\;mvn%d6\\t%0, %1\;%I7%D6\\t%0, %2, %3 + cmn\\t%4, #%n5\;mvn%d6\\t%0, %1\;%I7%D6\\t%0, %2, %3" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI + (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rI,L")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + (not:SI (match_operand:SI 1 "s_register_operand" "r,r")))) + (clobber (reg 24))] + "" + "@ + cmp\\t%4, %5\;mvn%D6\\t%0, %1\;%I7%d6\\t%0, %2, %3 + cmn\\t%4, #%n5\;mvn%D6\\t%0, %1\;%I7%d6\\t%0, %2, %3" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r") + (if_then_else:SI + (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r,r,r") + (match_operand:SI 4 "arm_add_operand" "rI,L,rI,L")]) + (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")) + (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI"))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%3, %4\;rsb%d5\\t%0, %2, #0 + cmn\\t%3, #%n4\;rsb%d5\\t%0, %2, #0 + cmp\\t%3, %4\;mov%D5\\t%0, %1\;rsb%d5\\t%0, %2, #0 + cmn\\t%3, #%n4\;mov%D5\\t%0, %1\;rsb%d5\\t%0, %2, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r") + (if_then_else:SI + (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r,r,r") + (match_operand:SI 4 "arm_add_operand" "rI,L,rI,L")]) + (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI") + (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")))) + (clobber (reg:CC 24))] + "" + "@ + cmp\\t%3, %4\;rsb%D5\\t%0, %2, #0 + cmn\\t%3, #%n4\;rsb%D5\\t%0, %2, #0 + cmp\\t%3, %4\;mov%d5\\t%0, %1\;rsb%D5\\t%0, %2, #0 + cmn\\t%3, #%n4\;mov%d5\\t%0, %1\;rsb%D5\\t%0, %2, #0" +[(set_attr "conds" "clob") + (set_attr "length" "8,8,12,12")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "shiftable_operator" + [(match_operand:SI 2 "memory_operand" "m") + (match_operand:SI 3 "memory_operand" "m")])) + (clobber (match_scratch:SI 4 "=r"))] + "adjacent_mem_locations (operands[2], operands[3])" + "* +{ + rtx ldm[3]; + rtx arith[4]; + int val1 = 0, val2 = 0; + + if (REGNO (operands[0]) > REGNO (operands[4])) + { + ldm[1] = operands[4]; + ldm[2] = operands[0]; + } + else + { + ldm[1] = operands[0]; + ldm[2] = operands[4]; + } + if (GET_CODE (XEXP (operands[2], 0)) != REG) + val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1)); + if (GET_CODE (XEXP (operands[3], 0)) != REG) + val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1)); + arith[0] = operands[0]; + arith[3] = operands[1]; + if (val1 < val2) + { + arith[1] = ldm[1]; + arith[2] = ldm[2]; + } + else + { + arith[1] = ldm[2]; + arith[2] = 
ldm[1]; + } + if (val1 && val2) + { + rtx ops[3]; + ldm[0] = ops[0] = operands[4]; + ops[1] = XEXP (XEXP (operands[2], 0), 0); + ops[2] = XEXP (XEXP (operands[2], 0), 1); + output_add_immediate (ops); + if (val1 < val2) + output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm); + else + output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm); + } + else if (val1) + { + ldm[0] = XEXP (operands[3], 0); + if (val1 < val2) + output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm); + else + output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm); + } + else + { + ldm[0] = XEXP (operands[2], 0); + if (val1 < val2) + output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm); + else + output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm); + } + output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith); + return \"\"; +} +" +[(set_attr "length" "12") + (set_attr "type" "load")]) + +;; the arm can support extended pre-inc instructions + +;; In all these cases, we use operands 0 and 1 for the register being +;; incremented because those are the operands that local-alloc will +;; tie and these are the pair most likely to be tieable (and the ones +;; that will benefit the most). + +;; We reject the frame pointer if it occurs anywhere in these patterns since +;; elimination will cause too many headaches. + +(define_insn "" + [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))) + (match_operand:QI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "str%?b\\t%3, [%0, %2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))) + (match_operand:QI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "str%?b\\t%3, [%0, -%2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:QI 3 "s_register_operand" "=r") + (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?b\\t%3, [%0, %2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:QI 3 "s_register_operand" "=r") + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?b\\t%3, [%0, -%2]!" 
+[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (zero_extend:SI + (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (zero_extend:SI + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "str%?\\t%3, [%0, %2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "str%?\\t%3, [%0, -%2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?\\t%3, [%0, %2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?\\t%3, [%0, -%2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 3 "s_register_operand" "=r") + (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "(! 
BYTES_BIG_ENDIAN) + && ! TARGET_SHORT_BY_BYTES + && REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 3 "s_register_operand" "=r") + (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "(!BYTES_BIG_ENDIAN) + && ! TARGET_SHORT_BY_BYTES + && REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0"))) + (match_operand:QI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "str%?b\\t%5, [%0, %3%S2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]))) + (match_operand:QI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "str%?b\\t%5, [%0, -%3%S2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:QI 5 "s_register_operand" "=r") + (mem:QI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?b\\t%5, [%0, %3%S2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:QI 5 "s_register_operand" "=r") + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?b\\t%5, [%0, -%3%S2]!" 
+[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0"))) + (match_operand:SI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "str%?\\t%5, [%0, %3%S2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]))) + (match_operand:SI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "str%?\\t%5, [%0, -%3%S2]!" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 5 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?\\t%5, [%0, %3%S2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 5 "s_register_operand" "=r") + (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?\\t%5, [%0, -%3%S2]!" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 5 "s_register_operand" "=r") + (mem:HI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "(! BYTES_BIG_ENDIAN) + && ! 
TARGET_SHORT_BY_BYTES + && REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 5 "s_register_operand" "=r") + (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "(! BYTES_BIG_ENDIAN) + && ! TARGET_SHORT_BY_BYTES + && REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi" +[(set_attr "type" "load")]) + +; It can also support extended post-inc expressions, but combine doesn't +; try these.... +; It doesn't seem worth adding peepholes for anything but the most common +; cases since, unlike combine, the increment must immediately follow the load +; for this pattern to match. +; When loading we must watch to see that the base register isn't trampled by +; the load. In such cases this isn't a post-inc expression. + +(define_peephole + [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r")) + (match_operand:QI 2 "s_register_operand" "r")) + (set (match_dup 0) + (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))] + "" + "str%?b\\t%2, [%0], %1") + +(define_peephole + [(set (match_operand:QI 0 "s_register_operand" "=r") + (mem:QI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "ldr%?b\\t%0, [%1], %2") + +(define_peephole + [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r")) + (match_operand:SI 2 "s_register_operand" "r")) + (set (match_dup 0) + (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))] + "" + "str%?\\t%2, [%0], %1") + +(define_peephole + [(set (match_operand:HI 0 "s_register_operand" "=r") + (mem:HI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "(! BYTES_BIG_ENDIAN) + && ! 
TARGET_SHORT_BY_BYTES + && REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "ldr%?\\t%0, [%1], %2\\t%@ loadhi") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "ldr%?\\t%0, [%1], %2") + +(define_peephole + [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r") + (match_operand:SI 1 "index_operand" "rJ"))) + (match_operand:QI 2 "s_register_operand" "r")) + (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))] + "" + "str%?b\\t%2, [%0, %1]!") + +(define_peephole + [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator" + [(match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "const_int_operand" "n")]) + (match_operand:SI 2 "s_register_operand" "+r"))) + (match_operand:QI 3 "s_register_operand" "r")) + (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)]) + (match_dup 2)))] + "" + "str%?b\\t%3, [%2, %0%S4]!") + +; This pattern is never tried by combine, so do it as a peephole + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operand:SI 1 "s_register_operand" "r")) + (set (match_operand 2 "cc_register" "") + (compare (match_dup 1) (const_int 0)))] + "" + "sub%?s\\t%0, %1, #0" +[(set_attr "conds" "set")]) + +; Peepholes to spot possible load- and store-multiples, if the ordering is +; reversed, check that the memory references aren't volatile. + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 12)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 8)))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (match_operand:SI 4 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && REGNO (operands[3]) > REGNO (operands[4]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2]) + || REGNO (operands[1]) == REGNO (operands[3]) + || REGNO (operands[1]) == REGNO (operands[4])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn))))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn + (prev_nonnote_insn (insn))))))" + "ldm%?ia\\t%1, {%4, %3, %2, %0}\\t%@ phole ldm") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 8)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2]) + || REGNO (operands[1]) == REGNO (operands[3])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC 
(PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn)))))" + "ldm%?ia\\t%1, {%3, %2, %0}\\t%@ phole ldm") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 4)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > REGNO (operands[2]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn (insn))))" + "ldm%?ia\\t%1, {%2, %0}\\t%@ phole ldm") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 12))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 8))) + (match_operand:SI 2 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 4))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 4 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && REGNO (operands[3]) > REGNO (operands[4]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn))))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn + (prev_nonnote_insn (insn))))))" + "stm%?ia\\t%1, {%4, %3, %2, %0}\\t%@ phole stm") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 8))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 4))) + (match_operand:SI 2 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 3 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn)))))" + "stm%?ia\\t%1, {%3, %2, %0}\\t%@ phole stm") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 4))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 2 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn))))" + "stm%?ia\\t%1, {%2, %0}\\t%@ phole stm") + +;; A call followed by return can be replaced by restoring the regs and +;; jumping to the subroutine, provided we aren't passing the address of +;; any of our local variables. If we call alloca then this is unsafe +;; since restoring the frame frees the memory, which is not what we want. +;; Sometimes the return might have been targeted by the final prescan: +;; if so then emit a proper return insn as well. +;; Unfortunately, if the frame pointer is required, we don't know if the +;; current function has any implicit stack pointer adjustments that will +;; be restored by the return: we can't therefore do a tail call. 
+;; Another unfortunate that we can't handle is if current_function_args_size +;; is non-zero: in this case elimination of the argument pointer assumed +;; that lr was pushed onto the stack, so eliminating upsets the offset +;; calculations. + +(define_peephole + [(parallel [(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))]) + (return)] + "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return \"b%?\\t%a0\"; +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "8")]) + +(define_peephole + [(parallel [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (return)] + "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return \"b%?\\t%a1\"; +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "8")]) + +;; As above but when this function is not void, we must be returning the +;; result of the called subroutine. + +(define_peephole + [(parallel [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (use (match_dup 0)) + (return)] + "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return \"b%?\\t%a1\"; +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "8")]) + +;; If calling a subroutine and then jumping back to somewhere else, but not +;; too far away, then we can set the link register with the branch address +;; and jump direct to the subroutine. On return from the subroutine +;; execution continues at the branch; this avoids a prefetch stall. +;; We use the length attribute (via short_branch ()) to establish whether or +;; not this is possible, this is the same as the sparc does. 
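+;;
+;; As a rough illustration of the transformation the peephole below aims at
+;; (the label and subroutine names here are made up), a sequence such as
+;;	bl	foo
+;;	b	.Lcont
+;; can, when .Lcont is within range, be emitted instead as something like
+;;	mov	lr, pc		@ protect the condition codes
+;;	add	lr, lr, #(.Lcont - . - 4)
+;;	b	foo		@ foo's return lands directly at .Lcont
+;; so execution continues at .Lcont without the prefetch stall of the extra
+;; taken branch after the call.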
+ +(define_peephole + [(parallel[(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))]) + (set (pc) + (label_ref (match_operand 2 "" "")))] + "0 && GET_CODE (operands[0]) == SYMBOL_REF + && short_branch (INSN_UID (insn), INSN_UID (operands[2])) + && arm_insn_not_targeted (insn)" + "* +{ + int backward = arm_backwards_branch (INSN_UID (insn), + INSN_UID (operands[2])); + +#if 0 + /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or + * above, leaving it out means that the code will still run on an arm 2 or 3 + */ + if (TARGET_6) + { + if (backward) + output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands); + else + output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands); + } + else +#endif + { + output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands); + if (backward) + output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands); + else + output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands); + } + return \"b%?\\t%a0\"; +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set (attr "length") + (if_then_else (eq_attr "cpu" "arm6") + (const_int 8) + (const_int 12)))]) + +(define_peephole + [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (set (pc) + (label_ref (match_operand 3 "" "")))] + "0 && GET_CODE (operands[0]) == SYMBOL_REF + && short_branch (INSN_UID (insn), INSN_UID (operands[3])) + && arm_insn_not_targeted (insn)" + "* +{ + int backward = arm_backwards_branch (INSN_UID (insn), + INSN_UID (operands[3])); + +#if 0 + /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or + * above, leaving it out means that the code will still run on an arm 2 or 3 + */ + if (TARGET_6) + { + if (backward) + output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands); + else + output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands); + } + else +#endif + { + output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands); + if (backward) + output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands); + else + output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . 
-4)\", operands); + } + return \"b%?\\t%a1\"; +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set (attr "length") + (if_then_else (eq_attr "cpu" "arm6") + (const_int 8) + (const_int 12)))]) + +(define_split + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "reg_or_int_operand" "rM")]) + (match_operand:SI 4 "s_register_operand" "r")]) + (label_ref (match_operand 5 "" "")) + (pc))) + (clobber (reg 24))] + "" + [(set (reg:CC 24) + (compare:CC (match_dup 4) + (match_op_dup 1 [(match_dup 2) (match_dup 3)]))) + (set (pc) + (if_then_else (match_op_dup 0 [(reg 24) (const_int 0)]) + (label_ref (match_dup 5)) + (pc)))] + " + operands[0] = gen_rtx (swap_condition (GET_CODE (operands[0])), VOIDmode, + operands[1], operands[2]); +") + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "") + (const_int 0)) + (neg:SI (match_operator:SI 2 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "") + (match_operand:SI 4 "arm_rhs_operand" "")])))) + (clobber (match_operand:SI 5 "s_register_operand" ""))] + "" + [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31)))) + (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 5)))] + "") + +;; This pattern can be used because cc_noov mode implies that the following +;; branch will be an equality (EQ or NE), so the sign extension is not +;; needed. Combine doesn't eliminate these because by the time it sees the +;; branch it no-longer knows that the data came from memory. + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV + (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "m") 0) + (const_int 24)) + (match_operand 1 "immediate_operand" "I"))) + (clobber (match_scratch:SI 2 "=r"))] + "((unsigned long) INTVAL (operands[1])) + == (((unsigned long) INTVAL (operands[1])) >> 24) << 24" + "* + operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24); + output_asm_insn (\"ldr%?b\\t%2, %0\", operands); + output_asm_insn (\"cmp%?\\t%2, %1\", operands); + return \"\"; +" +[(set_attr "conds" "set") + (set_attr "length" "8") + (set_attr "type" "load")]) + +(define_expand "prologue" + [(clobber (const_int 0))] + "" + " + arm_expand_prologue (); + DONE; +") + +;; This split is only used during output to reduce the number of patterns +;; that need assembler instructions adding to them. We allowed the setting +;; of the conditions to be implicit during rtl generation so that +;; the conditional compare patterns would work. However this conflicts to +;; some extend with the conditional data operations, so we have to split them +;; up again here. 
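+;;
+;; Informally (the operand names here are illustrative, not part of the
+;; pattern), the split below takes
+;;	(set Rd (if_then_else (LT Ra Rb) X Y))   with (reg 24) clobbered
+;; and rewrites it after reload as an explicit comparison followed by a
+;; conditional operation that refers only to the condition-code register:
+;;	(set (reg 24) (compare Ra Rb))
+;;	(set Rd (if_then_else (LT (reg 24) (const_int 0)) X Y))
+;; which the simpler conditional patterns that "use" the condition register
+;; can then match.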
+ +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (if_then_else:SI (match_operator 1 "comparison_operator" + [(match_operand 2 "" "") (match_operand 3 "" "")]) + (match_operand 4 "" "") + (match_operand 5 "" ""))) + (clobber (reg 24))] + "reload_completed" + [(set (match_dup 6) (match_dup 7)) + (set (match_dup 0) + (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)]) + (match_dup 4) + (match_dup 5)))] + " +{ + enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2], + operands[3]); + + operands[6] = gen_rtx (REG, mode, 24); + operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]); +} +") + + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 4 "comparison_operator" + [(match_operand 3 "reversible_cc_register" "") + (const_int 0)]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r"))))] + "" + "@ + mvn%D4\\t%0, %2 + mov%d4\\t%0, %1\;mvn%D4\\t%0, %2" +[(set_attr "conds" "use") + (set_attr "length" "4,8")]) + +;; The next two patterns occur when an AND operation is followed by a +;; scc insn sequence + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 1) + (match_operand:SI 2 "immediate_operand" "n")))] + "" + "* + operands[2] = GEN_INT (1 << INTVAL (operands[2])); + output_asm_insn (\"ands\\t%0, %1, %2\", operands); + return \"mvnne\\t%0, #0\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "8")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI + (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 1) + (match_operand:SI 2 "immediate_operand" "n"))))] + "" + "* + operands[2] = GEN_INT (1 << INTVAL (operands[2])); + output_asm_insn (\"tst\\t%1, %2\", operands); + output_asm_insn (\"mvneq\\t%0, #0\", operands); + return \"movne\\t%0, #0\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "12")]) + +;; Push multiple registers to the stack. The first register is in the +;; unspec part of the insn; subsequent registers are in parallel (use ...) +;; expressions. 
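+;;
+;; For example (the register choice is purely illustrative): if the parallel
+;; names r4, r5 and lr, the output routine below assembles and emits a single
+;;	stmfd	sp!, {r4, r5, lr}
+;; unless lr_save_eliminated is set, in which case nothing is output (and a
+;; parallel pushing more than one register is then treated as an error).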
+(define_insn "" + [(match_parallel 2 "multi_register_push" + [(set (match_operand:BLK 0 "memory_operand" "=m") + (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])] + "" + "* +{ + char pattern[100]; + int i; + extern int lr_save_eliminated; + + if (lr_save_eliminated) + { + if (XVECLEN (operands[2], 0) > 1) + abort (); + return \"\"; + } + strcpy (pattern, \"stmfd\\t%m0!, {%|%1\"); + for (i = 1; i < XVECLEN (operands[2], 0); i++) + { + strcat (pattern, \", %|\"); + strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i), + 0))]); + } + strcat (pattern, \"}\"); + output_asm_insn (pattern, operands); + return \"\"; +}" +[(set_attr "type" "store4")]) diff --git a/gnu/usr.bin/gcc/arch/arm32/config.h b/gnu/usr.bin/gcc/arch/arm32/config.h new file mode 100644 index 000000000000..5849357edd88 --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/config.h @@ -0,0 +1,4 @@ +#define NETBSD_NATIVE +#define DEFAULT_TARGET_VERSION "2.7.2" +#define DEFAULT_TARGET_MACHINE "NetBSD/arm32" +#include "arm32/xm-netbsd.h" diff --git a/gnu/usr.bin/gcc/arch/arm32/hconfig.h b/gnu/usr.bin/gcc/arch/arm32/hconfig.h new file mode 100644 index 000000000000..ff743d7ab6bd --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/hconfig.h @@ -0,0 +1 @@ +#include "arm32/xm-netbsd.h" \ No newline at end of file diff --git a/gnu/usr.bin/gcc/arch/arm32/netbsd.h b/gnu/usr.bin/gcc/arch/arm32/netbsd.h new file mode 100644 index 000000000000..35d058b785d7 --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/netbsd.h @@ -0,0 +1,141 @@ +/* Definitions of target machine for GNU compiler. + NetBSD/arm (RiscBSD) version. + Copyright (C) 1993, 1994 Free Software Foundation, Inc. + Contributed by Mark Brinicombe (amb@physig.ph.kcl.ac.uk) + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + +/* Ok it either ARM2 or ARM3 code is produced we need to define the + * appropriate symbol and delete the ARM6 symbol + */ + +/* Run-time Target Specification. */ + +#define TARGET_VERSION fputs (" (ARM/NetBSD)", stderr); + +/* This is used in ASM_FILE_START */ + +#define ARM_OS_NAME "NetBSD" + +/* Unsigned chars produces much better code than signed. */ + +#define DEFAULT_SIGNED_CHAR 0 + +/* ARM600 default cpu */ + +#define TARGET_DEFAULT 8 + +/* Since we always use GAS as our assembler we support stabs */ + +#define DBX_DEBUGGING_INFO 1 + +/*#undef ASM_DECLARE_FUNCTION_NAME*/ + +#include "arm32/arm32.h" + +/* Gets redefined in config/netbsd.h */ + +#undef TARGET_MEM_FUNCTIONS + +#include + +/* Ok some nice defines for CPP + By default we define arm32 __arm32__ and __arm6__ + However if we are compiling 26 bit code -m2 or -m3 then + we remove all these definitions. + The arm32 and __arm32__ defines indication that the compiler + is generating 32 bit address space code. + The __arm2__ __arm3__ and __arm6__ are obvious. 
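+
+   For example (informally, just restating the specs that follow): a plain
+   compile leaves arm32, __arm32__ and __arm6__ defined, whereas compiling
+   with -m2 defines __arm2__ and undefines __arm6__, __arm32__ and arm32
+   again through CPP_SPEC.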
*/
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Darm32 -D__arm32__ -D__arm6__ -Driscbsd -D__NetBSD__ -Asystem(unix) -Asystem(NetBSD) -Acpu(arm) -Amachine(arm)"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{m2:-D__arm2__} %{m3:-D__arm3__} %{m2:-U__arm6__} \
+   %{m3:-U__arm6__} %{m2:-U__arm32__} %{m3:-U__arm32__} \
+   %{m2:-Uarm32} %{m3:-Uarm32} \
+   %{posix:-D_POSIX_SOURCE}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#define HANDLE_SYSV_PRAGMA
+
+
+/* We don't have any limit on the length as our debugger is GDB */
+
+#undef DBX_CONTIN_LENGTH
+
+/* NetBSD does its profiling differently to the Acorn compiler.  We don't
+   need a word following the mcount call; to skip it would require either an
+   assembly stub or use of -fomit-frame-pointer when compiling the profiling
+   functions.  Since we break Acorn CC compatibility below, a little more
+   won't hurt */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+}
+
+/* VERY BIG NOTE : Change of structure alignment for RiscBSD.
+   There are consequences you should be aware of */
+
+/* Normally GCC/arm uses a structure alignment of 32.  This means that
+   structures are padded to a word boundary.  However this causes
+   problems with bugged NetBSD kernel code (possibly userland code
+   as well - I have not checked every binary).
+   The nature of the bugged code is to rely on sizeof() returning
+   the correct size of various structures rounded to the nearest byte
+   (SCSI and ether code are two examples, the vm system is another).
+   This code starts to break when the structure alignment is 32, as sizeof()
+   will report a word-rounded size.
+   By changing the structure alignment to 8, GCC will conform to what
+   is expected by NetBSD.
+
+   This has several side effects that should be considered.
+   1. Structures will only be aligned to the size of the largest member.
+      i.e. structures containing only bytes will be byte aligned.
+           structures containing shorts will be half word aligned.
+           structures containing ints will be word aligned.
+
+      This means structures should be padded to a word boundary if
+      alignment of 32 is required for byte structures etc.
+
+   2. A potential performance penalty may exist if strings are no longer
+      word aligned.  GCC will not be able to use word load/stores to copy
+      short strings.
+
+   This modification is not encouraged but with the present state of the
+   NetBSD source tree it is currently the only solution to meet the
+   requirements.
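+
+   As a concrete illustration of point 1 (the structure is hypothetical):
+   with STRUCTURE_SIZE_BOUNDARY set to 8, a structure containing just two
+   chars has sizeof() == 2, where the normal alignment of 32 would pad it
+   to 4.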
+*/ + +#undef STRUCTURE_SIZE_BOUNDARY +#define STRUCTURE_SIZE_BOUNDARY 8 diff --git a/gnu/usr.bin/gcc/arch/arm32/tconfig.h b/gnu/usr.bin/gcc/arch/arm32/tconfig.h new file mode 100644 index 000000000000..ff743d7ab6bd --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/tconfig.h @@ -0,0 +1 @@ +#include "arm32/xm-netbsd.h" \ No newline at end of file diff --git a/gnu/usr.bin/gcc/arch/arm32/tm.h b/gnu/usr.bin/gcc/arch/arm32/tm.h new file mode 100644 index 000000000000..4a946ea9a32c --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/tm.h @@ -0,0 +1 @@ +#include "arm32/netbsd.h" \ No newline at end of file diff --git a/gnu/usr.bin/gcc/arch/arm32/xm-arm32.h b/gnu/usr.bin/gcc/arch/arm32/xm-arm32.h new file mode 100644 index 000000000000..345c449a14a2 --- /dev/null +++ b/gnu/usr.bin/gcc/arch/arm32/xm-arm32.h @@ -0,0 +1,74 @@ +/* Configuration for GNU C-compiler for Acorn RISC Machine. + Copyright (C) 1991, 1993 Free Software Foundation, Inc. + Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) + and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* #defines that need visibility everywhere. */ +#define FALSE 0 +#define TRUE 1 + +/* This describes the machine the compiler is hosted on. */ +#define HOST_BITS_PER_CHAR 8 +#define HOST_BITS_PER_SHORT 16 +#define HOST_BITS_PER_INT 32 +#define HOST_BITS_PER_LONG 32 + +/* A code distinguishing the floating point format of the host + machine. There are three defined values: IEEE_FLOAT_FORMAT, + VAX_FLOAT_FORMAT, and UNKNOWN_FLOAT_FORMAT. */ + +#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT + +#define HOST_FLOAT_WORDS_BIG_ENDIAN 1 + +/* If not compiled with GNU C, use C alloca. */ +#ifndef __GNUC__ +#define USE_C_ALLOCA +#endif + +/* Define this if the library function putenv is available on your machine */ +#define HAVE_PUTENV 1 + +/* Define this if the library function vprintf is available on your machine */ +#define HAVE_VPRINTF 1 + +/* Define this to be 1 if you know the host compiler supports prototypes, even + if it doesn't define __STDC__, or define it to be 0 if you do not want any + prototypes when compiling GNU CC. */ +#define USE_PROTOTYPES 1 + +/* target machine dependencies. + tm.h is a symbolic link to the actual target specific file. */ +#include "tm.h" + +/* Arguments to use with `exit'. */ +#define SUCCESS_EXIT_CODE 0 +#define FATAL_EXIT_CODE 33 + +/* If we have defined POSIX, but are compiling in the BSD environment, then + we need to define getcwd in terms of getwd. */ +#if defined (POSIX) && defined (_BSD_C) +#define HAVE_GETWD 1 +#endif + +/* EOF xm-arm32.h */ + +