Diffstat (limited to 'gcc/config/bfin')
-rw-r--r--  gcc/config/bfin/bfin-modes.def              28
-rw-r--r--  gcc/config/bfin/bfin-protos.h              122
-rw-r--r--  gcc/config/bfin/bfin.c                    6695
-rw-r--r--  gcc/config/bfin/bfin.h                    1220
-rw-r--r--  gcc/config/bfin/bfin.md                   4211
-rw-r--r--  gcc/config/bfin/bfin.opt                   101
-rw-r--r--  gcc/config/bfin/constraints.md             225
-rw-r--r--  gcc/config/bfin/crti.s                      59
-rw-r--r--  gcc/config/bfin/crtlibid.s                  29
-rw-r--r--  gcc/config/bfin/crtn.s                      50
-rw-r--r--  gcc/config/bfin/elf.h                       73
-rw-r--r--  gcc/config/bfin/lib1funcs.asm              146
-rw-r--r--  gcc/config/bfin/libgcc-bfin.ver           1914
-rw-r--r--  gcc/config/bfin/linux-unwind.h             164
-rw-r--r--  gcc/config/bfin/linux.h                     54
-rw-r--r--  gcc/config/bfin/predicates.md              241
-rw-r--r--  gcc/config/bfin/print-sysroot-suffix.sh     81
-rw-r--r--  gcc/config/bfin/rtems.h                     28
-rw-r--r--  gcc/config/bfin/sync.md                    178
-rw-r--r--  gcc/config/bfin/t-bfin                      43
-rw-r--r--  gcc/config/bfin/t-bfin-elf                  81
-rw-r--r--  gcc/config/bfin/t-bfin-linux                72
-rw-r--r--  gcc/config/bfin/t-bfin-uclinux              72
-rw-r--r--  gcc/config/bfin/t-rtems                      6
-rw-r--r--  gcc/config/bfin/uclinux.h                   41
25 files changed, 15934 insertions(+), 0 deletions(-)
diff --git a/gcc/config/bfin/bfin-modes.def b/gcc/config/bfin/bfin-modes.def
new file mode 100644
index 000000000..27459cc13
--- /dev/null
+++ b/gcc/config/bfin/bfin-modes.def
@@ -0,0 +1,28 @@
+/* Definitions of target machine for GNU compiler, for Blackfin.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* PDImode for the 40-bit accumulators. */
+PARTIAL_INT_MODE (DI);
+
+/* Two of those - covering both accumulators for vector multiplications. */
+VECTOR_MODE (INT, PDI, 2);
+
+VECTOR_MODE (INT, HI, 2); /* V2HI */
+VECTOR_MODE (INT, SI, 2); /* V2SI - occasionally used. */
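
These modes surface throughout the port; for instance, bfin.c saves and
restores the 40-bit accumulators through PDImode registers.  A minimal
sketch of that usage, assuming the standard GCC RTL helpers:

    /* Sketch only: an RTL reference to accumulator A0 in PDImode, the
       40-bit partial-integer mode declared above.  REG_A0 is the port's
       hard register number for A0, defined in bfin.h.  */
    rtx acc = gen_rtx_REG (PDImode, REG_A0);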
diff --git a/gcc/config/bfin/bfin-protos.h b/gcc/config/bfin/bfin-protos.h
new file mode 100644
index 000000000..1e85e16ff
--- /dev/null
+++ b/gcc/config/bfin/bfin-protos.h
@@ -0,0 +1,122 @@
+/* Prototypes for Blackfin functions used in the md file & elsewhere.
+ Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Function prototypes that cannot exist in bfin.h due to dependency
+ complications. */
+#ifndef GCC_BFIN_PROTOS_H
+#define GCC_BFIN_PROTOS_H
+
+/* For the anomaly 05-00-0245 */
+#define WA_SPECULATIVE_LOADS 0x00000001
+#define ENABLE_WA_SPECULATIVE_LOADS \
+ (bfin_workarounds & WA_SPECULATIVE_LOADS)
+
+/* For the anomaly 05-00-0244 */
+#define WA_SPECULATIVE_SYNCS 0x00000002
+#define ENABLE_WA_SPECULATIVE_SYNCS \
+ (bfin_workarounds & WA_SPECULATIVE_SYNCS)
+
+/* For the anomaly 05-00-0371 */
+#define WA_RETS 0x00000004
+#define ENABLE_WA_RETS \
+ (bfin_workarounds & WA_RETS)
+
+/* For the anomaly 05-00-0426 */
+#define WA_INDIRECT_CALLS 0x00000008
+#define ENABLE_WA_INDIRECT_CALLS \
+ ((bfin_workarounds & WA_INDIRECT_CALLS) && !TARGET_ICPLB)
+
+#define WA_05000257 0x00000010
+#define ENABLE_WA_05000257 \
+ (bfin_workarounds & WA_05000257)
+
+#define WA_05000283 0x00000020
+#define ENABLE_WA_05000283 \
+ (bfin_workarounds & WA_05000283)
+
+#define WA_05000315 0x00000040
+#define ENABLE_WA_05000315 \
+ (bfin_workarounds & WA_05000315)
+
+/* For the anomaly 05-00-0312 */
+#define WA_LOAD_LCREGS 0x00000080
+#define ENABLE_WA_LOAD_LCREGS \
+ (bfin_workarounds & WA_LOAD_LCREGS)
+
+#define WA_05000074 0x00000100
+#define ENABLE_WA_05000074 \
+ (bfin_workarounds & WA_05000074)
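
Each ENABLE_WA_* macro is a simple bit test against the global
bfin_workarounds mask, which the -mcpu/-msi-revision handling in bfin.c
populates.  A standalone sketch of the pattern (mask values copied from
above; the real driver code is not shown in this hunk):

    #include <stdio.h>

    #define WA_SPECULATIVE_LOADS 0x00000001
    #define WA_RETS              0x00000004

    static unsigned int bfin_workarounds;  /* normally set from -mcpu */

    int
    main (void)
    {
      bfin_workarounds = WA_SPECULATIVE_LOADS | WA_RETS;
      if (bfin_workarounds & WA_SPECULATIVE_LOADS)
        puts ("anomaly 05-00-0245 workaround enabled");
      if (bfin_workarounds & WA_RETS)
        puts ("anomaly 05-00-0371 workaround enabled");
      return 0;
    }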
+
+#define Mmode enum machine_mode
+
+extern bool function_arg_regno_p (int);
+
+extern const char *output_load_immediate (rtx *);
+extern const char *output_casesi_internal (rtx *);
+extern char *bfin_asm_long (void);
+extern char *bfin_asm_short (void);
+extern int log2constp (unsigned HOST_WIDE_INT);
+
+extern bool bfin_legitimate_constant_p (rtx);
+extern int hard_regno_mode_ok (int, Mmode);
+extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx);
+extern HOST_WIDE_INT bfin_initial_elimination_offset (int, int);
+
+extern int effective_address_32bit_p (rtx, Mmode);
+extern int symbolic_reference_mentioned_p (rtx);
+extern rtx bfin_gen_compare (rtx, Mmode);
+extern bool expand_move (rtx *, Mmode);
+extern void bfin_expand_call (rtx, rtx, rtx, rtx, int);
+extern bool bfin_longcall_p (rtx, int);
+extern bool bfin_dsp_memref_p (rtx);
+extern bool bfin_expand_movmem (rtx, rtx, rtx, rtx);
+
+extern int bfin_register_move_cost (enum machine_mode, enum reg_class,
+ enum reg_class);
+extern int bfin_memory_move_cost (enum machine_mode, enum reg_class, int in);
+extern enum reg_class secondary_input_reload_class (enum reg_class, Mmode,
+ rtx);
+extern enum reg_class secondary_output_reload_class (enum reg_class, Mmode,
+ rtx);
+extern char *section_asm_op_1 (SECT_ENUM_T);
+extern char *section_asm_op (SECT_ENUM_T);
+extern void print_operand (FILE *, rtx, char);
+extern void print_address_operand (FILE *, rtx);
+extern void split_di (rtx [], int, rtx [], rtx []);
+extern int split_load_immediate (rtx []);
+extern void emit_pic_move (rtx *, Mmode);
+extern void asm_conditional_branch (rtx, rtx *, int, int);
+
+extern unsigned bfin_local_alignment (tree, unsigned);
+extern rtx bfin_va_arg (tree, tree);
+
+extern void bfin_expand_prologue (void);
+extern void bfin_expand_epilogue (int, int, bool);
+extern int push_multiple_operation (rtx, Mmode);
+extern int pop_multiple_operation (rtx, Mmode);
+extern void output_push_multiple (rtx, rtx *);
+extern void output_pop_multiple (rtx, rtx *);
+extern int bfin_hard_regno_rename_ok (unsigned int, unsigned int);
+extern rtx bfin_return_addr_rtx (int);
+extern void bfin_hardware_loop (void);
+#undef Mmode
+
+#endif
+
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
new file mode 100644
index 000000000..60cd09eff
--- /dev/null
+++ b/gcc/config/bfin/bfin.c
@@ -0,0 +1,6695 @@
+/* The Blackfin code generation auxiliary output file.
+ Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "input.h"
+#include "target.h"
+#include "target-def.h"
+#include "expr.h"
+#include "diagnostic-core.h"
+#include "recog.h"
+#include "optabs.h"
+#include "ggc.h"
+#include "integrate.h"
+#include "cgraph.h"
+#include "langhooks.h"
+#include "bfin-protos.h"
+#include "tm-preds.h"
+#include "tm-constrs.h"
+#include "gt-bfin.h"
+#include "basic-block.h"
+#include "cfglayout.h"
+#include "timevar.h"
+#include "df.h"
+#include "sel-sched.h"
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+struct GTY(()) machine_function
+{
+ /* Set if we are notified by the doloop pass that a hardware loop
+ was created. */
+ int has_hardware_loops;
+
+ /* Set if we create a memcpy pattern that uses loop registers. */
+ int has_loopreg_clobber;
+};
+
+/* RTX for condition code flag register and RETS register */
+extern GTY(()) rtx bfin_cc_rtx;
+extern GTY(()) rtx bfin_rets_rtx;
+rtx bfin_cc_rtx, bfin_rets_rtx;
+
+int max_arg_registers = 0;
+
+/* Arrays used when emitting register names. */
+const char *short_reg_names[] = SHORT_REGISTER_NAMES;
+const char *high_reg_names[] = HIGH_REGISTER_NAMES;
+const char *dregs_pair_names[] = DREGS_PAIR_NAMES;
+const char *byte_reg_names[] = BYTE_REGISTER_NAMES;
+
+static int arg_regs[] = FUNCTION_ARG_REGISTERS;
+static int ret_regs[] = FUNCTION_RETURN_REGISTERS;
+
+/* Nonzero if -mshared-library-id was given. */
+static int bfin_lib_id_given;
+
+/* -mcpu support */
+bfin_cpu_t bfin_cpu_type = BFIN_CPU_UNKNOWN;
+
+/* -msi-revision support.  There are two special values:
+ -1 -msi-revision=none.
+ 0xffff -msi-revision=any. */
+int bfin_si_revision;
+
+/* The workarounds enabled */
+unsigned int bfin_workarounds = 0;
+
+struct bfin_cpu
+{
+ const char *name;
+ bfin_cpu_t type;
+ int si_revision;
+ unsigned int workarounds;
+};
+
+struct bfin_cpu bfin_cpus[] =
+{
+ {"bf512", BFIN_CPU_BF512, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+
+ {"bf514", BFIN_CPU_BF514, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+
+ {"bf516", BFIN_CPU_BF516, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+
+ {"bf518", BFIN_CPU_BF518, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+
+ {"bf522", BFIN_CPU_BF522, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf522", BFIN_CPU_BF522, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf522", BFIN_CPU_BF522, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf523", BFIN_CPU_BF523, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf523", BFIN_CPU_BF523, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf523", BFIN_CPU_BF523, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf524", BFIN_CPU_BF524, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf524", BFIN_CPU_BF524, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf524", BFIN_CPU_BF524, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf525", BFIN_CPU_BF525, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf525", BFIN_CPU_BF525, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf525", BFIN_CPU_BF525, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf526", BFIN_CPU_BF526, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf526", BFIN_CPU_BF526, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf526", BFIN_CPU_BF526, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf527", BFIN_CPU_BF527, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_05000074},
+ {"bf527", BFIN_CPU_BF527, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+ {"bf527", BFIN_CPU_BF527, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000074},
+
+ {"bf531", BFIN_CPU_BF531, 0x0006,
+ WA_SPECULATIVE_LOADS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf531", BFIN_CPU_BF531, 0x0005,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000283 | WA_05000315
+ | WA_LOAD_LCREGS | WA_05000074},
+ {"bf531", BFIN_CPU_BF531, 0x0004,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf531", BFIN_CPU_BF531, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf532", BFIN_CPU_BF532, 0x0006,
+ WA_SPECULATIVE_LOADS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf532", BFIN_CPU_BF532, 0x0005,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000283 | WA_05000315
+ | WA_LOAD_LCREGS | WA_05000074},
+ {"bf532", BFIN_CPU_BF532, 0x0004,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf532", BFIN_CPU_BF532, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf533", BFIN_CPU_BF533, 0x0006,
+ WA_SPECULATIVE_LOADS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf533", BFIN_CPU_BF533, 0x0005,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_05000283 | WA_05000315
+ | WA_LOAD_LCREGS | WA_05000074},
+ {"bf533", BFIN_CPU_BF533, 0x0004,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf533", BFIN_CPU_BF533, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf534", BFIN_CPU_BF534, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf534", BFIN_CPU_BF534, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf534", BFIN_CPU_BF534, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf536", BFIN_CPU_BF536, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf536", BFIN_CPU_BF536, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf536", BFIN_CPU_BF536, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf537", BFIN_CPU_BF537, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf537", BFIN_CPU_BF537, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf537", BFIN_CPU_BF537, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf538", BFIN_CPU_BF538, 0x0005,
+ WA_SPECULATIVE_LOADS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf538", BFIN_CPU_BF538, 0x0004,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf538", BFIN_CPU_BF538, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_RETS
+ | WA_05000283 | WA_05000315 | WA_LOAD_LCREGS | WA_05000074},
+ {"bf538", BFIN_CPU_BF538, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf539", BFIN_CPU_BF539, 0x0005,
+ WA_SPECULATIVE_LOADS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf539", BFIN_CPU_BF539, 0x0004,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_LOAD_LCREGS | WA_05000074},
+ {"bf539", BFIN_CPU_BF539, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_RETS
+ | WA_05000283 | WA_05000315 | WA_LOAD_LCREGS | WA_05000074},
+ {"bf539", BFIN_CPU_BF539, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf542m", BFIN_CPU_BF542M, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+
+ {"bf542", BFIN_CPU_BF542, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf542", BFIN_CPU_BF542, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf542", BFIN_CPU_BF542, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf544m", BFIN_CPU_BF544M, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+
+ {"bf544", BFIN_CPU_BF544, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf544", BFIN_CPU_BF544, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf544", BFIN_CPU_BF544, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf547m", BFIN_CPU_BF547M, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+
+ {"bf547", BFIN_CPU_BF547, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf547", BFIN_CPU_BF547, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf547", BFIN_CPU_BF547, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf548m", BFIN_CPU_BF548M, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+
+ {"bf548", BFIN_CPU_BF548, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf548", BFIN_CPU_BF548, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf548", BFIN_CPU_BF548, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf549m", BFIN_CPU_BF549M, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+
+ {"bf549", BFIN_CPU_BF549, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf549", BFIN_CPU_BF549, 0x0001,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_05000074},
+ {"bf549", BFIN_CPU_BF549, 0x0000,
+ WA_SPECULATIVE_LOADS | WA_RETS | WA_INDIRECT_CALLS | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {"bf561", BFIN_CPU_BF561, 0x0005, WA_RETS
+ | WA_05000283 | WA_05000315 | WA_LOAD_LCREGS | WA_05000074},
+ {"bf561", BFIN_CPU_BF561, 0x0003,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+ {"bf561", BFIN_CPU_BF561, 0x0002,
+ WA_SPECULATIVE_LOADS | WA_SPECULATIVE_SYNCS | WA_RETS
+ | WA_05000283 | WA_05000257 | WA_05000315 | WA_LOAD_LCREGS
+ | WA_05000074},
+
+ {NULL, BFIN_CPU_UNKNOWN, 0, 0}
+};
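
The table is keyed on (name, silicon revision), with newer revisions of a
part listed first.  A hedged sketch of a lookup over it; the actual
-mcpu=/-msi-revision= parsing lives later in this file and may differ:

    #include <string.h>

    /* Sketch: return the first entry matching CPU_NAME whose revision
       field does not exceed SI_REV, or NULL if the CPU is unknown.  */
    static const struct bfin_cpu *
    find_cpu (const char *cpu_name, int si_rev)
    {
      const struct bfin_cpu *p;

      for (p = bfin_cpus; p->name != NULL; p++)
        if (strcmp (p->name, cpu_name) == 0 && p->si_revision <= si_rev)
          return p;
      return NULL;
    }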
+
+int splitting_for_sched, splitting_loops;
+
+static void
+bfin_globalize_label (FILE *stream, const char *name)
+{
+ fputs (".global ", stream);
+ assemble_name (stream, name);
+  fputc (';', stream);
+  fputc ('\n', stream);
+}
+
+static void
+output_file_start (void)
+{
+ FILE *file = asm_out_file;
+ int i;
+
+ fprintf (file, ".file \"%s\";\n", input_filename);
+
+ for (i = 0; arg_regs[i] >= 0; i++)
+ ;
+  max_arg_registers = i;	/* how many argument registers are used */
+}
+
+/* Examine machine-dependent attributes of function type FUNTYPE and return
+   the kind of function it is.  See the definition of E_FUNKIND.  */
+
+static e_funkind
+funkind (const_tree funtype)
+{
+ tree attrs = TYPE_ATTRIBUTES (funtype);
+ if (lookup_attribute ("interrupt_handler", attrs))
+ return INTERRUPT_HANDLER;
+ else if (lookup_attribute ("exception_handler", attrs))
+ return EXCPT_HANDLER;
+ else if (lookup_attribute ("nmi_handler", attrs))
+ return NMI_HANDLER;
+ else
+ return SUBROUTINE;
+}
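
funkind only inspects type attributes, so the function kind is chosen by
source-level declarations; the attribute handlers for these names are
registered elsewhere in this file.  For example:

    /* Declarations funkind would classify as INTERRUPT_HANDLER,
       EXCPT_HANDLER and NMI_HANDLER respectively.  */
    void isr (void) __attribute__ ((interrupt_handler));
    void exc (void) __attribute__ ((exception_handler));
    void nmi (void) __attribute__ ((nmi_handler));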
+
+/* Legitimize PIC addresses. If the address is already position-independent,
+ we return ORIG. Newly generated position-independent addresses go into a
+ reg. This is REG if nonzero, otherwise we allocate register(s) as
+ necessary. PICREG is the register holding the pointer to the PIC offset
+ table. */
+
+static rtx
+legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
+{
+ rtx addr = orig;
+ rtx new_rtx = orig;
+
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ {
+ int unspec;
+ rtx tmp;
+
+ if (TARGET_ID_SHARED_LIBRARY)
+ unspec = UNSPEC_MOVE_PIC;
+ else if (GET_CODE (addr) == SYMBOL_REF
+ && SYMBOL_REF_FUNCTION_P (addr))
+ unspec = UNSPEC_FUNCDESC_GOT17M4;
+ else
+ unspec = UNSPEC_MOVE_FDPIC;
+
+ if (reg == 0)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
+ new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
+
+ emit_move_insn (reg, new_rtx);
+ if (picreg == pic_offset_table_rtx)
+ crtl->uses_pic_offset_table = 1;
+ return reg;
+ }
+
+ else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
+ {
+ rtx base;
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ gcc_assert (GET_CODE (addr) == PLUS);
+ }
+
+ if (XEXP (addr, 0) == picreg)
+ return orig;
+
+ if (reg == 0)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
+ addr = legitimize_pic_address (XEXP (addr, 1),
+ base == reg ? NULL_RTX : reg,
+ picreg);
+
+ if (GET_CODE (addr) == CONST_INT)
+ {
+ gcc_assert (! reload_in_progress && ! reload_completed);
+ addr = force_reg (Pmode, addr);
+ }
+
+ if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
+ addr = XEXP (addr, 1);
+ }
+
+ return gen_rtx_PLUS (Pmode, base, addr);
+ }
+
+ return new_rtx;
+}
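
For the SYMBOL_REF/LABEL_REF case the result is a GOT load; schematically,
the RTL emitted above has the shape below (a sketch in dump notation, with
the unspec chosen exactly as in the code):

    (set (reg)
         (mem/u (plus (reg picreg)
                      (unspec [(symbol_ref "sym")] UNSPEC_MOVE_PIC))))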
+
+/* Stack frame layout. */
+
+/* For a given REGNO, determine whether it must be saved in the function
+ prologue. IS_INTHANDLER specifies whether we're generating a normal
+ prologue or an interrupt/exception one. */
+static bool
+must_save_p (bool is_inthandler, unsigned regno)
+{
+ if (D_REGNO_P (regno))
+ {
+ bool is_eh_return_reg = false;
+ if (crtl->calls_eh_return)
+ {
+ unsigned j;
+ for (j = 0; ; j++)
+ {
+ unsigned test = EH_RETURN_DATA_REGNO (j);
+ if (test == INVALID_REGNUM)
+ break;
+ if (test == regno)
+ is_eh_return_reg = true;
+ }
+ }
+
+ return (is_eh_return_reg
+ || (df_regs_ever_live_p (regno)
+ && !fixed_regs[regno]
+ && (is_inthandler || !call_used_regs[regno])));
+ }
+ else if (P_REGNO_P (regno))
+ {
+ return ((df_regs_ever_live_p (regno)
+ && !fixed_regs[regno]
+ && (is_inthandler || !call_used_regs[regno]))
+ || (is_inthandler
+ && (ENABLE_WA_05000283 || ENABLE_WA_05000315)
+ && regno == REG_P5)
+ || (!TARGET_FDPIC
+ && regno == PIC_OFFSET_TABLE_REGNUM
+ && (crtl->uses_pic_offset_table
+ || (TARGET_ID_SHARED_LIBRARY && !current_function_is_leaf))));
+ }
+ else
+ return ((is_inthandler || !call_used_regs[regno])
+ && (df_regs_ever_live_p (regno)
+ || (!leaf_function_p () && call_used_regs[regno])));
+}
+
+/* Compute the number of DREGS to save with a push_multiple operation.
+ This could include registers that aren't modified in the function,
+ since push_multiple only takes a range of registers.
+ If IS_INTHANDLER, then everything that is live must be saved, even
+ if normally call-clobbered.
+ If CONSECUTIVE, return the number of registers we can save in one
+ instruction with a push/pop multiple instruction. */
+
+static int
+n_dregs_to_save (bool is_inthandler, bool consecutive)
+{
+ int count = 0;
+ unsigned i;
+
+ for (i = REG_R7 + 1; i-- != REG_R0;)
+ {
+ if (must_save_p (is_inthandler, i))
+ count++;
+ else if (consecutive)
+ return count;
+ }
+ return count;
+}
+
+/* Like n_dregs_to_save, but compute number of PREGS to save. */
+
+static int
+n_pregs_to_save (bool is_inthandler, bool consecutive)
+{
+ int count = 0;
+ unsigned i;
+
+ for (i = REG_P5 + 1; i-- != REG_P0;)
+ if (must_save_p (is_inthandler, i))
+ count++;
+ else if (consecutive)
+ return count;
+ return count;
+}
+
+/* Determine if we are going to save the frame pointer in the prologue. */
+
+static bool
+must_save_fp_p (void)
+{
+ return df_regs_ever_live_p (REG_FP);
+}
+
+/* Determine if we are going to save the RETS register. */
+static bool
+must_save_rets_p (void)
+{
+ return df_regs_ever_live_p (REG_RETS);
+}
+
+static bool
+stack_frame_needed_p (void)
+{
+ /* EH return puts a new return address into the frame using an
+ address relative to the frame pointer. */
+ if (crtl->calls_eh_return)
+ return true;
+ return frame_pointer_needed;
+}
+
+/* Emit code to save registers in the prologue. SAVEALL is nonzero if we
+ must save all registers; this is used for interrupt handlers.
+ SPREG contains (reg:SI REG_SP). IS_INTHANDLER is true if we're doing
+ this for an interrupt (or exception) handler. */
+
+static void
+expand_prologue_reg_save (rtx spreg, int saveall, bool is_inthandler)
+{
+ rtx predec1 = gen_rtx_PRE_DEC (SImode, spreg);
+ rtx predec = gen_rtx_MEM (SImode, predec1);
+ int ndregs = saveall ? 8 : n_dregs_to_save (is_inthandler, false);
+ int npregs = saveall ? 6 : n_pregs_to_save (is_inthandler, false);
+ int ndregs_consec = saveall ? 8 : n_dregs_to_save (is_inthandler, true);
+ int npregs_consec = saveall ? 6 : n_pregs_to_save (is_inthandler, true);
+ int dregno, pregno;
+ int total_consec = ndregs_consec + npregs_consec;
+ int i, d_to_save;
+
+ if (saveall || is_inthandler)
+ {
+ rtx insn = emit_move_insn (predec, gen_rtx_REG (SImode, REG_ASTAT));
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ for (dregno = REG_LT0; dregno <= REG_LB1; dregno++)
+ if (! current_function_is_leaf
+ || cfun->machine->has_hardware_loops
+ || cfun->machine->has_loopreg_clobber
+ || (ENABLE_WA_05000257
+ && (dregno == REG_LC0 || dregno == REG_LC1)))
+ {
+ insn = emit_move_insn (predec, gen_rtx_REG (SImode, dregno));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ if (total_consec != 0)
+ {
+ rtx insn;
+ rtx val = GEN_INT (-total_consec * 4);
+ rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_consec + 2));
+
+ XVECEXP (pat, 0, 0) = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, val),
+ UNSPEC_PUSH_MULTIPLE);
+ XVECEXP (pat, 0, total_consec + 1) = gen_rtx_SET (VOIDmode, spreg,
+ gen_rtx_PLUS (Pmode,
+ spreg,
+ val));
+ RTX_FRAME_RELATED_P (XVECEXP (pat, 0, total_consec + 1)) = 1;
+ d_to_save = ndregs_consec;
+ dregno = REG_R7 + 1 - ndregs_consec;
+ pregno = REG_P5 + 1 - npregs_consec;
+ for (i = 0; i < total_consec; i++)
+ {
+ rtx memref = gen_rtx_MEM (word_mode,
+ gen_rtx_PLUS (Pmode, spreg,
+ GEN_INT (- i * 4 - 4)));
+ rtx subpat;
+ if (d_to_save > 0)
+ {
+ subpat = gen_rtx_SET (VOIDmode, memref, gen_rtx_REG (word_mode,
+ dregno++));
+ d_to_save--;
+ }
+ else
+ {
+ subpat = gen_rtx_SET (VOIDmode, memref, gen_rtx_REG (word_mode,
+ pregno++));
+ }
+ XVECEXP (pat, 0, i + 1) = subpat;
+ RTX_FRAME_RELATED_P (subpat) = 1;
+ }
+ insn = emit_insn (pat);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ for (dregno = REG_R0; ndregs != ndregs_consec; dregno++)
+ {
+ if (must_save_p (is_inthandler, dregno))
+ {
+ rtx insn = emit_move_insn (predec, gen_rtx_REG (word_mode, dregno));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ ndregs--;
+ }
+ }
+ for (pregno = REG_P0; npregs != npregs_consec; pregno++)
+ {
+ if (must_save_p (is_inthandler, pregno))
+ {
+ rtx insn = emit_move_insn (predec, gen_rtx_REG (word_mode, pregno));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ npregs--;
+ }
+ }
+ for (i = REG_P7 + 1; i < REG_CC; i++)
+ if (saveall
+ || (is_inthandler
+ && (df_regs_ever_live_p (i)
+ || (!leaf_function_p () && call_used_regs[i]))))
+ {
+ rtx insn;
+ if (i == REG_A0 || i == REG_A1)
+ insn = emit_move_insn (gen_rtx_MEM (PDImode, predec1),
+ gen_rtx_REG (PDImode, i));
+ else
+ insn = emit_move_insn (predec, gen_rtx_REG (SImode, i));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
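
When total_consec is nonzero, the PARALLEL built above matches the port's
push-multiple pattern.  As a sketch, for ndregs_consec == 4 and
npregs_consec == 3 the resulting instruction is the Blackfin ISA's
push-multiple form, roughly:

    [--SP] = (R7:4, P5:3);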
+
+/* Emit code to restore registers in the epilogue. SAVEALL is nonzero if we
+ must save all registers; this is used for interrupt handlers.
+ SPREG contains (reg:SI REG_SP). IS_INTHANDLER is true if we're doing
+ this for an interrupt (or exception) handler. */
+
+static void
+expand_epilogue_reg_restore (rtx spreg, bool saveall, bool is_inthandler)
+{
+ rtx postinc1 = gen_rtx_POST_INC (SImode, spreg);
+ rtx postinc = gen_rtx_MEM (SImode, postinc1);
+
+ int ndregs = saveall ? 8 : n_dregs_to_save (is_inthandler, false);
+ int npregs = saveall ? 6 : n_pregs_to_save (is_inthandler, false);
+ int ndregs_consec = saveall ? 8 : n_dregs_to_save (is_inthandler, true);
+ int npregs_consec = saveall ? 6 : n_pregs_to_save (is_inthandler, true);
+ int total_consec = ndregs_consec + npregs_consec;
+ int i, regno;
+ rtx insn;
+
+ /* A slightly crude technique to stop flow from trying to delete "dead"
+ insns. */
+ MEM_VOLATILE_P (postinc) = 1;
+
+ for (i = REG_CC - 1; i > REG_P7; i--)
+ if (saveall
+ || (is_inthandler
+ && (df_regs_ever_live_p (i)
+ || (!leaf_function_p () && call_used_regs[i]))))
+ {
+ if (i == REG_A0 || i == REG_A1)
+ {
+ rtx mem = gen_rtx_MEM (PDImode, postinc1);
+ MEM_VOLATILE_P (mem) = 1;
+ emit_move_insn (gen_rtx_REG (PDImode, i), mem);
+ }
+ else
+ emit_move_insn (gen_rtx_REG (SImode, i), postinc);
+ }
+
+ regno = REG_P5 - npregs_consec;
+ for (; npregs != npregs_consec; regno--)
+ {
+ if (must_save_p (is_inthandler, regno))
+ {
+ emit_move_insn (gen_rtx_REG (word_mode, regno), postinc);
+ npregs--;
+ }
+ }
+ regno = REG_R7 - ndregs_consec;
+ for (; ndregs != ndregs_consec; regno--)
+ {
+ if (must_save_p (is_inthandler, regno))
+ {
+ emit_move_insn (gen_rtx_REG (word_mode, regno), postinc);
+ ndregs--;
+ }
+ }
+
+ if (total_consec != 0)
+ {
+ rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_consec + 1));
+ XVECEXP (pat, 0, 0)
+ = gen_rtx_SET (VOIDmode, spreg,
+ gen_rtx_PLUS (Pmode, spreg,
+ GEN_INT (total_consec * 4)));
+
+ if (npregs_consec > 0)
+ regno = REG_P5 + 1;
+ else
+ regno = REG_R7 + 1;
+
+ for (i = 0; i < total_consec; i++)
+ {
+ rtx addr = (i > 0
+ ? gen_rtx_PLUS (Pmode, spreg, GEN_INT (i * 4))
+ : spreg);
+ rtx memref = gen_rtx_MEM (word_mode, addr);
+
+ regno--;
+ XVECEXP (pat, 0, i + 1)
+ = gen_rtx_SET (VOIDmode, gen_rtx_REG (word_mode, regno), memref);
+
+ if (npregs_consec > 0)
+ {
+ if (--npregs_consec == 0)
+ regno = REG_R7 + 1;
+ }
+ }
+
+ insn = emit_insn (pat);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if (saveall || is_inthandler)
+ {
+ for (regno = REG_LB1; regno >= REG_LT0; regno--)
+ if (! current_function_is_leaf
+ || cfun->machine->has_hardware_loops
+ || cfun->machine->has_loopreg_clobber
+ || (ENABLE_WA_05000257 && (regno == REG_LC0 || regno == REG_LC1)))
+ emit_move_insn (gen_rtx_REG (SImode, regno), postinc);
+
+ emit_move_insn (gen_rtx_REG (SImode, REG_ASTAT), postinc);
+ }
+}
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+   Blackfin specific:
+   - The VDSP C compiler manual (our ABI) says that a variable-argument
+     function should save the R0, R1 and R2 registers in the stack.
+   - The caller will always leave space on the stack for the
+     arguments that are passed in registers, so we don't have
+     to leave any extra space.
+   - Now the va_start pointer can access all arguments from the stack.  */
+
+static void
+setup_incoming_varargs (CUMULATIVE_ARGS *cum,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED, int *pretend_size,
+ int no_rtl)
+{
+ rtx mem;
+ int i;
+
+ if (no_rtl)
+ return;
+
+ /* The move for named arguments will be generated automatically by the
+ compiler. We need to generate the move rtx for the unnamed arguments
+ if they are in the first 3 words. We assume at least 1 named argument
+ exists, so we never generate [ARGP] = R0 here. */
+
+ for (i = cum->words + 1; i < max_arg_registers; i++)
+ {
+ mem = gen_rtx_MEM (Pmode,
+ plus_constant (arg_pointer_rtx, (i * UNITS_PER_WORD)));
+ emit_move_insn (mem, gen_rtx_REG (Pmode, i));
+ }
+
+ *pretend_size = 0;
+}
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may
+ be accessed via the stack pointer) in functions that seem suitable. */
+
+static bool
+bfin_frame_pointer_required (void)
+{
+ e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
+
+ if (fkind != SUBROUTINE)
+ return true;
+
+ /* We turn on -fomit-frame-pointer if -momit-leaf-frame-pointer is used,
+ so we have to override it for non-leaf functions. */
+ if (TARGET_OMIT_LEAF_FRAME_POINTER && ! current_function_is_leaf)
+ return true;
+
+ return false;
+}
+
+/* Return the number of registers pushed during the prologue. */
+
+static int
+n_regs_saved_by_prologue (void)
+{
+ e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
+ bool is_inthandler = fkind != SUBROUTINE;
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl));
+ bool all = (lookup_attribute ("saveall", attrs) != NULL_TREE
+ || (is_inthandler && !current_function_is_leaf));
+ int ndregs = all ? 8 : n_dregs_to_save (is_inthandler, false);
+ int npregs = all ? 6 : n_pregs_to_save (is_inthandler, false);
+ int n = ndregs + npregs;
+ int i;
+
+ if (all || stack_frame_needed_p ())
+ n += 2;
+ else
+ {
+ if (must_save_fp_p ())
+ n++;
+ if (must_save_rets_p ())
+ n++;
+ }
+
+ if (fkind != SUBROUTINE || all)
+ {
+ /* Increment once for ASTAT. */
+ n++;
+ if (! current_function_is_leaf
+ || cfun->machine->has_hardware_loops
+ || cfun->machine->has_loopreg_clobber)
+ {
+ n += 6;
+ }
+ }
+
+ if (fkind != SUBROUTINE)
+ {
+ /* RETE/X/N. */
+ if (lookup_attribute ("nesting", attrs))
+ n++;
+ }
+
+ for (i = REG_P7 + 1; i < REG_CC; i++)
+ if (all
+ || (fkind != SUBROUTINE
+ && (df_regs_ever_live_p (i)
+ || (!leaf_function_p () && call_used_regs[i]))))
+ n += i == REG_A0 || i == REG_A1 ? 2 : 1;
+
+ return n;
+}
+
+/* Given FROM and TO register numbers, say whether this elimination is
+ allowed. Frame pointer elimination is automatically handled.
+
+ All other eliminations are valid. */
+
+static bool
+bfin_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
+}
+
+/* Return the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+HOST_WIDE_INT
+bfin_initial_elimination_offset (int from, int to)
+{
+ HOST_WIDE_INT offset = 0;
+
+ if (from == ARG_POINTER_REGNUM)
+ offset = n_regs_saved_by_prologue () * 4;
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (crtl->outgoing_args_size >= FIXED_STACK_AREA)
+ offset += crtl->outgoing_args_size;
+ else if (crtl->outgoing_args_size)
+ offset += FIXED_STACK_AREA;
+
+ offset += get_frame_size ();
+ }
+
+ return offset;
+}
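
A worked example of the arithmetic, as standalone C; FIXED_STACK_AREA here
is an illustrative value, not necessarily the one bfin.h defines:

    #include <stdio.h>

    #define FIXED_STACK_AREA 12	/* illustrative only */

    int
    main (void)
    {
      long n_saved_regs = 5;	/* registers pushed by the prologue */
      long outgoing_args = 8;	/* crtl->outgoing_args_size */
      long frame = 16;		/* get_frame_size () */

      /* ARG_POINTER -> STACK_POINTER elimination, as computed above:
         saved registers, then outgoing arguments (at least
         FIXED_STACK_AREA when nonzero), then the local frame.  */
      long offset = n_saved_regs * 4;
      offset += (outgoing_args >= FIXED_STACK_AREA
                 ? outgoing_args : FIXED_STACK_AREA);
      offset += frame;
      printf ("offset = %ld\n", offset);	/* 20 + 12 + 16 = 48 */
      return 0;
    }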
+
+/* Emit code to load a constant CONSTANT into register REG; setting
+ RTX_FRAME_RELATED_P on all insns we generate if RELATED is true.
+ Make sure that the insns we generate need not be split. */
+
+static void
+frame_related_constant_load (rtx reg, HOST_WIDE_INT constant, bool related)
+{
+ rtx insn;
+ rtx cst = GEN_INT (constant);
+
+ if (constant >= -32768 && constant < 65536)
+ insn = emit_move_insn (reg, cst);
+ else
+ {
+ /* We don't call split_load_immediate here, since dwarf2out.c can get
+ confused about some of the more clever sequences it can generate. */
+ insn = emit_insn (gen_movsi_high (reg, cst));
+ if (related)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_movsi_low (reg, reg, cst));
+ }
+ if (related)
+ RTX_FRAME_RELATED_P (insn) = 1;
+}
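
Constants outside the 16-bit move range are built in two halves; as a
sketch, loading 0x12345678 through the movsi_high/movsi_low pair emitted
above corresponds roughly to the Blackfin assembly:

    P1.H = 0x1234;	/* high 16 bits first */
    P1.L = 0x5678;	/* then the low 16 bits */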
+
+/* Generate efficient code to add a value to a P register.
+ Set RTX_FRAME_RELATED_P on the generated insns if FRAME is nonzero.
+ EPILOGUE_P is zero if this function is called for prologue,
+ otherwise it's nonzero. And it's less than zero if this is for
+ sibcall epilogue. */
+
+static void
+add_to_reg (rtx reg, HOST_WIDE_INT value, int frame, int epilogue_p)
+{
+ if (value == 0)
+ return;
+
+ /* Choose whether to use a sequence using a temporary register, or
+ a sequence with multiple adds. We can add a signed 7-bit value
+ in one instruction. */
+ if (value > 120 || value < -120)
+ {
+ rtx tmpreg;
+ rtx tmpreg2;
+ rtx insn;
+
+ tmpreg2 = NULL_RTX;
+
+      /* For a prologue or normal epilogue, P1 can be safely used
+	 as the temporary register.  For a sibcall epilogue, we try to find
+	 a call-used P register, which will be restored in the epilogue.
+	 If we cannot find such a P register, we have to use an I register
+	 to help us.  */
+
+ if (epilogue_p >= 0)
+ tmpreg = gen_rtx_REG (SImode, REG_P1);
+ else
+ {
+ int i;
+ for (i = REG_P0; i <= REG_P5; i++)
+ if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
+ || (!TARGET_FDPIC
+ && i == PIC_OFFSET_TABLE_REGNUM
+ && (crtl->uses_pic_offset_table
+ || (TARGET_ID_SHARED_LIBRARY
+ && ! current_function_is_leaf))))
+ break;
+ if (i <= REG_P5)
+ tmpreg = gen_rtx_REG (SImode, i);
+ else
+ {
+ tmpreg = gen_rtx_REG (SImode, REG_P1);
+ tmpreg2 = gen_rtx_REG (SImode, REG_I0);
+ emit_move_insn (tmpreg2, tmpreg);
+ }
+ }
+
+ if (frame)
+ frame_related_constant_load (tmpreg, value, TRUE);
+ else
+ insn = emit_move_insn (tmpreg, GEN_INT (value));
+
+ insn = emit_insn (gen_addsi3 (reg, reg, tmpreg));
+ if (frame)
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ if (tmpreg2 != NULL_RTX)
+ emit_move_insn (tmpreg, tmpreg2);
+ }
+ else
+ do
+ {
+ int size = value;
+ rtx insn;
+
+ if (size > 60)
+ size = 60;
+ else if (size < -60)
+ /* We could use -62, but that would leave the stack unaligned, so
+ it's no good. */
+ size = -60;
+
+ insn = emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
+ if (frame)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ value -= size;
+ }
+ while (value != 0);
+}
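
For |value| <= 120 the do-while loop above chains immediate adds of at
most 60 (a signed 7-bit immediate, capped at a multiple of 4 so the stack
stays aligned); e.g. add_to_reg (reg, 100, ...) emits, in effect:

    reg += 60;
    reg += 40;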
+
+/* Generate a LINK insn for a frame sized FRAME_SIZE. If this constant
+ is too large, generate a sequence of insns that has the same effect.
+ SPREG contains (reg:SI REG_SP). */
+
+static void
+emit_link_insn (rtx spreg, HOST_WIDE_INT frame_size)
+{
+ HOST_WIDE_INT link_size = frame_size;
+ rtx insn;
+ int i;
+
+ if (link_size > 262140)
+ link_size = 262140;
+
+ /* Use a LINK insn with as big a constant as possible, then subtract
+ any remaining size from the SP. */
+ insn = emit_insn (gen_link (GEN_INT (-8 - link_size)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx set = XVECEXP (PATTERN (insn), 0, i);
+ gcc_assert (GET_CODE (set) == SET);
+ RTX_FRAME_RELATED_P (set) = 1;
+ }
+
+ frame_size -= link_size;
+
+ if (frame_size > 0)
+ {
+ /* Must use a call-clobbered PREG that isn't the static chain. */
+ rtx tmpreg = gen_rtx_REG (Pmode, REG_P1);
+
+ frame_related_constant_load (tmpreg, -frame_size, TRUE);
+ insn = emit_insn (gen_addsi3 (spreg, spreg, tmpreg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
+
+/* Return the number of bytes we must reserve for outgoing arguments
+ in the current function's stack frame. */
+
+static HOST_WIDE_INT
+arg_area_size (void)
+{
+ if (crtl->outgoing_args_size)
+ {
+ if (crtl->outgoing_args_size >= FIXED_STACK_AREA)
+ return crtl->outgoing_args_size;
+ else
+ return FIXED_STACK_AREA;
+ }
+ return 0;
+}
+
+/* Save RETS and FP, and allocate a stack frame. ALL is true if the
+ function must save all its registers (true only for certain interrupt
+ handlers). */
+
+static void
+do_link (rtx spreg, HOST_WIDE_INT frame_size, bool all)
+{
+ frame_size += arg_area_size ();
+
+ if (all
+ || stack_frame_needed_p ()
+ || (must_save_rets_p () && must_save_fp_p ()))
+ emit_link_insn (spreg, frame_size);
+ else
+ {
+ if (must_save_rets_p ())
+ {
+ rtx pat = gen_movsi (gen_rtx_MEM (Pmode,
+ gen_rtx_PRE_DEC (Pmode, spreg)),
+ bfin_rets_rtx);
+ rtx insn = emit_insn (pat);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if (must_save_fp_p ())
+ {
+ rtx pat = gen_movsi (gen_rtx_MEM (Pmode,
+ gen_rtx_PRE_DEC (Pmode, spreg)),
+ gen_rtx_REG (Pmode, REG_FP));
+ rtx insn = emit_insn (pat);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ add_to_reg (spreg, -frame_size, 1, 0);
+ }
+}
+
+/* Like do_link, but used for epilogues to deallocate the stack frame.
+ EPILOGUE_P is zero if this function is called for prologue,
+ otherwise it's nonzero. And it's less than zero if this is for
+ sibcall epilogue. */
+
+static void
+do_unlink (rtx spreg, HOST_WIDE_INT frame_size, bool all, int epilogue_p)
+{
+ frame_size += arg_area_size ();
+
+ if (stack_frame_needed_p ())
+ emit_insn (gen_unlink ());
+ else
+ {
+ rtx postinc = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, spreg));
+
+ add_to_reg (spreg, frame_size, 0, epilogue_p);
+ if (all || must_save_fp_p ())
+ {
+ rtx fpreg = gen_rtx_REG (Pmode, REG_FP);
+ emit_move_insn (fpreg, postinc);
+ emit_use (fpreg);
+ }
+ if (all || must_save_rets_p ())
+ {
+ emit_move_insn (bfin_rets_rtx, postinc);
+ emit_use (bfin_rets_rtx);
+ }
+ }
+}
+
+/* Generate a prologue suitable for a function of kind FKIND. This is
+ called for interrupt and exception handler prologues.
+ SPREG contains (reg:SI REG_SP). */
+
+static void
+expand_interrupt_handler_prologue (rtx spreg, e_funkind fkind, bool all)
+{
+ HOST_WIDE_INT frame_size = get_frame_size ();
+ rtx predec1 = gen_rtx_PRE_DEC (SImode, spreg);
+ rtx predec = gen_rtx_MEM (SImode, predec1);
+ rtx insn;
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl));
+ tree kspisusp = lookup_attribute ("kspisusp", attrs);
+
+ if (kspisusp)
+ {
+ insn = emit_move_insn (spreg, gen_rtx_REG (Pmode, REG_USP));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* We need space on the stack in case we need to save the argument
+ registers. */
+ if (fkind == EXCPT_HANDLER)
+ {
+ insn = emit_insn (gen_addsi3 (spreg, spreg, GEN_INT (-12)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* If we're calling other functions, they won't save their call-clobbered
+ registers, so we must save everything here. */
+ if (!current_function_is_leaf)
+ all = true;
+ expand_prologue_reg_save (spreg, all, true);
+
+ if (ENABLE_WA_05000283 || ENABLE_WA_05000315)
+ {
+ rtx chipid = GEN_INT (trunc_int_for_mode (0xFFC00014, SImode));
+ rtx p5reg = gen_rtx_REG (Pmode, REG_P5);
+ emit_insn (gen_movbi (bfin_cc_rtx, const1_rtx));
+ emit_insn (gen_movsi_high (p5reg, chipid));
+ emit_insn (gen_movsi_low (p5reg, p5reg, chipid));
+ emit_insn (gen_dummy_load (p5reg, bfin_cc_rtx));
+ }
+
+ if (lookup_attribute ("nesting", attrs))
+ {
+ rtx srcreg = gen_rtx_REG (Pmode, ret_regs[fkind]);
+ insn = emit_move_insn (predec, srcreg);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ do_link (spreg, frame_size, all);
+
+ if (fkind == EXCPT_HANDLER)
+ {
+ rtx r0reg = gen_rtx_REG (SImode, REG_R0);
+ rtx r1reg = gen_rtx_REG (SImode, REG_R1);
+ rtx r2reg = gen_rtx_REG (SImode, REG_R2);
+
+ emit_move_insn (r0reg, gen_rtx_REG (SImode, REG_SEQSTAT));
+ emit_insn (gen_ashrsi3 (r0reg, r0reg, GEN_INT (26)));
+ emit_insn (gen_ashlsi3 (r0reg, r0reg, GEN_INT (26)));
+ emit_move_insn (r1reg, spreg);
+ emit_move_insn (r2reg, gen_rtx_REG (Pmode, REG_FP));
+ emit_insn (gen_addsi3 (r2reg, r2reg, GEN_INT (8)));
+ }
+}
+
+/* Generate an epilogue suitable for a function of kind FKIND. This is
+ called for interrupt and exception handler epilogues.
+ SPREG contains (reg:SI REG_SP). */
+
+static void
+expand_interrupt_handler_epilogue (rtx spreg, e_funkind fkind, bool all)
+{
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl));
+ rtx postinc1 = gen_rtx_POST_INC (SImode, spreg);
+ rtx postinc = gen_rtx_MEM (SImode, postinc1);
+
+ /* A slightly crude technique to stop flow from trying to delete "dead"
+ insns. */
+ MEM_VOLATILE_P (postinc) = 1;
+
+ do_unlink (spreg, get_frame_size (), all, 1);
+
+ if (lookup_attribute ("nesting", attrs))
+ {
+ rtx srcreg = gen_rtx_REG (Pmode, ret_regs[fkind]);
+ emit_move_insn (srcreg, postinc);
+ }
+
+ /* If we're calling other functions, they won't save their call-clobbered
+ registers, so we must save (and restore) everything here. */
+ if (!current_function_is_leaf)
+ all = true;
+
+ expand_epilogue_reg_restore (spreg, all, true);
+
+ /* Deallocate any space we left on the stack in case we needed to save the
+ argument registers. */
+ if (fkind == EXCPT_HANDLER)
+ emit_insn (gen_addsi3 (spreg, spreg, GEN_INT (12)));
+
+ emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, ret_regs[fkind])));
+}
+
+/* Used while emitting the prologue to generate code to load the correct value
+ into the PIC register, which is passed in DEST. */
+
+static rtx
+bfin_load_pic_reg (rtx dest)
+{
+ struct cgraph_local_info *i = NULL;
+ rtx addr;
+
+ i = cgraph_local_info (current_function_decl);
+
+ /* Functions local to the translation unit don't need to reload the
+ pic reg, since the caller always passes a usable one. */
+ if (i && i->local)
+ return pic_offset_table_rtx;
+
+ if (bfin_lib_id_given)
+ addr = plus_constant (pic_offset_table_rtx, -4 - bfin_library_id * 4);
+ else
+ addr = gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
+ gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_LIBRARY_OFFSET));
+ emit_insn (gen_movsi (dest, gen_rtx_MEM (Pmode, addr)));
+ return dest;
+}
+
+/* Generate RTL for the prologue of the current function. */
+
+void
+bfin_expand_prologue (void)
+{
+ HOST_WIDE_INT frame_size = get_frame_size ();
+ rtx spreg = gen_rtx_REG (Pmode, REG_SP);
+ e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
+ rtx pic_reg_loaded = NULL_RTX;
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl));
+ bool all = lookup_attribute ("saveall", attrs) != NULL_TREE;
+
+ if (fkind != SUBROUTINE)
+ {
+ expand_interrupt_handler_prologue (spreg, fkind, all);
+ return;
+ }
+
+ if (crtl->limit_stack
+ || (TARGET_STACK_CHECK_L1
+ && !DECL_NO_LIMIT_STACK (current_function_decl)))
+ {
+ HOST_WIDE_INT offset
+ = bfin_initial_elimination_offset (ARG_POINTER_REGNUM,
+ STACK_POINTER_REGNUM);
+ rtx lim = crtl->limit_stack ? stack_limit_rtx : NULL_RTX;
+ rtx p2reg = gen_rtx_REG (Pmode, REG_P2);
+
+ if (!lim)
+ {
+ emit_move_insn (p2reg, gen_int_mode (0xFFB00000, SImode));
+ emit_move_insn (p2reg, gen_rtx_MEM (Pmode, p2reg));
+ lim = p2reg;
+ }
+ if (GET_CODE (lim) == SYMBOL_REF)
+ {
+ if (TARGET_ID_SHARED_LIBRARY)
+ {
+ rtx p1reg = gen_rtx_REG (Pmode, REG_P1);
+ rtx val;
+ pic_reg_loaded = bfin_load_pic_reg (p2reg);
+ val = legitimize_pic_address (stack_limit_rtx, p1reg,
+ pic_reg_loaded);
+ emit_move_insn (p1reg, val);
+ frame_related_constant_load (p2reg, offset, FALSE);
+ emit_insn (gen_addsi3 (p2reg, p2reg, p1reg));
+ lim = p2reg;
+ }
+ else
+ {
+ rtx limit = plus_constant (lim, offset);
+ emit_move_insn (p2reg, limit);
+ lim = p2reg;
+ }
+ }
+ else
+ {
+ if (lim != p2reg)
+ emit_move_insn (p2reg, lim);
+ add_to_reg (p2reg, offset, 0, 0);
+ lim = p2reg;
+ }
+ emit_insn (gen_compare_lt (bfin_cc_rtx, spreg, lim));
+ emit_insn (gen_trapifcc ());
+ }
+ expand_prologue_reg_save (spreg, all, false);
+
+ do_link (spreg, frame_size, all);
+
+ if (TARGET_ID_SHARED_LIBRARY
+ && !TARGET_SEP_DATA
+ && (crtl->uses_pic_offset_table
+ || !current_function_is_leaf))
+ bfin_load_pic_reg (pic_offset_table_rtx);
+}
+
+/* Generate RTL for the epilogue of the current function. NEED_RETURN is zero
+ if this is for a sibcall. EH_RETURN is nonzero if we're expanding an
+ eh_return pattern. SIBCALL_P is true if this is a sibcall epilogue,
+ false otherwise. */
+
+void
+bfin_expand_epilogue (int need_return, int eh_return, bool sibcall_p)
+{
+ rtx spreg = gen_rtx_REG (Pmode, REG_SP);
+ e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
+ int e = sibcall_p ? -1 : 1;
+ tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl));
+ bool all = lookup_attribute ("saveall", attrs) != NULL_TREE;
+
+ if (fkind != SUBROUTINE)
+ {
+ expand_interrupt_handler_epilogue (spreg, fkind, all);
+ return;
+ }
+
+ do_unlink (spreg, get_frame_size (), all, e);
+
+ expand_epilogue_reg_restore (spreg, all, false);
+
+ /* Omit the return insn if this is for a sibcall. */
+ if (! need_return)
+ return;
+
+ if (eh_return)
+ emit_insn (gen_addsi3 (spreg, spreg, gen_rtx_REG (Pmode, REG_P2)));
+
+ emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, REG_RETS)));
+}
+
+/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
+
+int
+bfin_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
+ unsigned int new_reg)
+{
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+
+ if (funkind (TREE_TYPE (current_function_decl)) != SUBROUTINE
+ && !df_regs_ever_live_p (new_reg))
+ return 0;
+
+ return 1;
+}
+
+/* Return the value of the return address for the frame COUNT steps up
+ from the current frame, after the prologue.
+ We punt for everything but the current frame by returning const0_rtx. */
+
+rtx
+bfin_return_addr_rtx (int count)
+{
+ if (count != 0)
+ return const0_rtx;
+
+ return get_hard_reg_initial_val (Pmode, REG_RETS);
+}
+
+static rtx
+bfin_delegitimize_address (rtx orig_x)
+{
+ rtx x = orig_x;
+
+ if (GET_CODE (x) != MEM)
+ return orig_x;
+
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == UNSPEC
+ && XINT (XEXP (x, 1), 1) == UNSPEC_MOVE_PIC
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
+ return XVECEXP (XEXP (x, 1), 0, 0);
+
+ return orig_x;
+}
+
+/* This predicate is used to compute the length of a load/store insn.
+   OP is a MEM rtx; we return nonzero if its addressing mode requires a
+   32-bit instruction.  */
+
+int
+effective_address_32bit_p (rtx op, enum machine_mode mode)
+{
+ HOST_WIDE_INT offset;
+
+ mode = GET_MODE (op);
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) != PLUS)
+ {
+ gcc_assert (REG_P (op) || GET_CODE (op) == POST_INC
+ || GET_CODE (op) == PRE_DEC || GET_CODE (op) == POST_DEC);
+ return 0;
+ }
+
+ if (GET_CODE (XEXP (op, 1)) == UNSPEC)
+ return 1;
+
+ offset = INTVAL (XEXP (op, 1));
+
+ /* All byte loads use a 16-bit offset. */
+ if (GET_MODE_SIZE (mode) == 1)
+ return 1;
+
+ if (GET_MODE_SIZE (mode) == 4)
+ {
+ /* Frame pointer relative loads can use a negative offset, all others
+ are restricted to a small positive one. */
+ if (XEXP (op, 0) == frame_pointer_rtx)
+ return offset < -128 || offset > 60;
+ return offset < 0 || offset > 60;
+ }
+
+ /* Must be HImode now. */
+ return offset < 0 || offset > 30;
+}
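
Some concrete cases under these rules (offsets in bytes):

    [P0 + 40]	SImode	-> 16-bit insn  (0 <= 40 <= 60)
    [P0 + 64]	SImode	-> 32-bit insn  (offset > 60)
    [FP - 100]	SImode	-> 16-bit insn  (FP-relative may reach -128)
    [P0 + 2]	QImode	-> 32-bit insn  (byte loads need a 16-bit offset)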
+
+/* Returns true if X is a memory reference using an I register. */
+bool
+bfin_dsp_memref_p (rtx x)
+{
+ if (! MEM_P (x))
+ return false;
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_INC
+ || GET_CODE (x) == POST_DEC || GET_CODE (x) == PRE_DEC)
+ x = XEXP (x, 0);
+ return IREG_P (x);
+}
+
+/* Return cost of the memory address ADDR.
+ All addressing modes are equally cheap on the Blackfin. */
+
+static int
+bfin_address_cost (rtx addr ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+/* Subroutine of print_operand; used to print a memory reference X to FILE. */
+
+void
+print_address_operand (FILE *file, rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ output_address (XEXP (x, 0));
+ fprintf (file, "+");
+ output_address (XEXP (x, 1));
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "--");
+ output_address (XEXP (x, 0));
+ break;
+ case POST_INC:
+ output_address (XEXP (x, 0));
+ fprintf (file, "++");
+ break;
+ case POST_DEC:
+ output_address (XEXP (x, 0));
+ fprintf (file, "--");
+ break;
+
+ default:
+ gcc_assert (GET_CODE (x) != MEM);
+ print_operand (file, x, 0);
+ break;
+ }
+}
+
+/* Adding intp DImode support by Tony
+ * -- Q: (low word)
+ * -- R: (high word)
+ */
+
+void
+print_operand (FILE *file, rtx x, char code)
+{
+ enum machine_mode mode;
+
+ if (code == '!')
+ {
+ if (GET_MODE (current_output_insn) == SImode)
+ fprintf (file, " ||");
+ else
+ fprintf (file, ";");
+ return;
+ }
+
+ mode = GET_MODE (x);
+
+ switch (code)
+ {
+ case 'j':
+ switch (GET_CODE (x))
+ {
+ case EQ:
+ fprintf (file, "e");
+ break;
+ case NE:
+ fprintf (file, "ne");
+ break;
+ case GT:
+ fprintf (file, "g");
+ break;
+ case LT:
+ fprintf (file, "l");
+ break;
+ case GE:
+ fprintf (file, "ge");
+ break;
+ case LE:
+ fprintf (file, "le");
+ break;
+ case GTU:
+ fprintf (file, "g");
+ break;
+ case LTU:
+ fprintf (file, "l");
+ break;
+ case GEU:
+ fprintf (file, "ge");
+ break;
+ case LEU:
+ fprintf (file, "le");
+ break;
+ default:
+ output_operand_lossage ("invalid %%j value");
+ }
+ break;
+
+ case 'J': /* reverse logic */
+      switch (GET_CODE (x))
+ {
+ case EQ:
+ fprintf (file, "ne");
+ break;
+ case NE:
+ fprintf (file, "e");
+ break;
+ case GT:
+ fprintf (file, "le");
+ break;
+ case LT:
+ fprintf (file, "ge");
+ break;
+ case GE:
+ fprintf (file, "l");
+ break;
+ case LE:
+ fprintf (file, "g");
+ break;
+ case GTU:
+ fprintf (file, "le");
+ break;
+ case LTU:
+ fprintf (file, "ge");
+ break;
+ case GEU:
+ fprintf (file, "l");
+ break;
+ case LEU:
+ fprintf (file, "g");
+ break;
+ default:
+ output_operand_lossage ("invalid %%J value");
+ }
+ break;
+
+ default:
+ switch (GET_CODE (x))
+ {
+ case REG:
+ if (code == 'h')
+ {
+ if (REGNO (x) < 32)
+ fprintf (file, "%s", short_reg_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'd')
+ {
+ if (REGNO (x) < 32)
+ fprintf (file, "%s", high_reg_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'w')
+ {
+ if (REGNO (x) == REG_A0 || REGNO (x) == REG_A1)
+ fprintf (file, "%s.w", reg_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'x')
+ {
+ if (REGNO (x) == REG_A0 || REGNO (x) == REG_A1)
+ fprintf (file, "%s.x", reg_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'v')
+ {
+ if (REGNO (x) == REG_A0)
+ fprintf (file, "AV0");
+ else if (REGNO (x) == REG_A1)
+ fprintf (file, "AV1");
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'D')
+ {
+ if (D_REGNO_P (REGNO (x)))
+ fprintf (file, "%s", dregs_pair_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'H')
+ {
+ if ((mode == DImode || mode == DFmode) && REG_P (x))
+ fprintf (file, "%s", reg_names[REGNO (x) + 1]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else if (code == 'T')
+ {
+ if (D_REGNO_P (REGNO (x)))
+ fprintf (file, "%s", byte_reg_names[REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ }
+ else
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ case MEM:
+ fputc ('[', file);
+      x = XEXP (x, 0);
+ print_address_operand (file, x);
+ fputc (']', file);
+ break;
+
+ case CONST_INT:
+ if (code == 'M')
+ {
+ switch (INTVAL (x))
+ {
+ case MACFLAG_NONE:
+ break;
+ case MACFLAG_FU:
+ fputs ("(FU)", file);
+ break;
+ case MACFLAG_T:
+ fputs ("(T)", file);
+ break;
+ case MACFLAG_TFU:
+ fputs ("(TFU)", file);
+ break;
+ case MACFLAG_W32:
+ fputs ("(W32)", file);
+ break;
+ case MACFLAG_IS:
+ fputs ("(IS)", file);
+ break;
+ case MACFLAG_IU:
+ fputs ("(IU)", file);
+ break;
+ case MACFLAG_IH:
+ fputs ("(IH)", file);
+ break;
+ case MACFLAG_M:
+ fputs ("(M)", file);
+ break;
+ case MACFLAG_IS_M:
+ fputs ("(IS,M)", file);
+ break;
+ case MACFLAG_ISS2:
+ fputs ("(ISS2)", file);
+ break;
+ case MACFLAG_S2RND:
+ fputs ("(S2RND)", file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
+ else if (code == 'b')
+ {
+ if (INTVAL (x) == 0)
+ fputs ("+=", file);
+ else if (INTVAL (x) == 1)
+ fputs ("-=", file);
+ else
+ gcc_unreachable ();
+ break;
+ }
+ /* Moves to half registers with d or h modifiers always use unsigned
+ constants. */
+ else if (code == 'd')
+ x = GEN_INT ((INTVAL (x) >> 16) & 0xffff);
+ else if (code == 'h')
+ x = GEN_INT (INTVAL (x) & 0xffff);
+ else if (code == 'N')
+ x = GEN_INT (-INTVAL (x));
+ else if (code == 'X')
+ x = GEN_INT (exact_log2 (0xffffffff & INTVAL (x)));
+ else if (code == 'Y')
+ x = GEN_INT (exact_log2 (0xffffffff & ~INTVAL (x)));
+ else if (code == 'Z')
+ /* Used for LINK insns. */
+ x = GEN_INT (-8 - INTVAL (x));
+
+ /* fall through */
+
+ case SYMBOL_REF:
+ output_addr_const (file, x);
+ break;
+
+ case CONST_DOUBLE:
+ output_operand_lossage ("invalid const_double operand");
+ break;
+
+ case UNSPEC:
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_MOVE_PIC:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fprintf (file, "@GOT");
+ break;
+
+ case UNSPEC_MOVE_FDPIC:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fprintf (file, "@GOT17M4");
+ break;
+
+ case UNSPEC_FUNCDESC_GOT17M4:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fprintf (file, "@FUNCDESC_GOT17M4");
+ break;
+
+ case UNSPEC_LIBRARY_OFFSET:
+ fprintf (file, "_current_shared_library_p5_offset_");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ output_addr_const (file, x);
+ }
+ }
+}
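
These codes are used from output templates in bfin.md.  A sketch of the
convention (illustrative templates, not literal patterns from this commit):

    "%h0 = %h1 + %h2;"	; 'h' prints the 16-bit short-register name
    "%d0 = %d1 * %d2;"	; 'd' prints the high-half register name
    "%0 = %1 + %2%!"	; '%!' ends the insn: " ||" inside a parallel
			; bundle (insn mode SImode), ";" otherwise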
+
+/* Argument support functions. */
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+   Per the VDSP C Compiler manual, our ABI passes the
+   first 3 words of arguments in R0, R1 and R2.  */
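+/* For example, under this scheme a call f (int a, long long b) receives
+   A in R0 and B in R1/R2, and any further argument words go on the stack.  */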
+
+void
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+ rtx libname ATTRIBUTE_UNUSED)
+{
+ static CUMULATIVE_ARGS zero_cum;
+
+ *cum = zero_cum;
+
+ /* Set up the number of registers to use for passing arguments. */
+
+ cum->nregs = max_arg_registers;
+ cum->arg_regs = arg_regs;
+
+ cum->call_cookie = CALL_NORMAL;
+  /* Check for a shortcall or longcall attribute.  */
+ if (fntype && lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype)))
+ cum->call_cookie |= CALL_SHORT;
+ else if (fntype && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype)))
+ cum->call_cookie |= CALL_LONG;
+
+ return;
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+static void
+bfin_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+  int bytes, words;
+
+ bytes = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ cum->words += words;
+ cum->nregs -= words;
+
+ if (cum->nregs <= 0)
+ {
+ cum->nregs = 0;
+ cum->arg_regs = NULL;
+ }
+  else
+    cum->arg_regs += words;
+
+ return;
+}
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+static rtx
+bfin_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int bytes
+ = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+
+ if (mode == VOIDmode)
+ /* Compute operand 2 of the call insn. */
+ return GEN_INT (cum->call_cookie);
+
+ if (bytes == -1)
+ return NULL_RTX;
+
+ if (cum->nregs)
+ return gen_rtx_REG (mode, *(cum->arg_regs));
+
+ return NULL_RTX;
+}
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of bytes passed in registers.
+ For args passed entirely in registers or entirely in memory, zero.
+
+   Refer to the VDSP C Compiler manual for our ABI.
+   The first 3 words are passed in registers, so an argument larger
+   than the registers still available will span both registers and
+   the stack. */
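+/* For example, if only one argument register word remains and an 8-byte
+   argument arrives, this returns 4: one word travels in the register and
+   the other goes on the stack.  */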
+
+static int
+bfin_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+			tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int bytes
+ = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ int bytes_left = cum->nregs * UNITS_PER_WORD;
+
+ if (bytes == -1)
+ return 0;
+
+ if (bytes_left == 0)
+ return 0;
+ if (bytes > bytes_left)
+ return bytes_left;
+ return 0;
+}
+
+/* Variable sized types are passed by reference. */
+
+static bool
+bfin_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
+}
+
+/* Decide whether a type should be returned in memory (true)
+   or in a register (false).  This implements the
+   TARGET_RETURN_IN_MEMORY target hook. */
+
+static bool
+bfin_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ int size = int_size_in_bytes (type);
+ return size > 2 * UNITS_PER_WORD || size == -1;
+}
+
+/* Return the register in which the address used to store a structure
+   value is passed to a function. */
+static rtx
+bfin_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, REG_P0);
+}
+
+/* Return true when register may be used to pass function parameters. */
+
+bool
+function_arg_regno_p (int n)
+{
+ int i;
+ for (i = 0; arg_regs[i] != -1; i++)
+ if (n == arg_regs[i])
+ return true;
+ return false;
+}
+
+/* Returns 1 if OP contains a symbol reference.  */
+
+int
+symbolic_reference_mentioned_p (rtx op)
+{
+ register const char *fmt;
+ register int i;
+
+ if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (op));
+ for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (op, i) - 1; j >= 0; j--)
+ if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
+ return 1;
+ }
+
+ else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Decide whether we can make a sibling call to a function. DECL is the
+ declaration of the function being targeted by the call and EXP is the
+ CALL_EXPR representing the call. */
+
+static bool
+bfin_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
+ tree exp ATTRIBUTE_UNUSED)
+{
+ struct cgraph_local_info *this_func, *called_func;
+ e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
+ if (fkind != SUBROUTINE)
+ return false;
+ if (!TARGET_ID_SHARED_LIBRARY || TARGET_SEP_DATA)
+ return true;
+
+  /* When compiling for ID shared libraries, we can't sibcall a local function
+     from a non-local function, because the local function thinks it does
+     not need to reload P5 in the prologue, but the sibcall will pop P5 in the
+     sibcall epilogue, and we end up with the wrong value in P5.  */
+
+ if (!decl)
+ /* Not enough information. */
+ return false;
+
+ this_func = cgraph_local_info (current_function_decl);
+ called_func = cgraph_local_info (decl);
+ return !called_func->local || this_func->local;
+}
+
+/* Write a template for a trampoline to F. */
+
+static void
+bfin_asm_trampoline_template (FILE *f)
+{
+ if (TARGET_FDPIC)
+ {
+ fprintf (f, "\t.dd\t0x00000000\n"); /* 0 */
+ fprintf (f, "\t.dd\t0x00000000\n"); /* 0 */
+ fprintf (f, "\t.dd\t0x0000e109\n"); /* p1.l = fn low */
+ fprintf (f, "\t.dd\t0x0000e149\n"); /* p1.h = fn high */
+ fprintf (f, "\t.dd\t0x0000e10a\n"); /* p2.l = sc low */
+ fprintf (f, "\t.dd\t0x0000e14a\n"); /* p2.h = sc high */
+ fprintf (f, "\t.dw\t0xac4b\n"); /* p3 = [p1 + 4] */
+ fprintf (f, "\t.dw\t0x9149\n"); /* p1 = [p1] */
+ fprintf (f, "\t.dw\t0x0051\n"); /* jump (p1)*/
+ }
+ else
+ {
+ fprintf (f, "\t.dd\t0x0000e109\n"); /* p1.l = fn low */
+ fprintf (f, "\t.dd\t0x0000e149\n"); /* p1.h = fn high */
+ fprintf (f, "\t.dd\t0x0000e10a\n"); /* p2.l = sc low */
+ fprintf (f, "\t.dd\t0x0000e14a\n"); /* p2.h = sc high */
+ fprintf (f, "\t.dw\t0x0051\n"); /* jump (p1)*/
+ }
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline at
+ M_TRAMP. FNDECL is the target function. CHAIN_VALUE is an RTX for
+ the static chain value for the function. */
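+/* The HImode stores below patch the 16-bit immediate fields that follow the
+   opcode halfwords emitted by the template above: the function address goes
+   into the p1.l/p1.h slots at offsets I+2 and I+6, and the static chain into
+   the p2.l/p2.h slots at I+10 and I+14, where I skips the 8-byte function
+   descriptor in the FDPIC case.  */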
+
+static void
+bfin_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx t1 = copy_to_reg (XEXP (DECL_RTL (fndecl), 0));
+ rtx t2 = copy_to_reg (chain_value);
+ rtx mem;
+ int i = 0;
+
+ emit_block_move (m_tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+ if (TARGET_FDPIC)
+ {
+ rtx a = force_reg (Pmode, plus_constant (XEXP (m_tramp, 0), 8));
+ mem = adjust_address (m_tramp, Pmode, 0);
+ emit_move_insn (mem, a);
+ i = 8;
+ }
+
+ mem = adjust_address (m_tramp, HImode, i + 2);
+ emit_move_insn (mem, gen_lowpart (HImode, t1));
+ emit_insn (gen_ashrsi3 (t1, t1, GEN_INT (16)));
+ mem = adjust_address (m_tramp, HImode, i + 6);
+ emit_move_insn (mem, gen_lowpart (HImode, t1));
+
+ mem = adjust_address (m_tramp, HImode, i + 10);
+ emit_move_insn (mem, gen_lowpart (HImode, t2));
+ emit_insn (gen_ashrsi3 (t2, t2, GEN_INT (16)));
+ mem = adjust_address (m_tramp, HImode, i + 14);
+ emit_move_insn (mem, gen_lowpart (HImode, t2));
+}
+
+/* Emit insns to move operands[1] into operands[0]. */
+
+void
+emit_pic_move (rtx *operands, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
+
+ gcc_assert (!TARGET_FDPIC || !(reload_in_progress || reload_completed));
+ if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1]))
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ operands[1] = legitimize_pic_address (operands[1], temp,
+ TARGET_FDPIC ? OUR_FDPIC_REG
+ : pic_offset_table_rtx);
+}
+
+/* Expand a move operation in mode MODE. The operands are in OPERANDS.
+ Returns true if no further code must be generated, false if the caller
+ should generate an insn to move OPERANDS[1] to OPERANDS[0]. */
+
+bool
+expand_move (rtx *operands, enum machine_mode mode)
+{
+ rtx op = operands[1];
+ if ((TARGET_ID_SHARED_LIBRARY || TARGET_FDPIC)
+ && SYMBOLIC_CONST (op))
+ emit_pic_move (operands, mode);
+ else if (mode == SImode && GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && !bfin_legitimate_constant_p (op))
+ {
+ rtx dest = operands[0];
+ rtx op0, op1;
+ gcc_assert (!reload_in_progress && !reload_completed);
+ op = XEXP (op, 0);
+ op0 = force_reg (mode, XEXP (op, 0));
+ op1 = XEXP (op, 1);
+ if (!insn_data[CODE_FOR_addsi3].operand[2].predicate (op1, mode))
+ op1 = force_reg (mode, op1);
+ if (GET_CODE (dest) == MEM)
+ dest = gen_reg_rtx (mode);
+ emit_insn (gen_addsi3 (dest, op0, op1));
+ if (dest == operands[0])
+ return true;
+ operands[1] = dest;
+ }
+  /* Don't generate memory->memory or constant->memory moves; go through a
+     register.  */
+ else if ((reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (mode, operands[1]);
+ return false;
+}
+
+/* Split one or more DImode RTL references into pairs of SImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
+{
+ while (num--)
+ {
+ rtx op = operands[num];
+
+      /* simplify_subreg refuses to split volatile memory references,
+	 but we still have to handle them.  */
+ if (GET_CODE (op) == MEM)
+ {
+ lo_half[num] = adjust_address (op, SImode, 0);
+ hi_half[num] = adjust_address (op, SImode, 4);
+ }
+ else
+ {
+ lo_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 0);
+ hi_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 4);
+ }
+ }
+}
+
+bool
+bfin_longcall_p (rtx op, int call_cookie)
+{
+ gcc_assert (GET_CODE (op) == SYMBOL_REF);
+ if (SYMBOL_REF_WEAK (op))
+ return 1;
+ if (call_cookie & CALL_SHORT)
+ return 0;
+ if (call_cookie & CALL_LONG)
+ return 1;
+ if (TARGET_LONG_CALLS)
+ return 1;
+ return 0;
+}
+
+/* Expand a call instruction.  FNADDR is the call target, RETVAL the return value.
+   COOKIE is a CONST_INT holding the call_cookie prepared by init_cumulative_args.
+ SIBCALL is nonzero if this is a sibling call. */
+
+void
+bfin_expand_call (rtx retval, rtx fnaddr, rtx callarg1, rtx cookie, int sibcall)
+{
+ rtx use = NULL, call;
+ rtx callee = XEXP (fnaddr, 0);
+ int nelts = 3;
+ rtx pat;
+ rtx picreg = get_hard_reg_initial_val (SImode, FDPIC_REGNO);
+ rtx retsreg = gen_rtx_REG (Pmode, REG_RETS);
+ int n;
+
+ /* In an untyped call, we can get NULL for operand 2. */
+ if (cookie == NULL_RTX)
+ cookie = const0_rtx;
+
+ /* Static functions and indirect calls don't need the pic register. */
+ if (!TARGET_FDPIC && flag_pic
+ && GET_CODE (callee) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (callee))
+ use_reg (&use, pic_offset_table_rtx);
+
+ if (TARGET_FDPIC)
+ {
+ int caller_in_sram, callee_in_sram;
+
+ /* 0 is not in sram, 1 is in L1 sram, 2 is in L2 sram. */
+ caller_in_sram = callee_in_sram = 0;
+
+ if (lookup_attribute ("l1_text",
+ DECL_ATTRIBUTES (cfun->decl)) != NULL_TREE)
+ caller_in_sram = 1;
+ else if (lookup_attribute ("l2",
+ DECL_ATTRIBUTES (cfun->decl)) != NULL_TREE)
+ caller_in_sram = 2;
+
+ if (GET_CODE (callee) == SYMBOL_REF
+ && SYMBOL_REF_DECL (callee) && DECL_P (SYMBOL_REF_DECL (callee)))
+ {
+ if (lookup_attribute
+ ("l1_text",
+ DECL_ATTRIBUTES (SYMBOL_REF_DECL (callee))) != NULL_TREE)
+ callee_in_sram = 1;
+ else if (lookup_attribute
+ ("l2",
+ DECL_ATTRIBUTES (SYMBOL_REF_DECL (callee))) != NULL_TREE)
+ callee_in_sram = 2;
+ }
+
+ if (GET_CODE (callee) != SYMBOL_REF
+ || bfin_longcall_p (callee, INTVAL (cookie))
+ || (GET_CODE (callee) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (callee)
+ && TARGET_INLINE_PLT)
+ || caller_in_sram != callee_in_sram
+ || (caller_in_sram && callee_in_sram
+ && (GET_CODE (callee) != SYMBOL_REF
+ || !SYMBOL_REF_LOCAL_P (callee))))
+ {
+ rtx addr = callee;
+ if (! address_operand (addr, Pmode))
+ addr = force_reg (Pmode, addr);
+
+ fnaddr = gen_reg_rtx (SImode);
+ emit_insn (gen_load_funcdescsi (fnaddr, addr));
+ fnaddr = gen_rtx_MEM (Pmode, fnaddr);
+
+ picreg = gen_reg_rtx (SImode);
+ emit_insn (gen_load_funcdescsi (picreg,
+ plus_constant (addr, 4)));
+ }
+
+ nelts++;
+ }
+ else if ((!register_no_elim_operand (callee, Pmode)
+ && GET_CODE (callee) != SYMBOL_REF)
+ || (GET_CODE (callee) == SYMBOL_REF
+ && ((TARGET_ID_SHARED_LIBRARY && !TARGET_LEAF_ID_SHARED_LIBRARY)
+ || bfin_longcall_p (callee, INTVAL (cookie)))))
+ {
+ callee = copy_to_mode_reg (Pmode, callee);
+ fnaddr = gen_rtx_MEM (Pmode, callee);
+ }
+ call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
+
+ if (retval)
+ call = gen_rtx_SET (VOIDmode, retval, call);
+
+ pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nelts));
+ n = 0;
+ XVECEXP (pat, 0, n++) = call;
+ if (TARGET_FDPIC)
+ XVECEXP (pat, 0, n++) = gen_rtx_USE (VOIDmode, picreg);
+ XVECEXP (pat, 0, n++) = gen_rtx_USE (VOIDmode, cookie);
+ if (sibcall)
+ XVECEXP (pat, 0, n++) = gen_rtx_RETURN (VOIDmode);
+ else
+ XVECEXP (pat, 0, n++) = gen_rtx_CLOBBER (VOIDmode, retsreg);
+ call = emit_call_insn (pat);
+ if (use)
+ CALL_INSN_FUNCTION_USAGE (call) = use;
+}
+
+/* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
+
+int
+hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+  /* Allow only dregs to store values of mode HI or QI.  */
+ enum reg_class rclass = REGNO_REG_CLASS (regno);
+
+ if (mode == CCmode)
+ return 0;
+
+ if (mode == V2HImode)
+ return D_REGNO_P (regno);
+ if (rclass == CCREGS)
+ return mode == BImode;
+ if (mode == PDImode || mode == V2PDImode)
+ return regno == REG_A0 || regno == REG_A1;
+
+ /* Allow all normal 32-bit regs, except REG_M3, in case regclass ever comes
+ up with a bad register class (such as ALL_REGS) for DImode. */
+ if (mode == DImode)
+ return regno < REG_M3;
+
+ if (mode == SImode
+ && TEST_HARD_REG_BIT (reg_class_contents[PROLOGUE_REGS], regno))
+ return 1;
+
+ return TEST_HARD_REG_BIT (reg_class_contents[MOST_REGS], regno);
+}
+
+/* Implements target hook vector_mode_supported_p. */
+
+static bool
+bfin_vector_mode_supported_p (enum machine_mode mode)
+{
+ return mode == V2HImode;
+}
+
+/* Return the cost of moving data from a register in class CLASS1 to
+ one in class CLASS2. A cost of 2 is the default. */
+
+int
+bfin_register_move_cost (enum machine_mode mode,
+ enum reg_class class1, enum reg_class class2)
+{
+ /* These need secondary reloads, so they're more expensive. */
+ if ((class1 == CCREGS && !reg_class_subset_p (class2, DREGS))
+ || (class2 == CCREGS && !reg_class_subset_p (class1, DREGS)))
+ return 4;
+
+ /* If optimizing for size, always prefer reg-reg over reg-memory moves. */
+ if (optimize_size)
+ return 2;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ /* Discourage trying to use the accumulators. */
+ if (TEST_HARD_REG_BIT (reg_class_contents[class1], REG_A0)
+ || TEST_HARD_REG_BIT (reg_class_contents[class1], REG_A1)
+ || TEST_HARD_REG_BIT (reg_class_contents[class2], REG_A0)
+ || TEST_HARD_REG_BIT (reg_class_contents[class2], REG_A1))
+ return 20;
+ }
+ return 2;
+}
+
+/* Return the cost of moving data of mode M between a
+ register and memory. A value of 2 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ ??? In theory L1 memory has single-cycle latency. We should add a switch
+ that tells the compiler whether we expect to use only L1 memory for the
+ program; it'll make the costs more accurate. */
+
+int
+bfin_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ enum reg_class rclass,
+ int in ATTRIBUTE_UNUSED)
+{
+ /* Make memory accesses slightly more expensive than any register-register
+ move. Also, penalize non-DP registers, since they need secondary
+ reloads to load and store. */
+ if (! reg_class_subset_p (rclass, DPREGS))
+ return 10;
+
+ return 8;
+}
+
+/* Inform reload about cases where moving X with a mode MODE to a register in
+ RCLASS requires an extra scratch register. Return the class needed for the
+ scratch register. */
+
+static reg_class_t
+bfin_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
+ enum machine_mode mode, secondary_reload_info *sri)
+{
+ /* If we have HImode or QImode, we can only use DREGS as secondary registers;
+ in most other cases we can also use PREGS. */
+ enum reg_class default_class = GET_MODE_SIZE (mode) >= 4 ? DPREGS : DREGS;
+ enum reg_class x_class = NO_REGS;
+ enum rtx_code code = GET_CODE (x);
+ enum reg_class rclass = (enum reg_class) rclass_i;
+
+ if (code == SUBREG)
+ x = SUBREG_REG (x), code = GET_CODE (x);
+ if (REG_P (x))
+ {
+ int regno = REGNO (x);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = reg_renumber[regno];
+
+ if (regno == -1)
+ code = MEM;
+ else
+ x_class = REGNO_REG_CLASS (regno);
+ }
+
+ /* We can be asked to reload (plus (FP) (large_constant)) into a DREG.
+ This happens as a side effect of register elimination, and we need
+ a scratch register to do it. */
+ if (fp_plus_const_operand (x, mode))
+ {
+ rtx op2 = XEXP (x, 1);
+ int large_constant_p = ! satisfies_constraint_Ks7 (op2);
+
+ if (rclass == PREGS || rclass == PREGS_CLOBBERED)
+ return NO_REGS;
+ /* If destination is a DREG, we can do this without a scratch register
+ if the constant is valid for an add instruction. */
+ if ((rclass == DREGS || rclass == DPREGS)
+ && ! large_constant_p)
+ return NO_REGS;
+ /* Reloading to anything other than a DREG? Use a PREG scratch
+ register. */
+ sri->icode = CODE_FOR_reload_insi;
+ return NO_REGS;
+ }
+
+ /* Data can usually be moved freely between registers of most classes.
+ AREGS are an exception; they can only move to or from another register
+ in AREGS or one in DREGS. They can also be assigned the constant 0. */
+ if (x_class == AREGS || x_class == EVEN_AREGS || x_class == ODD_AREGS)
+ return (rclass == DREGS || rclass == AREGS || rclass == EVEN_AREGS
+ || rclass == ODD_AREGS
+ ? NO_REGS : DREGS);
+
+ if (rclass == AREGS || rclass == EVEN_AREGS || rclass == ODD_AREGS)
+ {
+ if (code == MEM)
+ {
+ sri->icode = in_p ? CODE_FOR_reload_inpdi : CODE_FOR_reload_outpdi;
+ return NO_REGS;
+ }
+
+ if (x != const0_rtx && x_class != DREGS)
+ {
+ return DREGS;
+ }
+ else
+ return NO_REGS;
+ }
+
+ /* CCREGS can only be moved from/to DREGS. */
+ if (rclass == CCREGS && x_class != DREGS)
+ return DREGS;
+ if (x_class == CCREGS && rclass != DREGS)
+ return DREGS;
+
+ /* All registers other than AREGS can load arbitrary constants. The only
+ case that remains is MEM. */
+ if (code == MEM)
+ if (! reg_class_subset_p (rclass, default_class))
+ return default_class;
+
+ return NO_REGS;
+}
+
+/* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
+
+static bool
+bfin_class_likely_spilled_p (reg_class_t rclass)
+{
+ switch (rclass)
+ {
+ case PREGS_CLOBBERED:
+ case PROLOGUE_REGS:
+ case P0REGS:
+ case D0REGS:
+ case D1REGS:
+ case D2REGS:
+ case CCREGS:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+bfin_handle_option (size_t code, const char *arg, int value)
+{
+ switch (code)
+ {
+ case OPT_mshared_library_id_:
+ if (value > MAX_LIBRARY_ID)
+ error ("-mshared-library-id=%s is not between 0 and %d",
+ arg, MAX_LIBRARY_ID);
+ bfin_lib_id_given = 1;
+ return true;
+
+ case OPT_mcpu_:
+ {
+ const char *p, *q;
+ int i;
+
+ i = 0;
+ while ((p = bfin_cpus[i].name) != NULL)
+ {
+ if (strncmp (arg, p, strlen (p)) == 0)
+ break;
+ i++;
+ }
+
+ if (p == NULL)
+ {
+ error ("-mcpu=%s is not valid", arg);
+ return false;
+ }
+
+ bfin_cpu_type = bfin_cpus[i].type;
+
+ q = arg + strlen (p);
+
+ if (*q == '\0')
+ {
+ bfin_si_revision = bfin_cpus[i].si_revision;
+ bfin_workarounds |= bfin_cpus[i].workarounds;
+ }
+ else if (strcmp (q, "-none") == 0)
+ bfin_si_revision = -1;
+ else if (strcmp (q, "-any") == 0)
+ {
+ bfin_si_revision = 0xffff;
+ while (bfin_cpus[i].type == bfin_cpu_type)
+ {
+ bfin_workarounds |= bfin_cpus[i].workarounds;
+ i++;
+ }
+ }
+ else
+ {
+ unsigned int si_major, si_minor;
+ int rev_len, n;
+
+ rev_len = strlen (q);
+
+ if (sscanf (q, "-%u.%u%n", &si_major, &si_minor, &n) != 2
+ || n != rev_len
+ || si_major > 0xff || si_minor > 0xff)
+ {
+ invalid_silicon_revision:
+ error ("-mcpu=%s has invalid silicon revision", arg);
+ return false;
+ }
+
+ bfin_si_revision = (si_major << 8) | si_minor;
+
+ while (bfin_cpus[i].type == bfin_cpu_type
+ && bfin_cpus[i].si_revision != bfin_si_revision)
+ i++;
+
+ if (bfin_cpus[i].type != bfin_cpu_type)
+ goto invalid_silicon_revision;
+
+ bfin_workarounds |= bfin_cpus[i].workarounds;
+ }
+
+ return true;
+ }
+
+ default:
+ return true;
+ }
+}
+
+static struct machine_function *
+bfin_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+/* Implement the TARGET_OPTION_OVERRIDE hook. */
+
+static void
+bfin_option_override (void)
+{
+ /* If processor type is not specified, enable all workarounds. */
+ if (bfin_cpu_type == BFIN_CPU_UNKNOWN)
+ {
+ int i;
+
+ for (i = 0; bfin_cpus[i].name != NULL; i++)
+ bfin_workarounds |= bfin_cpus[i].workarounds;
+
+ bfin_si_revision = 0xffff;
+ }
+
+ if (bfin_csync_anomaly == 1)
+ bfin_workarounds |= WA_SPECULATIVE_SYNCS;
+ else if (bfin_csync_anomaly == 0)
+ bfin_workarounds &= ~WA_SPECULATIVE_SYNCS;
+
+ if (bfin_specld_anomaly == 1)
+ bfin_workarounds |= WA_SPECULATIVE_LOADS;
+ else if (bfin_specld_anomaly == 0)
+ bfin_workarounds &= ~WA_SPECULATIVE_LOADS;
+
+ if (TARGET_OMIT_LEAF_FRAME_POINTER)
+ flag_omit_frame_pointer = 1;
+
+ /* Library identification */
+ if (bfin_lib_id_given && ! TARGET_ID_SHARED_LIBRARY)
+ error ("-mshared-library-id= specified without -mid-shared-library");
+
+ if (stack_limit_rtx && TARGET_STACK_CHECK_L1)
+ error ("can%'t use multiple stack checking methods together");
+
+ if (TARGET_ID_SHARED_LIBRARY && TARGET_FDPIC)
+ error ("ID shared libraries and FD-PIC mode can%'t be used together");
+
+ /* Don't allow the user to specify -mid-shared-library and -msep-data
+ together, as it makes little sense from a user's point of view... */
+ if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
+ error ("cannot specify both -msep-data and -mid-shared-library");
+ /* ... internally, however, it's nearly the same. */
+ if (TARGET_SEP_DATA)
+ target_flags |= MASK_ID_SHARED_LIBRARY | MASK_LEAF_ID_SHARED_LIBRARY;
+
+ if (TARGET_ID_SHARED_LIBRARY && flag_pic == 0)
+ flag_pic = 1;
+
+ /* There is no single unaligned SI op for PIC code. Sometimes we
+ need to use ".4byte" and sometimes we need to use ".picptr".
+ See bfin_assemble_integer for details. */
+ if (TARGET_FDPIC)
+ targetm.asm_out.unaligned_op.si = 0;
+
+ /* Silently turn off flag_pic if not doing FDPIC or ID shared libraries,
+ since we don't support it and it'll just break. */
+ if (flag_pic && !TARGET_FDPIC && !TARGET_ID_SHARED_LIBRARY)
+ flag_pic = 0;
+
+ if (TARGET_MULTICORE && bfin_cpu_type != BFIN_CPU_BF561)
+ error ("-mmulticore can only be used with BF561");
+
+ if (TARGET_COREA && !TARGET_MULTICORE)
+ error ("-mcorea should be used with -mmulticore");
+
+ if (TARGET_COREB && !TARGET_MULTICORE)
+ error ("-mcoreb should be used with -mmulticore");
+
+ if (TARGET_COREA && TARGET_COREB)
+ error ("-mcorea and -mcoreb can%'t be used together");
+
+ flag_schedule_insns = 0;
+
+ init_machine_status = bfin_init_machine_status;
+}
+
+/* Return the destination address of BRANCH.
+ We need to use this instead of get_attr_length, because the
+ cbranch_with_nops pattern conservatively sets its length to 6, and
+ we still prefer to use shorter sequences. */
+
+static int
+branch_dest (rtx branch)
+{
+ rtx dest;
+ int dest_uid;
+ rtx pat = PATTERN (branch);
+ if (GET_CODE (pat) == PARALLEL)
+ pat = XVECEXP (pat, 0, 0);
+ dest = SET_SRC (pat);
+ if (GET_CODE (dest) == IF_THEN_ELSE)
+ dest = XEXP (dest, 1);
+ dest = XEXP (dest, 0);
+ dest_uid = INSN_UID (dest);
+ return INSN_ADDRESSES (dest_uid);
+}
+
+/* Return nonzero if INSN is annotated with a REG_BR_PROB note that indicates
+ it's a branch that's predicted taken. */
+
+static int
+cbranch_predicted_taken_p (rtx insn)
+{
+ rtx x = find_reg_note (insn, REG_BR_PROB, 0);
+
+ if (x)
+ {
+ int pred_val = INTVAL (XEXP (x, 0));
+
+ return pred_val >= REG_BR_PROB_BASE / 2;
+ }
+
+ return 0;
+}
+
+/* Templates for use by asm_conditional_branch. */
+
+static const char *ccbranch_templates[][3] = {
+ { "if !cc jump %3;", "if cc jump 4 (bp); jump.s %3;", "if cc jump 6 (bp); jump.l %3;" },
+ { "if cc jump %3;", "if !cc jump 4 (bp); jump.s %3;", "if !cc jump 6 (bp); jump.l %3;" },
+ { "if !cc jump %3 (bp);", "if cc jump 4; jump.s %3;", "if cc jump 6; jump.l %3;" },
+ { "if cc jump %3 (bp);", "if !cc jump 4; jump.s %3;", "if !cc jump 6; jump.l %3;" },
+};
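+/* asm_conditional_branch indexes these templates as [idx][len]: the rows
+   pair the branch-on-false and branch-on-true forms, first without and then
+   with the "(bp)" predicted-taken suffix, while the columns select the short
+   branch, the jump.s sequence or the jump.l sequence, in order of
+   increasing reach.  */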
+
+/* Output INSN, which is a conditional branch instruction with operands
+ OPERANDS.
+
+ We deal with the various forms of conditional branches that can be generated
+ by bfin_reorg to prevent the hardware from doing speculative loads, by
+ - emitting a sufficient number of nops, if N_NOPS is nonzero, or
+ - always emitting the branch as predicted taken, if PREDICT_TAKEN is true.
+ Either of these is only necessary if the branch is short, otherwise the
+ template we use ends in an unconditional jump which flushes the pipeline
+ anyway. */
+
+void
+asm_conditional_branch (rtx insn, rtx *operands, int n_nops, int predict_taken)
+{
+ int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
+  /* Note: the offset for instructions like "if cc jump; jump.[sl] offset"
+     is measured from the start of the "if cc" rather than from the jump,
+     so the range for jump.s is (-4094, 4096) instead of (-4096, 4094).  */
+ int len = (offset >= -1024 && offset <= 1022 ? 0
+ : offset >= -4094 && offset <= 4096 ? 1
+ : 2);
+ int bp = predict_taken && len == 0 ? 1 : cbranch_predicted_taken_p (insn);
+ int idx = (bp << 1) | (GET_CODE (operands[0]) == EQ ? BRF : BRT);
+ output_asm_insn (ccbranch_templates[idx][len], operands);
+ gcc_assert (n_nops == 0 || !bp);
+ if (len == 0)
+ while (n_nops-- > 0)
+ output_asm_insn ("nop;", NULL);
+}
+
+/* Emit rtl for a comparison operation CMP in mode MODE.  The operands are
+   taken from CMP itself; the result is a BImode condition suitable for a
+   conditional branch.  */
+
+rtx
+bfin_gen_compare (rtx cmp, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ enum rtx_code code1, code2;
+ rtx op0 = XEXP (cmp, 0), op1 = XEXP (cmp, 1);
+ rtx tem = bfin_cc_rtx;
+ enum rtx_code code = GET_CODE (cmp);
+
+ /* If we have a BImode input, then we already have a compare result, and
+ do not need to emit another comparison. */
+ if (GET_MODE (op0) == BImode)
+ {
+ gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
+ tem = op0, code2 = code;
+ }
+ else
+ {
+ switch (code) {
+ /* bfin has these conditions */
+ case EQ:
+ case LT:
+ case LE:
+ case LEU:
+ case LTU:
+ code1 = code;
+ code2 = NE;
+ break;
+ default:
+ code1 = reverse_condition (code);
+ code2 = EQ;
+ break;
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, tem,
+ gen_rtx_fmt_ee (code1, BImode, op0, op1)));
+ }
+
+ return gen_rtx_fmt_ee (code2, BImode, tem, CONST0_RTX (BImode));
+}
+
+/* Return nonzero iff C has exactly one bit set when interpreted
+   as a 32-bit constant. */
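+/* For example, 4 and 0x80000000 qualify, while 0 and 6 do not.  */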
+
+int
+log2constp (unsigned HOST_WIDE_INT c)
+{
+ c &= 0xFFFFFFFF;
+ return c != 0 && (c & (c-1)) == 0;
+}
+
+/* Returns the number of consecutive least significant zeros in the binary
+ representation of *V.
+ We modify *V to contain the original value arithmetically shifted right by
+ the number of zeroes. */
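+/* For example, *V == 0x50 returns 4 and leaves *V == 5, while *V == -4
+   returns 2 and leaves *V == -1, since the sign bit is shifted back in.  */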
+
+static int
+shiftr_zero (HOST_WIDE_INT *v)
+{
+ unsigned HOST_WIDE_INT tmp = *v;
+ unsigned HOST_WIDE_INT sgn;
+ int n = 0;
+
+ if (tmp == 0)
+ return 0;
+
+ sgn = tmp & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1));
+ while ((tmp & 0x1) == 0 && n <= 32)
+ {
+ tmp = (tmp >> 1) | sgn;
+ n++;
+ }
+ *v = tmp;
+ return n;
+}
+
+/* After reload, split the load of an immediate constant. OPERANDS are the
+ operands of the movsi_insn pattern which we are splitting. We return
+ nonzero if we emitted a sequence to load the constant, zero if we emitted
+ nothing because we want to use the splitter's default sequence. */
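+/* For example, the shifted-constant case below loads 0x00020000 into a
+   D register as "R0 = 1; R0 <<= 17;" rather than as two 16-bit immediate
+   moves.  */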
+
+int
+split_load_immediate (rtx operands[])
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ HOST_WIDE_INT tmp;
+ HOST_WIDE_INT shifted = val;
+ HOST_WIDE_INT shifted_compl = ~val;
+ int num_zero = shiftr_zero (&shifted);
+ int num_compl_zero = shiftr_zero (&shifted_compl);
+ unsigned int regno = REGNO (operands[0]);
+
+ /* This case takes care of single-bit set/clear constants, which we could
+ also implement with BITSET/BITCLR. */
+ if (num_zero
+ && shifted >= -32768 && shifted < 65536
+ && (D_REGNO_P (regno)
+ || (regno >= REG_P0 && regno <= REG_P7 && num_zero <= 2)))
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (shifted)));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (num_zero)));
+ return 1;
+ }
+
+ tmp = val & 0xFFFF;
+ tmp |= -(tmp & 0x8000);
+
+ /* If high word has one bit set or clear, try to use a bit operation. */
+ if (D_REGNO_P (regno))
+ {
+ if (log2constp (val & 0xFFFF0000))
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (val & 0xFFFF)));
+ emit_insn (gen_iorsi3 (operands[0], operands[0], GEN_INT (val & 0xFFFF0000)));
+ return 1;
+ }
+      else if (log2constp (~(val | 0xFFFF)) && (val & 0x8000) != 0)
+	{
+	  /* The mask has a single zero bit in its high word; TMP is the
+	     sign-extended low word, so the AND reproduces VAL exactly.  */
+	  emit_insn (gen_movsi (operands[0], GEN_INT (tmp)));
+	  emit_insn (gen_andsi3 (operands[0], operands[0], GEN_INT (val | 0xFFFF)));
+	  return 1;
+	}
+ }
+
+ if (D_REGNO_P (regno))
+ {
+ if (tmp >= -64 && tmp <= 63)
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (tmp)));
+ emit_insn (gen_movstricthi_high (operands[0], GEN_INT (val & -65536)));
+ return 1;
+ }
+
+ if ((val & 0xFFFF0000) == 0)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ emit_insn (gen_movsi_low (operands[0], operands[0], operands[1]));
+ return 1;
+ }
+
+ if ((val & 0xFFFF0000) == 0xFFFF0000)
+ {
+ emit_insn (gen_movsi (operands[0], constm1_rtx));
+ emit_insn (gen_movsi_low (operands[0], operands[0], operands[1]));
+ return 1;
+ }
+ }
+
+ /* Need DREGs for the remaining case. */
+ if (regno > REG_R7)
+ return 0;
+
+ if (optimize_size
+ && num_compl_zero && shifted_compl >= -64 && shifted_compl <= 63)
+ {
+ /* If optimizing for size, generate a sequence that has more instructions
+ but is shorter. */
+ emit_insn (gen_movsi (operands[0], GEN_INT (shifted_compl)));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0],
+ GEN_INT (num_compl_zero)));
+ emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
+ return 1;
+ }
+ return 0;
+}
+
+/* Return true if VALUE is a legitimate constant offset for a memory
+   access of mode MODE.  Return false if not. */
+
+static bool
+bfin_valid_add (enum machine_mode mode, HOST_WIDE_INT value)
+{
+ unsigned HOST_WIDE_INT v = value > 0 ? value : -value;
+ int sz = GET_MODE_SIZE (mode);
+ int shift = sz == 1 ? 0 : sz == 2 ? 1 : 2;
+ /* The usual offsettable_memref machinery doesn't work so well for this
+ port, so we deal with the problem here. */
+ if (value > 0 && sz == 8)
+ v += 4;
+ return (v & ~(0x7fff << shift)) == 0;
+}
+
+static bool
+bfin_valid_reg_p (unsigned int regno, int strict, enum machine_mode mode,
+ enum rtx_code outer_code)
+{
+ if (strict)
+ return REGNO_OK_FOR_BASE_STRICT_P (regno, mode, outer_code, SCRATCH);
+ else
+ return REGNO_OK_FOR_BASE_NONSTRICT_P (regno, mode, outer_code, SCRATCH);
+}
+
+/* Recognize an RTL expression that is a valid memory address for an
+ instruction. The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ Blackfin addressing modes are as follows:
+
+ [preg]
+ [preg + imm16]
+
+ B [ Preg + uimm15 ]
+ W [ Preg + uimm16m2 ]
+ [ Preg + uimm17m4 ]
+
+ [preg++]
+ [preg--]
+ [--sp]
+*/
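+/* Note that, per the list above, indexed forms such as [preg + preg] are
+   never legitimate addresses here; such sums must be computed into a
+   P register first.  */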
+
+static bool
+bfin_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ switch (GET_CODE (x)) {
+ case REG:
+ if (bfin_valid_reg_p (REGNO (x), strict, mode, MEM))
+ return true;
+ break;
+ case PLUS:
+ if (REG_P (XEXP (x, 0))
+ && bfin_valid_reg_p (REGNO (XEXP (x, 0)), strict, mode, PLUS)
+ && ((GET_CODE (XEXP (x, 1)) == UNSPEC && mode == SImode)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && bfin_valid_add (mode, INTVAL (XEXP (x, 1))))))
+ return true;
+ break;
+ case POST_INC:
+ case POST_DEC:
+ if (LEGITIMATE_MODE_FOR_AUTOINC_P (mode)
+ && REG_P (XEXP (x, 0))
+ && bfin_valid_reg_p (REGNO (XEXP (x, 0)), strict, mode, POST_INC))
+ return true;
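+      /* fall through */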
+ case PRE_DEC:
+ if (LEGITIMATE_MODE_FOR_AUTOINC_P (mode)
+ && XEXP (x, 0) == stack_pointer_rtx
+ && REG_P (XEXP (x, 0))
+ && bfin_valid_reg_p (REGNO (XEXP (x, 0)), strict, mode, PRE_DEC))
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* Decide whether we can force certain constants to memory. If we
+ decide we can't, the caller should be able to cope with it in
+ another way. */
+
+static bool
+bfin_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
+{
+ /* We have only one class of non-legitimate constants, and our movsi
+ expander knows how to handle them. Dropping these constants into the
+ data section would only shift the problem - we'd still get relocs
+ outside the object, in the data section rather than the text section. */
+ return true;
+}
+
+/* Ensure that for any constant of the form symbol + offset, the offset
+ remains within the object. Any other constants are ok.
+ This ensures that flat binaries never have to deal with relocations
+ crossing section boundaries. */
+
+bool
+bfin_legitimate_constant_p (rtx x)
+{
+ rtx sym;
+ HOST_WIDE_INT offset;
+
+ if (GET_CODE (x) != CONST)
+ return true;
+
+ x = XEXP (x, 0);
+ gcc_assert (GET_CODE (x) == PLUS);
+
+ sym = XEXP (x, 0);
+ x = XEXP (x, 1);
+ if (GET_CODE (sym) != SYMBOL_REF
+ || GET_CODE (x) != CONST_INT)
+ return true;
+ offset = INTVAL (x);
+
+ if (SYMBOL_REF_DECL (sym) == 0)
+ return true;
+ if (offset < 0
+ || offset >= int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (sym))))
+ return false;
+
+ return true;
+}
+
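+/* Implement the TARGET_RTX_COSTS hook.  Estimate the cost of rtx X, which
+   appears inside an expression with code OUTER_CODE, storing the result in
+   *TOTAL.  Return true when *TOTAL is final and the caller need not recurse
+   into X's operands.  */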
+static bool
+bfin_rtx_costs (rtx x, int code_i, int outer_code_i, int *total, bool speed)
+{
+ enum rtx_code code = (enum rtx_code) code_i;
+ enum rtx_code outer_code = (enum rtx_code) outer_code_i;
+ int cost2 = COSTS_N_INSNS (1);
+ rtx op0, op1;
+
+ switch (code)
+ {
+ case CONST_INT:
+ if (outer_code == SET || outer_code == PLUS)
+ *total = satisfies_constraint_Ks7 (x) ? 0 : cost2;
+ else if (outer_code == AND)
+ *total = log2constp (~INTVAL (x)) ? 0 : cost2;
+ else if (outer_code == LE || outer_code == LT || outer_code == EQ)
+ *total = (INTVAL (x) >= -4 && INTVAL (x) <= 3) ? 0 : cost2;
+ else if (outer_code == LEU || outer_code == LTU)
+ *total = (INTVAL (x) >= 0 && INTVAL (x) <= 7) ? 0 : cost2;
+ else if (outer_code == MULT)
+ *total = (INTVAL (x) == 2 || INTVAL (x) == 4) ? 0 : cost2;
+ else if (outer_code == ASHIFT && (INTVAL (x) == 1 || INTVAL (x) == 2))
+ *total = 0;
+ else if (outer_code == ASHIFT || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT)
+ *total = (INTVAL (x) >= 0 && INTVAL (x) <= 31) ? 0 : cost2;
+ else if (outer_code == IOR || outer_code == XOR)
+ *total = (INTVAL (x) & (INTVAL (x) - 1)) == 0 ? 0 : cost2;
+ else
+ *total = cost2;
+ return true;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case PLUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_MODE (x) == SImode)
+ {
+ if (GET_CODE (op0) == MULT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (op0, 1));
+ if (val == 2 || val == 4)
+ {
+ *total = cost2;
+ *total += rtx_cost (XEXP (op0, 0), outer_code, speed);
+ *total += rtx_cost (op1, outer_code, speed);
+ return true;
+ }
+ }
+ *total = cost2;
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, SET, speed);
+#if 0 /* We'd like to do this for accuracy, but it biases the loop optimizer
+ towards creating too many induction variables. */
+ if (!reg_or_7bit_operand (op1, SImode))
+ *total += rtx_cost (op1, SET, speed);
+#endif
+ }
+ else if (GET_MODE (x) == DImode)
+ {
+ *total = 6 * cost2;
+ if (GET_CODE (op1) != CONST_INT
+ || !satisfies_constraint_Ks7 (op1))
+ *total += rtx_cost (op1, PLUS, speed);
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, PLUS, speed);
+ }
+ return true;
+
+ case MINUS:
+ if (GET_MODE (x) == DImode)
+ *total = 6 * cost2;
+ else
+ *total = cost2;
+ return true;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ if (GET_MODE (x) == DImode)
+ *total = 6 * cost2;
+ else
+ *total = cost2;
+
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, code, speed);
+
+ return true;
+
+ case IOR:
+ case AND:
+ case XOR:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ /* Handle special cases of IOR: rotates, ALIGN insns, movstricthi_high. */
+ if (code == IOR)
+ {
+ if ((GET_CODE (op0) == LSHIFTRT && GET_CODE (op1) == ASHIFT)
+ || (GET_CODE (op0) == ASHIFT && GET_CODE (op1) == ZERO_EXTEND)
+ || (GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT)
+ || (GET_CODE (op0) == AND && GET_CODE (op1) == CONST_INT))
+ {
+ *total = cost2;
+ return true;
+ }
+ }
+
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, code, speed);
+
+ if (GET_MODE (x) == DImode)
+ {
+ *total = 2 * cost2;
+ return true;
+ }
+ *total = cost2;
+ if (GET_MODE (x) != SImode)
+ return true;
+
+ if (code == AND)
+ {
+ if (! rhs_andsi3_operand (XEXP (x, 1), SImode))
+ *total += rtx_cost (XEXP (x, 1), code, speed);
+ }
+ else
+ {
+ if (! regorlog2_operand (XEXP (x, 1), SImode))
+ *total += rtx_cost (XEXP (x, 1), code, speed);
+ }
+
+ return true;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ if (outer_code == SET
+ && XEXP (x, 1) == const1_rtx
+ && GET_CODE (XEXP (x, 2)) == CONST_INT)
+ {
+ *total = 2 * cost2;
+ return true;
+ }
+ /* fall through */
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ *total = cost2;
+ return true;
+
+ case MULT:
+ {
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_CODE (op0) == GET_CODE (op1)
+ && (GET_CODE (op0) == ZERO_EXTEND
+ || GET_CODE (op0) == SIGN_EXTEND))
+ {
+ *total = COSTS_N_INSNS (1);
+ op0 = XEXP (op0, 0);
+ op1 = XEXP (op1, 0);
+ }
+ else if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (3);
+
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, MULT, speed);
+ if (GET_CODE (op1) != REG
+ && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
+ *total += rtx_cost (op1, MULT, speed);
+ }
+ return true;
+
+ case UDIV:
+ case UMOD:
+ *total = COSTS_N_INSNS (32);
+ return true;
+
+ case VEC_CONCAT:
+ case VEC_SELECT:
+ if (outer_code == SET)
+ *total = cost2;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Used for communication between {push,pop}_multiple_operation (which
+ we use not only as a predicate) and the corresponding output functions. */
+static int first_preg_to_save, first_dreg_to_save;
+static int n_regs_to_save;
+
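+/* Return 1 if OP, a PARALLEL, looks like a multi-register push: stores at
+   decreasing offsets from SP of a run of consecutive D registers ending at
+   R7 and/or a run of consecutive P registers ending at P5.  As a side
+   effect, record the lowest register of each run in first_dreg_to_save and
+   first_preg_to_save.  */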
+int
+push_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ int lastdreg = 8, lastpreg = 6;
+ int i, group;
+
+ first_preg_to_save = lastpreg;
+ first_dreg_to_save = lastdreg;
+ for (i = 1, group = 0; i < XVECLEN (op, 0) - 1; i++)
+ {
+ rtx t = XVECEXP (op, 0, i);
+ rtx src, dest;
+ int regno;
+
+ if (GET_CODE (t) != SET)
+ return 0;
+
+ src = SET_SRC (t);
+ dest = SET_DEST (t);
+ if (GET_CODE (dest) != MEM || ! REG_P (src))
+ return 0;
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) != PLUS
+ || ! REG_P (XEXP (dest, 0))
+ || REGNO (XEXP (dest, 0)) != REG_SP
+ || GET_CODE (XEXP (dest, 1)) != CONST_INT
+ || INTVAL (XEXP (dest, 1)) != -i * 4)
+ return 0;
+
+ regno = REGNO (src);
+ if (group == 0)
+ {
+ if (D_REGNO_P (regno))
+ {
+ group = 1;
+ first_dreg_to_save = lastdreg = regno - REG_R0;
+ }
+ else if (regno >= REG_P0 && regno <= REG_P7)
+ {
+ group = 2;
+ first_preg_to_save = lastpreg = regno - REG_P0;
+ }
+ else
+ return 0;
+
+ continue;
+ }
+
+ if (group == 1)
+ {
+ if (regno >= REG_P0 && regno <= REG_P7)
+ {
+ group = 2;
+ first_preg_to_save = lastpreg = regno - REG_P0;
+ }
+ else if (regno != REG_R0 + lastdreg + 1)
+ return 0;
+ else
+ lastdreg++;
+ }
+ else if (group == 2)
+ {
+ if (regno != REG_P0 + lastpreg + 1)
+ return 0;
+ lastpreg++;
+ }
+ }
+ n_regs_to_save = 8 - first_dreg_to_save + 6 - first_preg_to_save;
+ return 1;
+}
+
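+/* Likewise, return 1 if OP matches a multi-register pop: loads from
+   increasing offsets off SP, restoring a run of P registers counting down
+   from P5 followed by a run of D registers counting down from R7 (either
+   run may be empty).  */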
+int
+pop_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ int lastdreg = 8, lastpreg = 6;
+ int i, group;
+
+ for (i = 1, group = 0; i < XVECLEN (op, 0); i++)
+ {
+ rtx t = XVECEXP (op, 0, i);
+ rtx src, dest;
+ int regno;
+
+ if (GET_CODE (t) != SET)
+ return 0;
+
+ src = SET_SRC (t);
+ dest = SET_DEST (t);
+ if (GET_CODE (src) != MEM || ! REG_P (dest))
+ return 0;
+ src = XEXP (src, 0);
+
+ if (i == 1)
+ {
+ if (! REG_P (src) || REGNO (src) != REG_SP)
+ return 0;
+ }
+ else if (GET_CODE (src) != PLUS
+ || ! REG_P (XEXP (src, 0))
+ || REGNO (XEXP (src, 0)) != REG_SP
+ || GET_CODE (XEXP (src, 1)) != CONST_INT
+ || INTVAL (XEXP (src, 1)) != (i - 1) * 4)
+ return 0;
+
+ regno = REGNO (dest);
+ if (group == 0)
+ {
+ if (regno == REG_R7)
+ {
+ group = 1;
+ lastdreg = 7;
+ }
+ else if (regno != REG_P0 + lastpreg - 1)
+ return 0;
+ else
+ lastpreg--;
+ }
+ else if (group == 1)
+ {
+ if (regno != REG_R0 + lastdreg - 1)
+ return 0;
+ else
+ lastdreg--;
+ }
+ }
+ first_dreg_to_save = lastdreg;
+ first_preg_to_save = lastpreg;
+ n_regs_to_save = 8 - first_dreg_to_save + 6 - first_preg_to_save;
+ return 1;
+}
+
+/* Emit assembly code for one multi-register push described by INSN, with
+ operands in OPERANDS. */
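+/* For example, saving R4-R7 and P3-P5 emits "[--sp] = ( r7:4, p5:3 );".  */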
+
+void
+output_push_multiple (rtx insn, rtx *operands)
+{
+ char buf[80];
+ int ok;
+
+ /* Validate the insn again, and compute first_[dp]reg_to_save. */
+ ok = push_multiple_operation (PATTERN (insn), VOIDmode);
+ gcc_assert (ok);
+
+ if (first_dreg_to_save == 8)
+ sprintf (buf, "[--sp] = ( p5:%d );\n", first_preg_to_save);
+ else if (first_preg_to_save == 6)
+ sprintf (buf, "[--sp] = ( r7:%d );\n", first_dreg_to_save);
+ else
+ sprintf (buf, "[--sp] = ( r7:%d, p5:%d );\n",
+ first_dreg_to_save, first_preg_to_save);
+
+ output_asm_insn (buf, operands);
+}
+
+/* Emit assembly code for one multi-register pop described by INSN, with
+ operands in OPERANDS. */
+
+void
+output_pop_multiple (rtx insn, rtx *operands)
+{
+ char buf[80];
+ int ok;
+
+ /* Validate the insn again, and compute first_[dp]reg_to_save. */
+ ok = pop_multiple_operation (PATTERN (insn), VOIDmode);
+ gcc_assert (ok);
+
+ if (first_dreg_to_save == 8)
+ sprintf (buf, "( p5:%d ) = [sp++];\n", first_preg_to_save);
+ else if (first_preg_to_save == 6)
+ sprintf (buf, "( r7:%d ) = [sp++];\n", first_dreg_to_save);
+ else
+ sprintf (buf, "( r7:%d, p5:%d ) = [sp++];\n",
+ first_dreg_to_save, first_preg_to_save);
+
+ output_asm_insn (buf, operands);
+}
+
+/* Adjust DST and SRC by OFFSET bytes, and generate one move in mode MODE. */
+
+static void
+single_move_for_movmem (rtx dst, rtx src, enum machine_mode mode, HOST_WIDE_INT offset)
+{
+ rtx scratch = gen_reg_rtx (mode);
+ rtx srcmem, dstmem;
+
+ srcmem = adjust_address_nv (src, mode, offset);
+ dstmem = adjust_address_nv (dst, mode, offset);
+ emit_move_insn (scratch, srcmem);
+ emit_move_insn (dstmem, scratch);
+}
+
+/* Expand a string move operation of COUNT_EXP bytes from SRC to DST, with
+ alignment ALIGN_EXP. Return true if successful, false if we should fall
+ back on a different method. */
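+/* For example, a 22-byte copy between word-aligned source and destination is
+   emitted as a rep_movsi loop covering the first 20 bytes followed by a
+   single HImode move for the remainder.  */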
+
+bool
+bfin_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
+{
+ rtx srcreg, destreg, countreg;
+ HOST_WIDE_INT align = 0;
+ unsigned HOST_WIDE_INT count = 0;
+
+ if (GET_CODE (align_exp) == CONST_INT)
+ align = INTVAL (align_exp);
+ if (GET_CODE (count_exp) == CONST_INT)
+ {
+ count = INTVAL (count_exp);
+#if 0
+ if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
+ return false;
+#endif
+ }
+
+ /* If optimizing for size, only do single copies inline. */
+ if (optimize_size)
+ {
+ if (count == 2 && align < 2)
+ return false;
+ if (count == 4 && align < 4)
+ return false;
+ if (count != 1 && count != 2 && count != 4)
+ return false;
+ }
+ if (align < 2 && count != 1)
+ return false;
+
+ destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
+ if (destreg != XEXP (dst, 0))
+ dst = replace_equiv_address_nv (dst, destreg);
+ srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
+ if (srcreg != XEXP (src, 0))
+ src = replace_equiv_address_nv (src, srcreg);
+
+ if (count != 0 && align >= 2)
+ {
+ unsigned HOST_WIDE_INT offset = 0;
+
+ if (align >= 4)
+ {
+ if ((count & ~3) == 4)
+ {
+ single_move_for_movmem (dst, src, SImode, offset);
+ offset = 4;
+ }
+ else if (count & ~3)
+ {
+ HOST_WIDE_INT new_count = ((count >> 2) & 0x3fffffff) - 1;
+ countreg = copy_to_mode_reg (Pmode, GEN_INT (new_count));
+
+ emit_insn (gen_rep_movsi (destreg, srcreg, countreg, destreg, srcreg));
+ cfun->machine->has_loopreg_clobber = true;
+ }
+ if (count & 2)
+ {
+ single_move_for_movmem (dst, src, HImode, offset);
+ offset += 2;
+ }
+ }
+ else
+ {
+ if ((count & ~1) == 2)
+ {
+ single_move_for_movmem (dst, src, HImode, offset);
+ offset = 2;
+ }
+ else if (count & ~1)
+ {
+ HOST_WIDE_INT new_count = ((count >> 1) & 0x7fffffff) - 1;
+ countreg = copy_to_mode_reg (Pmode, GEN_INT (new_count));
+
+ emit_insn (gen_rep_movhi (destreg, srcreg, countreg, destreg, srcreg));
+ cfun->machine->has_loopreg_clobber = true;
+ }
+ }
+ if (count & 1)
+ {
+ single_move_for_movmem (dst, src, QImode, offset);
+ }
+ return true;
+ }
+ return false;
+}
+
+/* Compute the alignment for a local variable.
+ TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object. */
+
+unsigned
+bfin_local_alignment (tree type, unsigned align)
+{
+  /* Increasing alignment for (relatively) big types allows the builtin
+     memcpy to use 32-bit loads/stores.  */
+ if (TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) > 8
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 32)
+ return 32;
+ return align;
+}
+
+/* Implement TARGET_SCHED_ISSUE_RATE. */
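+/* The Blackfin core can issue up to three instructions at once: one 32-bit
+   instruction together with two 16-bit instructions in a parallel bundle.  */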
+
+static int
+bfin_issue_rate (void)
+{
+ return 3;
+}
+
+static int
+bfin_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ enum attr_type dep_insn_type;
+ int dep_insn_code_number;
+
+ /* Anti and output dependencies have zero cost. */
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+
+ dep_insn_code_number = recog_memoized (dep_insn);
+
+ /* If we can't recognize the insns, we can't really do anything. */
+ if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
+ return cost;
+
+ dep_insn_type = get_attr_type (dep_insn);
+
+ if (dep_insn_type == TYPE_MOVE || dep_insn_type == TYPE_MCLD)
+ {
+ rtx pat = PATTERN (dep_insn);
+ rtx dest, src;
+
+ if (GET_CODE (pat) == PARALLEL)
+ pat = XVECEXP (pat, 0, 0);
+ dest = SET_DEST (pat);
+ src = SET_SRC (pat);
+ if (! ADDRESS_REGNO_P (REGNO (dest))
+ || ! (MEM_P (src) || D_REGNO_P (REGNO (src))))
+ return cost;
+ return cost + (dep_insn_type == TYPE_MOVE ? 4 : 3);
+ }
+
+ return cost;
+}
+
+/* This function acts like NEXT_INSN, but is aware of three-insn bundles and
+ skips all subsequent parallel instructions if INSN is the start of such
+ a group. */
+static rtx
+find_next_insn_start (rtx insn)
+{
+ if (GET_MODE (insn) == SImode)
+ {
+ while (GET_MODE (insn) != QImode)
+ insn = NEXT_INSN (insn);
+ }
+ return NEXT_INSN (insn);
+}
+
+/* This function acts like PREV_INSN, but is aware of three-insn bundles:
+   it returns the insn that starts the bundle or instruction preceding
+   INSN.  */
+static rtx
+find_prev_insn_start (rtx insn)
+{
+ insn = PREV_INSN (insn);
+ gcc_assert (GET_MODE (insn) != SImode);
+ if (GET_MODE (insn) == QImode)
+ {
+ while (GET_MODE (PREV_INSN (insn)) == SImode)
+ insn = PREV_INSN (insn);
+ }
+ return insn;
+}
+
+/* Increment the counter for the number of hardware loops in the
+   current function. */
+
+void
+bfin_hardware_loop (void)
+{
+ cfun->machine->has_hardware_loops++;
+}
+
+/* Maximum loop nesting depth. */
+#define MAX_LOOP_DEPTH 2
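+/* This limit matches the hardware: there are only two sets of loop registers
+   (LC0/LT0/LB0 and LC1/LT1/LB1).  */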
+
+/* Maximum size of a loop. */
+#define MAX_LOOP_LENGTH 2042
+
+/* Maximum distance of the LSETUP instruction from the loop start. */
+#define MAX_LSETUP_DISTANCE 30
+
+/* We need to keep a vector of loops.  */
+typedef struct loop_info_d *loop_info;
+DEF_VEC_P (loop_info);
+DEF_VEC_ALLOC_P (loop_info,heap);
+
+/* Information about a loop we have found (or are in the process of
+ finding). */
+struct GTY (()) loop_info_d
+{
+ /* loop number, for dumps */
+ int loop_no;
+
+ /* All edges that jump into and out of the loop. */
+ VEC(edge,gc) *incoming;
+
+ /* We can handle two cases: all incoming edges have the same destination
+ block, or all incoming edges have the same source block. These two
+ members are set to the common source or destination we found, or NULL
+ if different blocks were found. If both are NULL the loop can't be
+ optimized. */
+ basic_block incoming_src;
+ basic_block incoming_dest;
+
+ /* First block in the loop. This is the one branched to by the loop_end
+ insn. */
+ basic_block head;
+
+ /* Last block in the loop (the one with the loop_end insn). */
+ basic_block tail;
+
+ /* The successor block of the loop. This is the one the loop_end insn
+ falls into. */
+ basic_block successor;
+
+ /* The last instruction in the tail. */
+ rtx last_insn;
+
+ /* The loop_end insn. */
+ rtx loop_end;
+
+ /* The iteration register. */
+ rtx iter_reg;
+
+ /* The new label placed at the beginning of the loop. */
+ rtx start_label;
+
+ /* The new label placed at the end of the loop. */
+ rtx end_label;
+
+ /* The length of the loop. */
+ int length;
+
+ /* The nesting depth of the loop. */
+ int depth;
+
+ /* Nonzero if we can't optimize this loop. */
+ int bad;
+
+ /* True if we have visited this loop. */
+ int visited;
+
+ /* True if this loop body clobbers any of LC0, LT0, or LB0. */
+ int clobber_loop0;
+
+ /* True if this loop body clobbers any of LC1, LT1, or LB1. */
+ int clobber_loop1;
+
+ /* Next loop in the graph. */
+ struct loop_info_d *next;
+
+ /* Immediate outer loop of this loop. */
+ struct loop_info_d *outer;
+
+ /* Vector of blocks only within the loop, including those within
+ inner loops. */
+ VEC (basic_block,heap) *blocks;
+
+ /* Same information in a bitmap. */
+ bitmap block_bitmap;
+
+ /* Vector of inner loops within this loop */
+ VEC (loop_info,heap) *loops;
+};
+
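+/* Dump a description of each loop in the list LOOPS to dump_file.  */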
+static void
+bfin_dump_loops (loop_info loops)
+{
+ loop_info loop;
+
+ for (loop = loops; loop; loop = loop->next)
+ {
+ loop_info i;
+ basic_block b;
+ unsigned ix;
+
+ fprintf (dump_file, ";; loop %d: ", loop->loop_no);
+ if (loop->bad)
+ fprintf (dump_file, "(bad) ");
+ fprintf (dump_file, "{head:%d, depth:%d}", loop->head->index, loop->depth);
+
+ fprintf (dump_file, " blocks: [ ");
+ FOR_EACH_VEC_ELT (basic_block, loop->blocks, ix, b)
+ fprintf (dump_file, "%d ", b->index);
+ fprintf (dump_file, "] ");
+
+ fprintf (dump_file, " inner loops: [ ");
+ FOR_EACH_VEC_ELT (loop_info, loop->loops, ix, i)
+ fprintf (dump_file, "%d ", i->loop_no);
+ fprintf (dump_file, "]\n");
+ }
+ fprintf (dump_file, "\n");
+}
+
+/* Return true if basic block BB is contained in LOOP (or any of its
+   inferiors), as recorded in the loop's block bitmap.  */
+
+static bool
+bfin_bb_in_loop (loop_info loop, basic_block bb)
+{
+ return bitmap_bit_p (loop->block_bitmap, bb->index);
+}
+
+/* Scan the blocks of LOOP (and its inferiors) looking for uses of
+   REG.  Return true if we find any.  Don't count the loop's loop_end
+   insn if it matches LOOP_END.  */
+
+static bool
+bfin_scan_loop (loop_info loop, rtx reg, rtx loop_end)
+{
+ unsigned ix;
+ basic_block bb;
+
+ FOR_EACH_VEC_ELT (basic_block, loop->blocks, ix, bb)
+ {
+ rtx insn;
+
+ for (insn = BB_HEAD (bb);
+ insn != NEXT_INSN (BB_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ if (!INSN_P (insn))
+ continue;
+ if (insn == loop_end)
+ continue;
+ if (reg_mentioned_p (reg, PATTERN (insn)))
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Estimate the length of INSN conservatively. */
+
+static int
+length_for_loop (rtx insn)
+{
+ int length = 0;
+ if (JUMP_P (insn) && any_condjump_p (insn) && !optimize_size)
+ {
+ if (ENABLE_WA_SPECULATIVE_SYNCS)
+ length = 8;
+ else if (ENABLE_WA_SPECULATIVE_LOADS)
+ length = 6;
+ }
+ else if (LABEL_P (insn))
+ {
+ if (ENABLE_WA_SPECULATIVE_SYNCS)
+ length = 4;
+ }
+
+ if (NONDEBUG_INSN_P (insn))
+ length += get_attr_length (insn);
+
+ return length;
+}
+
+/* Optimize LOOP: try to turn it into a hardware loop based on the LSETUP
+   instruction.  */
+
+static void
+bfin_optimize_loop (loop_info loop)
+{
+ basic_block bb;
+ loop_info inner;
+ rtx insn, last_insn;
+ rtx loop_init, start_label, end_label;
+ rtx reg_lc0, reg_lc1, reg_lt0, reg_lt1, reg_lb0, reg_lb1;
+ rtx iter_reg, scratchreg, scratch_init, scratch_init_insn;
+ rtx lc_reg, lt_reg, lb_reg;
+ rtx seq, seq_end;
+ int length;
+ unsigned ix;
+ int inner_depth = 0;
+
+ if (loop->visited)
+ return;
+
+ loop->visited = 1;
+
+ if (loop->bad)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d bad when found\n", loop->loop_no);
+ goto bad_loop;
+ }
+
+ /* Every loop contains in its list of inner loops every loop nested inside
+ it, even if there are intermediate loops. This works because we're doing
+ a depth-first search here and never visit a loop more than once. */
+ FOR_EACH_VEC_ELT (loop_info, loop->loops, ix, inner)
+ {
+ bfin_optimize_loop (inner);
+
+ if (!inner->bad && inner_depth < inner->depth)
+ {
+ inner_depth = inner->depth;
+
+ loop->clobber_loop0 |= inner->clobber_loop0;
+ loop->clobber_loop1 |= inner->clobber_loop1;
+ }
+ }
+
+ loop->depth = inner_depth + 1;
+ if (loop->depth > MAX_LOOP_DEPTH)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d too deep\n", loop->loop_no);
+ goto bad_loop;
+ }
+
+ /* Get the loop iteration register. */
+ iter_reg = loop->iter_reg;
+
+ if (!REG_P (iter_reg))
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d iteration count not in a register\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+ scratchreg = NULL_RTX;
+ scratch_init = iter_reg;
+ scratch_init_insn = NULL_RTX;
+ if (!PREG_P (iter_reg) && loop->incoming_src)
+ {
+ basic_block bb_in = loop->incoming_src;
+ int i;
+ for (i = REG_P0; i <= REG_P5; i++)
+ if ((df_regs_ever_live_p (i)
+ || (funkind (TREE_TYPE (current_function_decl)) == SUBROUTINE
+ && call_used_regs[i]))
+ && !REGNO_REG_SET_P (df_get_live_out (bb_in), i))
+ {
+ scratchreg = gen_rtx_REG (SImode, i);
+ break;
+ }
+ for (insn = BB_END (bb_in); insn != BB_HEAD (bb_in);
+ insn = PREV_INSN (insn))
+ {
+ rtx set;
+ if (NOTE_P (insn) || BARRIER_P (insn))
+ continue;
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_DEST (set), iter_reg))
+ {
+ if (CONSTANT_P (SET_SRC (set)))
+ {
+ scratch_init = SET_SRC (set);
+ scratch_init_insn = insn;
+ }
+ break;
+ }
+ else if (reg_mentioned_p (iter_reg, PATTERN (insn)))
+ break;
+ }
+ }
+
+ if (loop->incoming_src)
+ {
+ /* Make sure the predecessor is before the loop start label, as required by
+ the LSETUP instruction. */
+ length = 0;
+ insn = BB_END (loop->incoming_src);
+ /* If we have to insert the LSETUP before a jump, count that jump in the
+ length. */
+ if (VEC_length (edge, loop->incoming) > 1
+ || !(VEC_last (edge, loop->incoming)->flags & EDGE_FALLTHRU))
+ {
+ gcc_assert (JUMP_P (insn));
+ insn = PREV_INSN (insn);
+ }
+
+ for (; insn && insn != loop->start_label; insn = NEXT_INSN (insn))
+ length += length_for_loop (insn);
+
+ if (!insn)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d lsetup not before loop_start\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+
+ /* Account for the pop of a scratch register where necessary. */
+ if (!PREG_P (iter_reg) && scratchreg == NULL_RTX
+ && ENABLE_WA_LOAD_LCREGS)
+ length += 2;
+
+ if (length > MAX_LSETUP_DISTANCE)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d lsetup too far away\n", loop->loop_no);
+ goto bad_loop;
+ }
+ }
+
+ /* Check if start_label appears before loop_end and calculate the
+ offset between them. We calculate the length of instructions
+ conservatively. */
+ length = 0;
+ for (insn = loop->start_label;
+ insn && insn != loop->loop_end;
+ insn = NEXT_INSN (insn))
+ length += length_for_loop (insn);
+
+ if (!insn)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d start_label not before loop_end\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+
+ loop->length = length;
+ if (loop->length > MAX_LOOP_LENGTH)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
+ goto bad_loop;
+ }
+
+ /* Scan all the blocks to make sure they don't use iter_reg. */
+ if (bfin_scan_loop (loop, iter_reg, loop->loop_end))
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d uses iterator\n", loop->loop_no);
+ goto bad_loop;
+ }
+
+  /* Scan all the insns to see if the loop body clobbers
+     any hardware loop registers. */
+
+ reg_lc0 = gen_rtx_REG (SImode, REG_LC0);
+ reg_lc1 = gen_rtx_REG (SImode, REG_LC1);
+ reg_lt0 = gen_rtx_REG (SImode, REG_LT0);
+ reg_lt1 = gen_rtx_REG (SImode, REG_LT1);
+ reg_lb0 = gen_rtx_REG (SImode, REG_LB0);
+ reg_lb1 = gen_rtx_REG (SImode, REG_LB1);
+
+ FOR_EACH_VEC_ELT (basic_block, loop->blocks, ix, bb)
+ {
+ rtx insn;
+
+ for (insn = BB_HEAD (bb);
+ insn != NEXT_INSN (BB_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ if (!INSN_P (insn))
+ continue;
+
+ if (reg_set_p (reg_lc0, insn)
+ || reg_set_p (reg_lt0, insn)
+ || reg_set_p (reg_lb0, insn))
+ loop->clobber_loop0 = 1;
+
+ if (reg_set_p (reg_lc1, insn)
+ || reg_set_p (reg_lt1, insn)
+ || reg_set_p (reg_lb1, insn))
+	    loop->clobber_loop1 = 1;
+ }
+ }
+
+ if ((loop->clobber_loop0 && loop->clobber_loop1)
+ || (loop->depth == MAX_LOOP_DEPTH && loop->clobber_loop0))
+ {
+ loop->depth = MAX_LOOP_DEPTH + 1;
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d no loop reg available\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+
+  /* There should be an instruction before the loop_end instruction
+     in the same basic block, and that instruction must not be any of
+     the following:
+     - JUMP
+     - CONDITIONAL BRANCH
+     - CALL
+     - CSYNC
+     - SSYNC
+     - Returns (RTS, RTN, etc.)  */
+
+ bb = loop->tail;
+ last_insn = find_prev_insn_start (loop->loop_end);
+
+ while (1)
+ {
+ for (; last_insn != BB_HEAD (bb);
+ last_insn = find_prev_insn_start (last_insn))
+ if (NONDEBUG_INSN_P (last_insn))
+ break;
+
+ if (last_insn != BB_HEAD (bb))
+ break;
+
+ if (single_pred_p (bb)
+ && single_pred_edge (bb)->flags & EDGE_FALLTHRU
+ && single_pred (bb) != ENTRY_BLOCK_PTR)
+ {
+ bb = single_pred (bb);
+ last_insn = BB_END (bb);
+ continue;
+ }
+ else
+ {
+ last_insn = NULL_RTX;
+ break;
+ }
+ }
+
+ if (!last_insn)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d has no last instruction\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+
+ if (JUMP_P (last_insn) && !any_condjump_p (last_insn))
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d has bad last instruction\n",
+ loop->loop_no);
+ goto bad_loop;
+ }
+ /* In all other cases, try to replace a bad last insn with a nop. */
+ else if (JUMP_P (last_insn)
+ || CALL_P (last_insn)
+ || get_attr_type (last_insn) == TYPE_SYNC
+ || get_attr_type (last_insn) == TYPE_CALL
+ || get_attr_seq_insns (last_insn) == SEQ_INSNS_MULTI
+ || recog_memoized (last_insn) == CODE_FOR_return_internal
+ || GET_CODE (PATTERN (last_insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (last_insn)) >= 0)
+ {
+ if (loop->length + 2 > MAX_LOOP_LENGTH)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d too long\n", loop->loop_no);
+ goto bad_loop;
+ }
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d has bad last insn; replace with nop\n",
+ loop->loop_no);
+
+ last_insn = emit_insn_after (gen_forced_nop (), last_insn);
+ }
+
+ loop->last_insn = last_insn;
+
+ /* The loop is good for replacement. */
+ start_label = loop->start_label;
+ end_label = gen_label_rtx ();
+ iter_reg = loop->iter_reg;
+
+ if (loop->depth == 1 && !loop->clobber_loop1)
+ {
+ lc_reg = reg_lc1;
+ lt_reg = reg_lt1;
+ lb_reg = reg_lb1;
+ loop->clobber_loop1 = 1;
+ }
+ else
+ {
+ lc_reg = reg_lc0;
+ lt_reg = reg_lt0;
+ lb_reg = reg_lb0;
+ loop->clobber_loop0 = 1;
+ }
+
+ loop->end_label = end_label;
+
+ /* Create a sequence containing the loop setup. */
+ start_sequence ();
+
+ /* LSETUP only accepts P registers. If we have one, we can use it,
+ otherwise there are several ways of working around the problem.
+ If we're not affected by anomaly 312, we can load the LC register
+ from any iteration register, and use LSETUP without initialization.
+ If we've found a P scratch register that's not live here, we can
+ instead copy the iter_reg into that and use an initializing LSETUP.
+ If all else fails, push and pop P0 and use it as a scratch. */
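+  /* Illustrative sketch (assumed mnemonics, not verbatim compiler output)
+     of what each strategy below emits:
+       1. P register available:  LSETUP (.Ls, .Le) LC0 = P1;
+       2. No anomaly 312:        LC0 = R0;  LSETUP (.Ls, .Le) LC0;
+       3. Free P scratch found:  P2 = R0;  LSETUP (.Ls, .Le) LC0 = P2;
+       4. Fallback:              [--SP] = P0;  P0 = R0;
+                                 LSETUP (.Ls, .Le) LC0 = P0;  P0 = [SP++];  */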
+ if (P_REGNO_P (REGNO (iter_reg)))
+ {
+ loop_init = gen_lsetup_with_autoinit (lt_reg, start_label,
+ lb_reg, end_label,
+ lc_reg, iter_reg);
+ seq_end = emit_insn (loop_init);
+ }
+ else if (!ENABLE_WA_LOAD_LCREGS && DPREG_P (iter_reg))
+ {
+ emit_insn (gen_movsi (lc_reg, iter_reg));
+ loop_init = gen_lsetup_without_autoinit (lt_reg, start_label,
+ lb_reg, end_label,
+ lc_reg);
+ seq_end = emit_insn (loop_init);
+ }
+ else if (scratchreg != NULL_RTX)
+ {
+ emit_insn (gen_movsi (scratchreg, scratch_init));
+ loop_init = gen_lsetup_with_autoinit (lt_reg, start_label,
+ lb_reg, end_label,
+ lc_reg, scratchreg);
+ seq_end = emit_insn (loop_init);
+ if (scratch_init_insn != NULL_RTX)
+ delete_insn (scratch_init_insn);
+ }
+ else
+ {
+ rtx p0reg = gen_rtx_REG (SImode, REG_P0);
+ rtx push = gen_frame_mem (SImode,
+ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx));
+ rtx pop = gen_frame_mem (SImode,
+ gen_rtx_POST_INC (SImode, stack_pointer_rtx));
+ emit_insn (gen_movsi (push, p0reg));
+ emit_insn (gen_movsi (p0reg, scratch_init));
+ loop_init = gen_lsetup_with_autoinit (lt_reg, start_label,
+ lb_reg, end_label,
+ lc_reg, p0reg);
+ emit_insn (loop_init);
+ seq_end = emit_insn (gen_movsi (p0reg, pop));
+ if (scratch_init_insn != NULL_RTX)
+ delete_insn (scratch_init_insn);
+ }
+
+ if (dump_file)
+ {
+ fprintf (dump_file, ";; replacing loop %d initializer with\n",
+ loop->loop_no);
+ print_rtl_single (dump_file, loop_init);
+ fprintf (dump_file, ";; replacing loop %d terminator with\n",
+ loop->loop_no);
+ print_rtl_single (dump_file, loop->loop_end);
+ }
+
+ /* If the loop isn't entered at the top, also create a jump to the entry
+ point. */
+ if (!loop->incoming_src && loop->head != loop->incoming_dest)
+ {
+ rtx label = BB_HEAD (loop->incoming_dest);
+ /* If we're jumping to the final basic block in the loop, and there's
+ only one cheap instruction before the end (typically an increment of
+ an induction variable), we can just emit a copy here instead of a
+ jump. */
+ if (loop->incoming_dest == loop->tail
+ && next_real_insn (label) == last_insn
+ && asm_noperands (last_insn) < 0
+ && GET_CODE (PATTERN (last_insn)) == SET)
+ {
+ seq_end = emit_insn (copy_rtx (PATTERN (last_insn)));
+ }
+ else
+ {
+ emit_jump_insn (gen_jump (label));
+ seq_end = emit_barrier ();
+ }
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ if (loop->incoming_src)
+ {
+ rtx prev = BB_END (loop->incoming_src);
+ if (VEC_length (edge, loop->incoming) > 1
+ || !(VEC_last (edge, loop->incoming)->flags & EDGE_FALLTHRU))
+ {
+ gcc_assert (JUMP_P (prev));
+ prev = PREV_INSN (prev);
+ }
+ emit_insn_after (seq, prev);
+ }
+ else
+ {
+ basic_block new_bb;
+ edge e;
+ edge_iterator ei;
+
+#ifdef ENABLE_CHECKING
+ if (loop->head != loop->incoming_dest)
+ {
+ /* We aren't entering the loop at the top. Since we've established
+ that the loop is entered only at one point, this means there
+ can't be fallthru edges into the head. Any such fallthru edges
+ would become invalid when we insert the new block, so verify
+ that this does not in fact happen. */
+ FOR_EACH_EDGE (e, ei, loop->head->preds)
+ gcc_assert (!(e->flags & EDGE_FALLTHRU));
+ }
+#endif
+
+ emit_insn_before (seq, BB_HEAD (loop->head));
+ seq = emit_label_before (gen_label_rtx (), seq);
+
+ new_bb = create_basic_block (seq, seq_end, loop->head->prev_bb);
+ FOR_EACH_EDGE (e, ei, loop->incoming)
+ {
+ if (!(e->flags & EDGE_FALLTHRU)
+ || e->dest != loop->head)
+ redirect_edge_and_branch_force (e, new_bb);
+ else
+ redirect_edge_succ (e, new_bb);
+ }
+ e = make_edge (new_bb, loop->head, 0);
+ }
+
+ delete_insn (loop->loop_end);
+ /* Insert the loop end label before the last instruction of the loop. */
+ emit_label_before (loop->end_label, loop->last_insn);
+
+ return;
+
+ bad_loop:
+
+ if (dump_file)
+ fprintf (dump_file, ";; loop %d is bad\n", loop->loop_no);
+
+ loop->bad = 1;
+
+ if (DPREG_P (loop->iter_reg))
+ {
+      /* If loop->iter_reg is a DREG or PREG, we can split it here
+	 without a scratch register. */
+ rtx insn, test;
+
+ emit_insn_before (gen_addsi3 (loop->iter_reg,
+ loop->iter_reg,
+ constm1_rtx),
+ loop->loop_end);
+
+ test = gen_rtx_NE (VOIDmode, loop->iter_reg, const0_rtx);
+ insn = emit_jump_insn_before (gen_cbranchsi4 (test,
+ loop->iter_reg, const0_rtx,
+ loop->start_label),
+ loop->loop_end);
+
+ JUMP_LABEL (insn) = loop->start_label;
+ LABEL_NUSES (loop->start_label)++;
+ delete_insn (loop->loop_end);
+ }
+}
+
+/* Called from bfin_reorg_loops when a potential loop end is found. LOOP is
+   a newly set up structure describing the loop; it is this function's
+   responsibility to fill in most of it.  TAIL_BB and TAIL_INSN point to the
+ loop_end insn and its enclosing basic block. */
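+/* A note on the expected shape (a sketch, inferred from the extractions
+   below): the loop_end insn is a PARALLEL whose element 0 is the
+   conditional branch back to the start label and whose element 1 sets
+   (decrements) the iteration register.  */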
+
+static void
+bfin_discover_loop (loop_info loop, basic_block tail_bb, rtx tail_insn)
+{
+ unsigned dwork = 0;
+ basic_block bb;
+ VEC (basic_block,heap) *works = VEC_alloc (basic_block,heap,20);
+
+ loop->tail = tail_bb;
+ loop->head = BRANCH_EDGE (tail_bb)->dest;
+ loop->successor = FALLTHRU_EDGE (tail_bb)->dest;
+ loop->loop_end = tail_insn;
+ loop->last_insn = NULL_RTX;
+ loop->iter_reg = SET_DEST (XVECEXP (PATTERN (tail_insn), 0, 1));
+ loop->depth = loop->length = 0;
+ loop->visited = 0;
+ loop->clobber_loop0 = loop->clobber_loop1 = 0;
+ loop->outer = NULL;
+ loop->loops = NULL;
+ loop->incoming = VEC_alloc (edge, gc, 2);
+ loop->start_label = XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (tail_insn), 0, 0)), 1), 0);
+ loop->end_label = NULL_RTX;
+ loop->bad = 0;
+
+ VEC_safe_push (basic_block, heap, works, loop->head);
+
+ while (VEC_iterate (basic_block, works, dwork++, bb))
+ {
+ edge e;
+ edge_iterator ei;
+ if (bb == EXIT_BLOCK_PTR)
+ {
+ /* We've reached the exit block. The loop must be bad. */
+ if (dump_file)
+ fprintf (dump_file,
+ ";; Loop is bad - reached exit block while scanning\n");
+ loop->bad = 1;
+ break;
+ }
+
+ if (!bitmap_set_bit (loop->block_bitmap, bb->index))
+ continue;
+
+ /* We've not seen this block before. Add it to the loop's
+ list and then add each successor to the work list. */
+
+ VEC_safe_push (basic_block, heap, loop->blocks, bb);
+
+ if (bb != tail_bb)
+ {
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ basic_block succ = EDGE_SUCC (bb, ei.index)->dest;
+ if (!REGNO_REG_SET_P (df_get_live_in (succ),
+ REGNO (loop->iter_reg)))
+ continue;
+ if (!VEC_space (basic_block, works, 1))
+ {
+ if (dwork)
+ {
+ VEC_block_remove (basic_block, works, 0, dwork);
+ dwork = 0;
+ }
+ else
+ VEC_reserve (basic_block, heap, works, 1);
+ }
+ VEC_quick_push (basic_block, works, succ);
+ }
+ }
+ }
+
+ /* Find the predecessor, and make sure nothing else jumps into this loop. */
+ if (!loop->bad)
+ {
+ int pass, retry;
+ FOR_EACH_VEC_ELT (basic_block, loop->blocks, dwork, bb)
+ {
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ basic_block pred = e->src;
+
+ if (!bfin_bb_in_loop (loop, pred))
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; Loop %d: incoming edge %d -> %d\n",
+ loop->loop_no, pred->index,
+ e->dest->index);
+ VEC_safe_push (edge, gc, loop->incoming, e);
+ }
+ }
+ }
+
+ for (pass = 0, retry = 1; retry && pass < 2; pass++)
+ {
+ edge e;
+ edge_iterator ei;
+ bool first = true;
+ retry = 0;
+
+ FOR_EACH_EDGE (e, ei, loop->incoming)
+ {
+ if (first)
+ {
+ loop->incoming_src = e->src;
+ loop->incoming_dest = e->dest;
+ first = false;
+ }
+ else
+ {
+ if (e->dest != loop->incoming_dest)
+ loop->incoming_dest = NULL;
+ if (e->src != loop->incoming_src)
+ loop->incoming_src = NULL;
+ }
+ if (loop->incoming_src == NULL && loop->incoming_dest == NULL)
+ {
+ if (pass == 0)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ ";; retrying loop %d with forwarder blocks\n",
+ loop->loop_no);
+ retry = 1;
+ break;
+ }
+ loop->bad = 1;
+ if (dump_file)
+ fprintf (dump_file,
+ ";; can't find suitable entry for loop %d\n",
+ loop->loop_no);
+ goto out;
+ }
+ }
+ if (retry)
+ {
+ retry = 0;
+ FOR_EACH_EDGE (e, ei, loop->incoming)
+ {
+ if (forwarder_block_p (e->src))
+ {
+ edge e2;
+ edge_iterator ei2;
+
+ if (dump_file)
+ fprintf (dump_file,
+ ";; Adding forwarder block %d to loop %d and retrying\n",
+ e->src->index, loop->loop_no);
+ VEC_safe_push (basic_block, heap, loop->blocks, e->src);
+ bitmap_set_bit (loop->block_bitmap, e->src->index);
+ FOR_EACH_EDGE (e2, ei2, e->src->preds)
+ VEC_safe_push (edge, gc, loop->incoming, e2);
+ VEC_unordered_remove (edge, loop->incoming, ei.index);
+ retry = 1;
+ break;
+ }
+ }
+ if (!retry)
+ {
+ if (dump_file)
+ fprintf (dump_file, ";; No forwarder blocks found\n");
+ loop->bad = 1;
+ }
+ }
+ }
+ }
+
+ out:
+ VEC_free (basic_block, heap, works);
+}
+
+/* Analyze the structure of the loops in the current function. Use STACK
+ for bitmap allocations. Returns all the valid candidates for hardware
+ loops found in this function. */
+static loop_info
+bfin_discover_loops (bitmap_obstack *stack, FILE *dump_file)
+{
+ loop_info loops = NULL;
+ loop_info loop;
+ basic_block bb;
+ bitmap tmp_bitmap;
+ int nloops = 0;
+
+ /* Find all the possible loop tails. This means searching for every
+ loop_end instruction. For each one found, create a loop_info
+ structure and add the head block to the work list. */
+ FOR_EACH_BB (bb)
+ {
+ rtx tail = BB_END (bb);
+
+ while (GET_CODE (tail) == NOTE)
+ tail = PREV_INSN (tail);
+
+ bb->aux = NULL;
+
+ if (INSN_P (tail) && recog_memoized (tail) == CODE_FOR_loop_end)
+ {
+ rtx insn;
+	  /* A possible loop end. */
+
+ /* There's a degenerate case we can handle - an empty loop consisting
+ of only a back branch. Handle that by deleting the branch. */
+ insn = BB_HEAD (BRANCH_EDGE (bb)->dest);
+ if (next_real_insn (insn) == tail)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, ";; degenerate loop ending at\n");
+ print_rtl_single (dump_file, tail);
+ }
+ delete_insn_and_edges (tail);
+ continue;
+ }
+
+ loop = XNEW (struct loop_info_d);
+ loop->next = loops;
+ loops = loop;
+ loop->loop_no = nloops++;
+ loop->blocks = VEC_alloc (basic_block, heap, 20);
+ loop->block_bitmap = BITMAP_ALLOC (stack);
+ bb->aux = loop;
+
+ if (dump_file)
+ {
+ fprintf (dump_file, ";; potential loop %d ending at\n",
+ loop->loop_no);
+ print_rtl_single (dump_file, tail);
+ }
+
+ bfin_discover_loop (loop, bb, tail);
+ }
+ }
+
+ tmp_bitmap = BITMAP_ALLOC (stack);
+ /* Compute loop nestings. */
+ for (loop = loops; loop; loop = loop->next)
+ {
+ loop_info other;
+ if (loop->bad)
+ continue;
+
+ for (other = loop->next; other; other = other->next)
+ {
+ if (other->bad)
+ continue;
+
+ bitmap_and (tmp_bitmap, other->block_bitmap, loop->block_bitmap);
+ if (bitmap_empty_p (tmp_bitmap))
+ continue;
+ if (bitmap_equal_p (tmp_bitmap, other->block_bitmap))
+ {
+ other->outer = loop;
+ VEC_safe_push (loop_info, heap, loop->loops, other);
+ }
+ else if (bitmap_equal_p (tmp_bitmap, loop->block_bitmap))
+ {
+ loop->outer = other;
+ VEC_safe_push (loop_info, heap, other->loops, loop);
+ }
+ else
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ ";; can't find suitable nesting for loops %d and %d\n",
+ loop->loop_no, other->loop_no);
+ loop->bad = other->bad = 1;
+ }
+ }
+ }
+ BITMAP_FREE (tmp_bitmap);
+
+ return loops;
+}
+
+/* Free up the loop structures in LOOPS. */
+static void
+free_loops (loop_info loops)
+{
+ while (loops)
+ {
+ loop_info loop = loops;
+ loops = loop->next;
+ VEC_free (loop_info, heap, loop->loops);
+ VEC_free (basic_block, heap, loop->blocks);
+ BITMAP_FREE (loop->block_bitmap);
+ XDELETE (loop);
+ }
+}
+
+#define BB_AUX_INDEX(BB) ((intptr_t)(BB)->aux)
+
+/* The taken-branch edge from the loop end can actually go forward. Since the
+ Blackfin's LSETUP instruction requires that the loop end be after the loop
+ start, try to reorder a loop's basic blocks when we find such a case. */
+static void
+bfin_reorder_loops (loop_info loops, FILE *dump_file)
+{
+ basic_block bb;
+ loop_info loop;
+
+ FOR_EACH_BB (bb)
+ bb->aux = NULL;
+ cfg_layout_initialize (0);
+
+ for (loop = loops; loop; loop = loop->next)
+ {
+ intptr_t index;
+ basic_block bb;
+ edge e;
+ edge_iterator ei;
+
+ if (loop->bad)
+ continue;
+
+ /* Recreate an index for basic blocks that represents their order. */
+ for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
+ bb != EXIT_BLOCK_PTR;
+ bb = bb->next_bb, index++)
+ bb->aux = (PTR) index;
+
+ if (BB_AUX_INDEX (loop->head) < BB_AUX_INDEX (loop->tail))
+ continue;
+
+ FOR_EACH_EDGE (e, ei, loop->head->succs)
+ {
+ if (bitmap_bit_p (loop->block_bitmap, e->dest->index)
+ && BB_AUX_INDEX (e->dest) < BB_AUX_INDEX (loop->tail))
+ {
+ basic_block start_bb = e->dest;
+ basic_block start_prev_bb = start_bb->prev_bb;
+
+ if (dump_file)
+ fprintf (dump_file, ";; Moving block %d before block %d\n",
+ loop->head->index, start_bb->index);
+ loop->head->prev_bb->next_bb = loop->head->next_bb;
+ loop->head->next_bb->prev_bb = loop->head->prev_bb;
+
+ loop->head->prev_bb = start_prev_bb;
+ loop->head->next_bb = start_bb;
+ start_prev_bb->next_bb = start_bb->prev_bb = loop->head;
+ break;
+ }
+ }
+ loops = loops->next;
+ }
+
+ FOR_EACH_BB (bb)
+ {
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ bb->aux = bb->next_bb;
+ else
+ bb->aux = NULL;
+ }
+ cfg_layout_finalize ();
+ df_analyze ();
+}
+
+/* Run from machine_dependent_reorg, this pass looks for doloop_end insns
+ and tries to rewrite the RTL of these loops so that proper Blackfin
+ hardware loops are generated. */
+
+static void
+bfin_reorg_loops (FILE *dump_file)
+{
+ loop_info loops = NULL;
+ loop_info loop;
+ basic_block bb;
+ bitmap_obstack stack;
+
+ bitmap_obstack_initialize (&stack);
+
+ if (dump_file)
+ fprintf (dump_file, ";; Find loops, first pass\n\n");
+
+ loops = bfin_discover_loops (&stack, dump_file);
+
+ if (dump_file)
+ bfin_dump_loops (loops);
+
+ bfin_reorder_loops (loops, dump_file);
+ free_loops (loops);
+
+ if (dump_file)
+ fprintf (dump_file, ";; Find loops, second pass\n\n");
+
+ loops = bfin_discover_loops (&stack, dump_file);
+ if (dump_file)
+ {
+ fprintf (dump_file, ";; All loops found:\n\n");
+ bfin_dump_loops (loops);
+ }
+
+ /* Now apply the optimizations. */
+ for (loop = loops; loop; loop = loop->next)
+ bfin_optimize_loop (loop);
+
+ if (dump_file)
+ {
+ fprintf (dump_file, ";; After hardware loops optimization:\n\n");
+ bfin_dump_loops (loops);
+ }
+
+ free_loops (loops);
+
+ if (dump_file)
+ print_rtl (dump_file, get_insns ());
+
+ FOR_EACH_BB (bb)
+ bb->aux = NULL;
+
+ splitting_loops = 1;
+ FOR_EACH_BB (bb)
+ {
+ rtx insn = BB_END (bb);
+ if (!JUMP_P (insn))
+ continue;
+
+ try_split (PATTERN (insn), insn, 1);
+ }
+ splitting_loops = 0;
+}
+
+/* Possibly generate a SEQUENCE out of three insns found in SLOT.
+ Returns true if we modified the insn chain, false otherwise. */
+static bool
+gen_one_bundle (rtx slot[3])
+{
+ gcc_assert (slot[1] != NULL_RTX);
+
+ /* Don't add extra NOPs if optimizing for size. */
+ if (optimize_size
+ && (slot[0] == NULL_RTX || slot[2] == NULL_RTX))
+ return false;
+
+ /* Verify that we really can do the multi-issue. */
+ if (slot[0])
+ {
+ rtx t = NEXT_INSN (slot[0]);
+ while (t != slot[1])
+ {
+ if (GET_CODE (t) != NOTE
+ || NOTE_KIND (t) != NOTE_INSN_DELETED)
+ return false;
+ t = NEXT_INSN (t);
+ }
+ }
+ if (slot[2])
+ {
+ rtx t = NEXT_INSN (slot[1]);
+ while (t != slot[2])
+ {
+ if (GET_CODE (t) != NOTE
+ || NOTE_KIND (t) != NOTE_INSN_DELETED)
+ return false;
+ t = NEXT_INSN (t);
+ }
+ }
+
+ if (slot[0] == NULL_RTX)
+ {
+ slot[0] = emit_insn_before (gen_mnop (), slot[1]);
+ df_insn_rescan (slot[0]);
+ }
+ if (slot[2] == NULL_RTX)
+ {
+ slot[2] = emit_insn_after (gen_forced_nop (), slot[1]);
+ df_insn_rescan (slot[2]);
+ }
+
+ /* Avoid line number information being printed inside one bundle. */
+ if (INSN_LOCATOR (slot[1])
+ && INSN_LOCATOR (slot[1]) != INSN_LOCATOR (slot[0]))
+ INSN_LOCATOR (slot[1]) = INSN_LOCATOR (slot[0]);
+ if (INSN_LOCATOR (slot[2])
+ && INSN_LOCATOR (slot[2]) != INSN_LOCATOR (slot[0]))
+ INSN_LOCATOR (slot[2]) = INSN_LOCATOR (slot[0]);
+
+ /* Terminate them with "|| " instead of ";" in the output. */
+ PUT_MODE (slot[0], SImode);
+ PUT_MODE (slot[1], SImode);
+ /* Terminate the bundle, for the benefit of reorder_var_tracking_notes. */
+ PUT_MODE (slot[2], QImode);
+ return true;
+}
+
+/* Go through all insns, and use the information generated during scheduling
+ to generate SEQUENCEs to represent bundles of instructions issued
+ simultaneously. */
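+/* For illustration (a sketch; exact mnemonics depend on the insns
+   bundled): a full bundle issues one 32-bit DSP insn in slot 0 and two
+   16-bit insns in slots 1 and 2, printed as e.g.
+       a0 += r1.l * r2.l || r7 = [p0++] || nop;  */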
+
+static void
+bfin_gen_bundles (void)
+{
+ basic_block bb;
+ FOR_EACH_BB (bb)
+ {
+ rtx insn, next;
+ rtx slot[3];
+ int n_filled = 0;
+
+ slot[0] = slot[1] = slot[2] = NULL_RTX;
+ for (insn = BB_HEAD (bb);; insn = next)
+ {
+ int at_end;
+ rtx delete_this = NULL_RTX;
+
+ if (NONDEBUG_INSN_P (insn))
+ {
+ enum attr_type type = get_attr_type (insn);
+
+ if (type == TYPE_STALL)
+ {
+ gcc_assert (n_filled == 0);
+ delete_this = insn;
+ }
+ else
+ {
+ if (type == TYPE_DSP32 || type == TYPE_DSP32SHIFTIMM)
+ slot[0] = insn;
+ else if (slot[1] == NULL_RTX)
+ slot[1] = insn;
+ else
+ slot[2] = insn;
+ n_filled++;
+ }
+ }
+
+ next = NEXT_INSN (insn);
+ while (next && insn != BB_END (bb)
+ && !(INSN_P (next)
+ && GET_CODE (PATTERN (next)) != USE
+ && GET_CODE (PATTERN (next)) != CLOBBER))
+ {
+ insn = next;
+ next = NEXT_INSN (insn);
+ }
+
+ /* BB_END can change due to emitting extra NOPs, so check here. */
+ at_end = insn == BB_END (bb);
+ if (delete_this == NULL_RTX && (at_end || GET_MODE (next) == TImode))
+ {
+ if ((n_filled < 2
+ || !gen_one_bundle (slot))
+ && slot[0] != NULL_RTX)
+ {
+ rtx pat = PATTERN (slot[0]);
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == UNSPEC
+ && XINT (SET_SRC (pat), 1) == UNSPEC_32BIT)
+ {
+ SET_SRC (pat) = XVECEXP (SET_SRC (pat), 0, 0);
+ INSN_CODE (slot[0]) = -1;
+ df_insn_rescan (slot[0]);
+ }
+ }
+ n_filled = 0;
+ slot[0] = slot[1] = slot[2] = NULL_RTX;
+ }
+ if (delete_this != NULL_RTX)
+ delete_insn (delete_this);
+ if (at_end)
+ break;
+ }
+ }
+}
+
+/* Ensure that no var tracking notes are emitted in the middle of a
+ three-instruction bundle. */
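+/* Implementation note (sketch): displaced notes are chained through
+   their PREV_INSN fields into QUEUE and spliced back in after the
+   QImode insn that terminates the bundle.  */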
+
+static void
+reorder_var_tracking_notes (void)
+{
+ basic_block bb;
+ FOR_EACH_BB (bb)
+ {
+ rtx insn, next;
+ rtx queue = NULL_RTX;
+ bool in_bundle = false;
+
+ for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = next)
+ {
+ next = NEXT_INSN (insn);
+
+ if (INSN_P (insn))
+ {
+ /* Emit queued up notes at the last instruction of a bundle. */
+ if (GET_MODE (insn) == QImode)
+ {
+ while (queue)
+ {
+ rtx next_queue = PREV_INSN (queue);
+ PREV_INSN (NEXT_INSN (insn)) = queue;
+ NEXT_INSN (queue) = NEXT_INSN (insn);
+ NEXT_INSN (insn) = queue;
+ PREV_INSN (queue) = insn;
+ queue = next_queue;
+ }
+ in_bundle = false;
+ }
+ else if (GET_MODE (insn) == SImode)
+ in_bundle = true;
+ }
+ else if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
+ {
+ if (in_bundle)
+ {
+ rtx prev = PREV_INSN (insn);
+ PREV_INSN (next) = prev;
+ NEXT_INSN (prev) = next;
+
+ PREV_INSN (insn) = queue;
+ queue = insn;
+ }
+ }
+ }
+ }
+}
+
+/* On some silicon revisions, functions shorter than a certain number of cycles
+ can cause unpredictable behaviour. Work around this by adding NOPs as
+ needed. */
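+/* Sketch of the accounting below: LINK counts as 4 cycles, UNLINK as 3,
+   MULSI3 as 5, and a push/pop multiple as one cycle per register; once 4
+   cycles of work precede any return, no NOP padding is required.  */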
+static void
+workaround_rts_anomaly (void)
+{
+ rtx insn, first_insn = NULL_RTX;
+ int cycles = 4;
+
+ if (! ENABLE_WA_RETS)
+ return;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+
+ if (BARRIER_P (insn))
+ return;
+
+ if (NOTE_P (insn) || LABEL_P (insn))
+ continue;
+
+ if (first_insn == NULL_RTX)
+ first_insn = insn;
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
+ || GET_CODE (pat) == ASM_INPUT || GET_CODE (pat) == ADDR_VEC
+ || GET_CODE (pat) == ADDR_DIFF_VEC || asm_noperands (pat) >= 0)
+ continue;
+
+ if (CALL_P (insn))
+ return;
+
+ if (JUMP_P (insn))
+ {
+ if (recog_memoized (insn) == CODE_FOR_return_internal)
+ break;
+
+ /* Nothing to worry about for direct jumps. */
+ if (!any_condjump_p (insn))
+ return;
+ if (cycles <= 1)
+ return;
+ cycles--;
+ }
+ else if (INSN_P (insn))
+ {
+ rtx pat = PATTERN (insn);
+ int this_cycles = 1;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ if (push_multiple_operation (pat, VOIDmode)
+ || pop_multiple_operation (pat, VOIDmode))
+ this_cycles = n_regs_to_save;
+ }
+ else
+ {
+ int icode = recog_memoized (insn);
+
+ if (icode == CODE_FOR_link)
+ this_cycles = 4;
+ else if (icode == CODE_FOR_unlink)
+ this_cycles = 3;
+ else if (icode == CODE_FOR_mulsi3)
+ this_cycles = 5;
+ }
+ if (this_cycles >= cycles)
+ return;
+
+ cycles -= this_cycles;
+ }
+ }
+ while (cycles > 0)
+ {
+ emit_insn_before (gen_nop (), first_insn);
+ cycles--;
+ }
+}
+
+/* Return an insn type for INSN that can be used by the caller for anomaly
+ workarounds. This differs from plain get_attr_type in that it handles
+ SEQUENCEs. */
+
+static enum attr_type
+type_for_anomaly (rtx insn)
+{
+ rtx pat = PATTERN (insn);
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ enum attr_type t;
+ t = get_attr_type (XVECEXP (pat, 0, 1));
+ if (t == TYPE_MCLD)
+ return t;
+ t = get_attr_type (XVECEXP (pat, 0, 2));
+ if (t == TYPE_MCLD)
+ return t;
+ return TYPE_MCST;
+ }
+ else
+ return get_attr_type (insn);
+}
+
+/* Return true iff the address found in MEM is based on the register
+ NP_REG and optionally has a positive offset. */
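+/* For example (assumed addresses): with NP_REG == P2, [P2], [P2++] and
+   [P2 + 4] qualify as harmless, while [P2 - 4] or [P1 + P2] do not.  */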
+static bool
+harmless_null_pointer_p (rtx mem, int np_reg)
+{
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == POST_INC || GET_CODE (mem) == POST_DEC)
+ mem = XEXP (mem, 0);
+ if (REG_P (mem) && (int) REGNO (mem) == np_reg)
+ return true;
+ if (GET_CODE (mem) == PLUS
+ && REG_P (XEXP (mem, 0)) && (int) REGNO (XEXP (mem, 0)) == np_reg)
+ {
+ mem = XEXP (mem, 1);
+ if (GET_CODE (mem) == CONST_INT && INTVAL (mem) > 0)
+ return true;
+ }
+ return false;
+}
+
+/* Return nonzero if INSN contains any loads that may trap. */
+
+static bool
+trapping_loads_p (rtx insn, int np_reg, bool after_np_branch)
+{
+ rtx mem = SET_SRC (single_set (insn));
+
+ if (!after_np_branch)
+ np_reg = -1;
+ return ((np_reg == -1 || !harmless_null_pointer_p (mem, np_reg))
+ && may_trap_p (mem));
+}
+
+/* Return INSN if it is of TYPE_MCLD. Alternatively, if INSN is the start of
+ a three-insn bundle, see if one of them is a load and return that if so.
+ Return NULL_RTX if the insn does not contain loads. */
+static rtx
+find_load (rtx insn)
+{
+ if (!NONDEBUG_INSN_P (insn))
+ return NULL_RTX;
+ if (get_attr_type (insn) == TYPE_MCLD)
+ return insn;
+ if (GET_MODE (insn) != SImode)
+ return NULL_RTX;
+ do {
+ insn = NEXT_INSN (insn);
+ if ((GET_MODE (insn) == SImode || GET_MODE (insn) == QImode)
+ && get_attr_type (insn) == TYPE_MCLD)
+ return insn;
+ } while (GET_MODE (insn) != QImode);
+ return NULL_RTX;
+}
+
+/* Determine whether PAT is an indirect call pattern. */
+static bool
+indirect_call_p (rtx pat)
+{
+ if (GET_CODE (pat) == PARALLEL)
+ pat = XVECEXP (pat, 0, 0);
+ if (GET_CODE (pat) == SET)
+ pat = SET_SRC (pat);
+ gcc_assert (GET_CODE (pat) == CALL);
+ pat = XEXP (pat, 0);
+ gcc_assert (GET_CODE (pat) == MEM);
+ pat = XEXP (pat, 0);
+
+ return REG_P (pat);
+}
+
+/* During workaround_speculation, track whether we're in the shadow of a
+ conditional branch that tests a P register for NULL. If so, we can omit
+ emitting NOPs if we see a load from that P register, since a speculative
+ access at address 0 isn't a problem, and the load is executed in all other
+ cases anyway.
+   Global for communication with note_np_check_stores through
+   note_stores.  */
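+/* Sketch of the kind of sequence this tracks (assumed example):
+       cc = p2 == 0;
+       if cc jump .Lskip;     // predicted not taken
+       r0 = [p2];             // shadow load; address 0 if p2 == 0
+   The load needs no NOP padding, since a speculative access at
+   address 0 is harmless.  */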
+int np_check_regno = -1;
+bool np_after_branch = false;
+
+/* Subroutine of workaround_speculation, called through note_stores. */
+static void
+note_np_check_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED)
+{
+ if (REG_P (x) && (REGNO (x) == REG_CC || (int) REGNO (x) == np_check_regno))
+ np_check_regno = -1;
+}
+
+static void
+workaround_speculation (void)
+{
+ rtx insn, next;
+ rtx last_condjump = NULL_RTX;
+ int cycles_since_jump = INT_MAX;
+ int delay_added = 0;
+
+ if (! ENABLE_WA_SPECULATIVE_LOADS && ! ENABLE_WA_SPECULATIVE_SYNCS
+ && ! ENABLE_WA_INDIRECT_CALLS)
+ return;
+
+ /* First pass: find predicted-false branches; if something after them
+ needs nops, insert them or change the branch to predict true. */
+ for (insn = get_insns (); insn; insn = next)
+ {
+ rtx pat;
+ int delay_needed = 0;
+
+ next = find_next_insn_start (insn);
+
+ if (NOTE_P (insn) || BARRIER_P (insn))
+ continue;
+
+ if (LABEL_P (insn))
+ {
+ np_check_regno = -1;
+ continue;
+ }
+
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
+ || GET_CODE (pat) == ADDR_VEC || GET_CODE (pat) == ADDR_DIFF_VEC)
+ continue;
+
+ if (GET_CODE (pat) == ASM_INPUT || asm_noperands (pat) >= 0)
+ {
+ np_check_regno = -1;
+ continue;
+ }
+
+ if (JUMP_P (insn))
+ {
+ /* Is this a condjump based on a null pointer comparison we saw
+ earlier? */
+ if (np_check_regno != -1
+ && recog_memoized (insn) == CODE_FOR_cbranchbi4)
+ {
+ rtx op = XEXP (SET_SRC (PATTERN (insn)), 0);
+ gcc_assert (GET_CODE (op) == EQ || GET_CODE (op) == NE);
+ if (GET_CODE (op) == NE)
+ np_after_branch = true;
+ }
+ if (any_condjump_p (insn)
+ && ! cbranch_predicted_taken_p (insn))
+ {
+ last_condjump = insn;
+ delay_added = 0;
+ cycles_since_jump = 0;
+ }
+ else
+ cycles_since_jump = INT_MAX;
+ }
+ else if (CALL_P (insn))
+ {
+ np_check_regno = -1;
+ if (cycles_since_jump < INT_MAX)
+ cycles_since_jump++;
+ if (indirect_call_p (pat) && ENABLE_WA_INDIRECT_CALLS)
+ {
+ delay_needed = 3;
+ }
+ }
+ else if (NONDEBUG_INSN_P (insn))
+ {
+ rtx load_insn = find_load (insn);
+ enum attr_type type = type_for_anomaly (insn);
+
+ if (cycles_since_jump < INT_MAX)
+ cycles_since_jump++;
+
+ /* Detect a comparison of a P register with zero. If we later
+ see a condjump based on it, we have found a null pointer
+ check. */
+ if (recog_memoized (insn) == CODE_FOR_compare_eq)
+ {
+ rtx src = SET_SRC (PATTERN (insn));
+ if (REG_P (XEXP (src, 0))
+ && P_REGNO_P (REGNO (XEXP (src, 0)))
+ && XEXP (src, 1) == const0_rtx)
+ {
+ np_check_regno = REGNO (XEXP (src, 0));
+ np_after_branch = false;
+ }
+ else
+ np_check_regno = -1;
+ }
+
+ if (load_insn && ENABLE_WA_SPECULATIVE_LOADS)
+ {
+ if (trapping_loads_p (load_insn, np_check_regno,
+ np_after_branch))
+ delay_needed = 4;
+ }
+ else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS)
+ delay_needed = 3;
+
+ /* See if we need to forget about a null pointer comparison
+ we found earlier. */
+ if (recog_memoized (insn) != CODE_FOR_compare_eq)
+ {
+ note_stores (PATTERN (insn), note_np_check_stores, NULL);
+ if (np_check_regno != -1)
+ {
+ if (find_regno_note (insn, REG_INC, np_check_regno))
+ np_check_regno = -1;
+ }
+ }
+
+ }
+
+ if (delay_needed > cycles_since_jump
+ && (delay_needed - cycles_since_jump) > delay_added)
+ {
+ rtx pat1;
+ int num_clobbers;
+ rtx *op = recog_data.operand;
+
+ delay_needed -= cycles_since_jump;
+
+ extract_insn (last_condjump);
+ if (optimize_size)
+ {
+ pat1 = gen_cbranch_predicted_taken (op[0], op[1], op[2],
+ op[3]);
+ cycles_since_jump = INT_MAX;
+ }
+ else
+ {
+ /* Do not adjust cycles_since_jump in this case, so that
+ we'll increase the number of NOPs for a subsequent insn
+ if necessary. */
+ pat1 = gen_cbranch_with_nops (op[0], op[1], op[2], op[3],
+ GEN_INT (delay_needed));
+ delay_added = delay_needed;
+ }
+ PATTERN (last_condjump) = pat1;
+ INSN_CODE (last_condjump) = recog (pat1, insn, &num_clobbers);
+ }
+ if (CALL_P (insn))
+ {
+ cycles_since_jump = INT_MAX;
+ delay_added = 0;
+ }
+ }
+
+ /* Second pass: for predicted-true branches, see if anything at the
+ branch destination needs extra nops. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ int cycles_since_jump;
+ if (JUMP_P (insn)
+ && any_condjump_p (insn)
+ && (INSN_CODE (insn) == CODE_FOR_cbranch_predicted_taken
+ || cbranch_predicted_taken_p (insn)))
+ {
+ rtx target = JUMP_LABEL (insn);
+ rtx label = target;
+ rtx next_tgt;
+
+ cycles_since_jump = 0;
+ for (; target && cycles_since_jump < 3; target = next_tgt)
+ {
+ rtx pat;
+
+ next_tgt = find_next_insn_start (target);
+
+ if (NOTE_P (target) || BARRIER_P (target) || LABEL_P (target))
+ continue;
+
+ pat = PATTERN (target);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
+ || GET_CODE (pat) == ASM_INPUT || GET_CODE (pat) == ADDR_VEC
+ || GET_CODE (pat) == ADDR_DIFF_VEC || asm_noperands (pat) >= 0)
+ continue;
+
+ if (NONDEBUG_INSN_P (target))
+ {
+ rtx load_insn = find_load (target);
+ enum attr_type type = type_for_anomaly (target);
+ int delay_needed = 0;
+ if (cycles_since_jump < INT_MAX)
+ cycles_since_jump++;
+
+ if (load_insn && ENABLE_WA_SPECULATIVE_LOADS)
+ {
+ if (trapping_loads_p (load_insn, -1, false))
+ delay_needed = 2;
+ }
+ else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS)
+ delay_needed = 2;
+
+ if (delay_needed > cycles_since_jump)
+ {
+ rtx prev = prev_real_insn (label);
+ delay_needed -= cycles_since_jump;
+ if (dump_file)
+ fprintf (dump_file, "Adding %d nops after %d\n",
+ delay_needed, INSN_UID (label));
+ if (JUMP_P (prev)
+ && INSN_CODE (prev) == CODE_FOR_cbranch_with_nops)
+ {
+ rtx x;
+ HOST_WIDE_INT v;
+
+ if (dump_file)
+ fprintf (dump_file,
+ "Reducing nops on insn %d.\n",
+ INSN_UID (prev));
+ x = PATTERN (prev);
+ x = XVECEXP (x, 0, 1);
+ v = INTVAL (XVECEXP (x, 0, 0)) - delay_needed;
+ XVECEXP (x, 0, 0) = GEN_INT (v);
+ }
+ while (delay_needed-- > 0)
+ emit_insn_after (gen_nop (), label);
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Called just before the final scheduling pass. If we need to insert NOPs
+ later on to work around speculative loads, insert special placeholder
+ insns that cause loads to be delayed for as many cycles as necessary
+ (and possible). This reduces the number of NOPs we need to add.
+ The dummy insns we generate are later removed by bfin_gen_bundles. */
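+/* Sketch of the mechanism: the placeholder emitted after a
+   predicted-false branch is (stall 3), a TYPE_STALL insn occupying three
+   cycles in the schedule; the one at a predicted-taken target is
+   (stall 1).  */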
+static void
+add_sched_insns_for_speculation (void)
+{
+ rtx insn;
+
+ if (! ENABLE_WA_SPECULATIVE_LOADS && ! ENABLE_WA_SPECULATIVE_SYNCS
+ && ! ENABLE_WA_INDIRECT_CALLS)
+ return;
+
+  /* First pass: after each predicted-false branch, insert a stall
+     placeholder so that later loads can be delayed. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+
+ if (NOTE_P (insn) || BARRIER_P (insn) || LABEL_P (insn))
+ continue;
+
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
+ || GET_CODE (pat) == ASM_INPUT || GET_CODE (pat) == ADDR_VEC
+ || GET_CODE (pat) == ADDR_DIFF_VEC || asm_noperands (pat) >= 0)
+ continue;
+
+ if (JUMP_P (insn))
+ {
+ if (any_condjump_p (insn)
+ && !cbranch_predicted_taken_p (insn))
+ {
+ rtx n = next_real_insn (insn);
+ emit_insn_before (gen_stall (GEN_INT (3)), n);
+ }
+ }
+ }
+
+  /* Second pass: for each predicted-true branch, insert a stall
+     placeholder at the branch destination unless one is already
+     present. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (JUMP_P (insn)
+ && any_condjump_p (insn)
+ && (cbranch_predicted_taken_p (insn)))
+ {
+ rtx target = JUMP_LABEL (insn);
+ rtx next = next_real_insn (target);
+
+ if (GET_CODE (PATTERN (next)) == UNSPEC_VOLATILE
+ && get_attr_type (next) == TYPE_STALL)
+ continue;
+ emit_insn_before (gen_stall (GEN_INT (1)), next);
+ }
+ }
+}
+
+/* We use the machine-specific reorg pass for emitting CSYNC instructions
+ after conditional branches as needed.
+
+ The Blackfin is unusual in that a code sequence like
+ if cc jump label
+   r0 = [p0]
+ may speculatively perform the load even if the condition isn't true. This
+ happens for a branch that is predicted not taken, because the pipeline
+ isn't flushed or stalled, so the early stages of the following instructions,
+ which perform the memory reference, are allowed to execute before the
+ jump condition is evaluated.
+ Therefore, we must insert additional instructions in all places where this
+ could lead to incorrect behavior. The manual recommends CSYNC, while
+ VDSP seems to use NOPs (even though its corresponding compiler option is
+ named CSYNC).
+
+ When optimizing for speed, we emit NOPs, which seems faster than a CSYNC.
+ When optimizing for size, we turn the branch into a predicted taken one.
+ This may be slower due to mispredicts, but saves code size. */
+
+static void
+bfin_reorg (void)
+{
+ /* We are freeing block_for_insn in the toplev to keep compatibility
+ with old MDEP_REORGS that are not CFG based. Recompute it now. */
+ compute_bb_for_insn ();
+
+ if (flag_schedule_insns_after_reload)
+ {
+ splitting_for_sched = 1;
+ split_all_insns ();
+ splitting_for_sched = 0;
+
+ add_sched_insns_for_speculation ();
+
+ timevar_push (TV_SCHED2);
+ if (flag_selective_scheduling2
+ && !maybe_skip_selective_scheduling ())
+ run_selective_scheduling ();
+ else
+ schedule_insns ();
+ timevar_pop (TV_SCHED2);
+
+ /* Examine the schedule and insert nops as necessary for 64-bit parallel
+ instructions. */
+ bfin_gen_bundles ();
+ }
+
+ df_analyze ();
+
+  /* Doloop optimization. */
+ if (cfun->machine->has_hardware_loops)
+ bfin_reorg_loops (dump_file);
+
+ workaround_speculation ();
+
+ if (flag_var_tracking)
+ {
+ timevar_push (TV_VAR_TRACKING);
+ variable_tracking_main ();
+ reorder_var_tracking_notes ();
+ timevar_pop (TV_VAR_TRACKING);
+ }
+
+ df_finish_pass (false);
+
+ workaround_rts_anomaly ();
+}
+
+/* Handle interrupt_handler, exception_handler and nmi_handler function
+ attributes; arguments as in struct attribute_spec.handler. */
+
+static tree
+handle_int_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree x = *node;
+ if (TREE_CODE (x) == FUNCTION_DECL)
+ x = TREE_TYPE (x);
+
+ if (TREE_CODE (x) != FUNCTION_TYPE)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+ else if (funkind (x) != SUBROUTINE)
+ error ("multiple function type attributes specified");
+
+ return NULL_TREE;
+}
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+ are compatible, and 2 if they are nearly compatible (which causes a
+ warning to be generated). */
+
+static int
+bfin_comp_type_attributes (const_tree type1, const_tree type2)
+{
+ e_funkind kind1, kind2;
+
+ if (TREE_CODE (type1) != FUNCTION_TYPE)
+ return 1;
+
+ kind1 = funkind (type1);
+ kind2 = funkind (type2);
+
+ if (kind1 != kind2)
+ return 0;
+
+  /* Check for mismatched modifiers. */
+ if (!lookup_attribute ("nesting", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("nesting", TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ if (!lookup_attribute ("saveall", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("saveall", TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ if (!lookup_attribute ("kspisusp", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("kspisusp", TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ if (!lookup_attribute ("longcall", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("longcall", TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ return 1;
+}
+
+/* Handle a "longcall" or "shortcall" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+bfin_handle_longcall_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE
+ && TREE_CODE (*node) != FIELD_DECL
+ && TREE_CODE (*node) != TYPE_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ if ((strcmp (IDENTIFIER_POINTER (name), "longcall") == 0
+ && lookup_attribute ("shortcall", TYPE_ATTRIBUTES (*node)))
+ || (strcmp (IDENTIFIER_POINTER (name), "shortcall") == 0
+ && lookup_attribute ("longcall", TYPE_ATTRIBUTES (*node))))
+ {
+ warning (OPT_Wattributes,
+ "can%'t apply both longcall and shortcall attributes to the same function");
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "l1_text" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+bfin_handle_l1_text_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ tree decl = *node;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error ("%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ /* The decl may have already been given a section attribute
+ from a previous declaration. Ensure they match. */
+ else if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ ".l1.text") != 0)
+ {
+ error ("section of %q+D conflicts with previous declaration",
+ decl);
+ *no_add_attrs = true;
+ }
+ else
+ DECL_SECTION_NAME (decl) = build_string (9, ".l1.text");
+
+ return NULL_TREE;
+}
+
+/* Handle a "l1_data", "l1_data_A" or "l1_data_B" attribute;
+ arguments as in struct attribute_spec.handler. */
+
+static tree
+bfin_handle_l1_data_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ tree decl = *node;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ {
+ error ("%qE attribute only applies to variables",
+ name);
+ *no_add_attrs = true;
+ }
+ else if (current_function_decl != NULL_TREE
+ && !TREE_STATIC (decl))
+ {
+ error ("%qE attribute cannot be specified for local variables",
+ name);
+ *no_add_attrs = true;
+ }
+ else
+ {
+ const char *section_name;
+
+ if (strcmp (IDENTIFIER_POINTER (name), "l1_data") == 0)
+ section_name = ".l1.data";
+ else if (strcmp (IDENTIFIER_POINTER (name), "l1_data_A") == 0)
+ section_name = ".l1.data.A";
+ else if (strcmp (IDENTIFIER_POINTER (name), "l1_data_B") == 0)
+ section_name = ".l1.data.B";
+ else
+ gcc_unreachable ();
+
+ /* The decl may have already been given a section attribute
+ from a previous declaration. Ensure they match. */
+ if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ section_name) != 0)
+ {
+ error ("section of %q+D conflicts with previous declaration",
+ decl);
+ *no_add_attrs = true;
+ }
+ else
+ DECL_SECTION_NAME (decl)
+ = build_string (strlen (section_name) + 1, section_name);
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "l2" attribute; arguments as in struct attribute_spec.handler. */
+
+static tree
+bfin_handle_l2_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ tree decl = *node;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ ".l2.text") != 0)
+ {
+ error ("section of %q+D conflicts with previous declaration",
+ decl);
+ *no_add_attrs = true;
+ }
+ else
+ DECL_SECTION_NAME (decl) = build_string (9, ".l2.text");
+ }
+ else if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ ".l2.data") != 0)
+ {
+ error ("section of %q+D conflicts with previous declaration",
+ decl);
+ *no_add_attrs = true;
+ }
+ else
+ DECL_SECTION_NAME (decl) = build_string (9, ".l2.data");
+ }
+
+ return NULL_TREE;
+}
+
+/* Table of valid machine attributes. */
+static const struct attribute_spec bfin_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "interrupt_handler", 0, 0, false, true, true, handle_int_attribute },
+ { "exception_handler", 0, 0, false, true, true, handle_int_attribute },
+ { "nmi_handler", 0, 0, false, true, true, handle_int_attribute },
+ { "nesting", 0, 0, false, true, true, NULL },
+ { "kspisusp", 0, 0, false, true, true, NULL },
+ { "saveall", 0, 0, false, true, true, NULL },
+ { "longcall", 0, 0, false, true, true, bfin_handle_longcall_attribute },
+ { "shortcall", 0, 0, false, true, true, bfin_handle_longcall_attribute },
+ { "l1_text", 0, 0, true, false, false, bfin_handle_l1_text_attribute },
+ { "l1_data", 0, 0, true, false, false, bfin_handle_l1_data_attribute },
+ { "l1_data_A", 0, 0, true, false, false, bfin_handle_l1_data_attribute },
+ { "l1_data_B", 0, 0, true, false, false, bfin_handle_l1_data_attribute },
+ { "l2", 0, 0, true, false, false, bfin_handle_l2_attribute },
+ { NULL, 0, 0, false, false, false, NULL }
+};
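+
+/* Usage sketch (assumed user-level code, not part of this file):
+     void isr (void) __attribute__ ((interrupt_handler, nesting));
+     int buf[64] __attribute__ ((l1_data_A));  */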
+
+/* Implementation of TARGET_ASM_INTEGER. When using FD-PIC, we need to
+ tell the assembler to generate pointers to function descriptors in
+ some cases. */
+
+static bool
+bfin_assemble_integer (rtx value, unsigned int size, int aligned_p)
+{
+ if (TARGET_FDPIC && size == UNITS_PER_WORD)
+ {
+ if (GET_CODE (value) == SYMBOL_REF
+ && SYMBOL_REF_FUNCTION_P (value))
+ {
+ fputs ("\t.picptr\tfuncdesc(", asm_out_file);
+ output_addr_const (asm_out_file, value);
+ fputs (")\n", asm_out_file);
+ return true;
+ }
+ if (!aligned_p)
+ {
+ /* We've set the unaligned SI op to NULL, so we always have to
+ handle the unaligned case here. */
+ assemble_integer_with_op ("\t.4byte\t", value);
+ return true;
+ }
+ }
+ return default_assemble_integer (value, size, aligned_p);
+}
+
+/* Output the assembler code for a thunk function. THUNK_DECL is the
+ declaration for the thunk function itself, FUNCTION is the decl for
+ the target function. DELTA is an immediate constant offset to be
+ added to THIS. If VCALL_OFFSET is nonzero, the word at
+ *(*this + vcall_offset) should be added to THIS. */
+
+static void
+bfin_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
+ tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset, tree function)
+{
+ rtx xops[3];
+ /* The this parameter is passed as the first argument. */
+ rtx this_rtx = gen_rtx_REG (Pmode, REG_R0);
+
+ /* Adjust the this parameter by a fixed constant. */
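+  /* The add-immediate form used below accepts only 7-bit signed
+     constants (-64..63), so nearby deltas are split into two adds and
+     anything larger is materialized in R3 first (a sketch of the
+     rationale; see the Blackfin ISA for the exact constraint).  */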
+ if (delta)
+ {
+ xops[1] = this_rtx;
+ if (delta >= -64 && delta <= 63)
+ {
+ xops[0] = GEN_INT (delta);
+ output_asm_insn ("%1 += %0;", xops);
+ }
+ else if (delta >= -128 && delta < -64)
+ {
+ xops[0] = GEN_INT (delta + 64);
+ output_asm_insn ("%1 += -64; %1 += %0;", xops);
+ }
+ else if (delta > 63 && delta <= 126)
+ {
+ xops[0] = GEN_INT (delta - 63);
+ output_asm_insn ("%1 += 63; %1 += %0;", xops);
+ }
+ else
+ {
+ xops[0] = GEN_INT (delta);
+ output_asm_insn ("r3.l = %h0; r3.h = %d0; %1 = %1 + r3;", xops);
+ }
+ }
+
+ /* Adjust the this parameter by a value stored in the vtable. */
+ if (vcall_offset)
+ {
+ rtx p2tmp = gen_rtx_REG (Pmode, REG_P2);
+ rtx tmp = gen_rtx_REG (Pmode, REG_R3);
+
+ xops[1] = tmp;
+ xops[2] = p2tmp;
+ output_asm_insn ("%2 = r0; %2 = [%2];", xops);
+
+ /* Adjust the this parameter. */
+ xops[0] = gen_rtx_MEM (Pmode, plus_constant (p2tmp, vcall_offset));
+ if (!memory_operand (xops[0], Pmode))
+ {
+ rtx tmp2 = gen_rtx_REG (Pmode, REG_P1);
+ xops[0] = GEN_INT (vcall_offset);
+ xops[1] = tmp2;
+ output_asm_insn ("%h1 = %h0; %d1 = %d0; %2 = %2 + %1", xops);
+ xops[0] = gen_rtx_MEM (Pmode, p2tmp);
+ }
+ xops[2] = this_rtx;
+ output_asm_insn ("%1 = %0; %2 = %2 + %1;", xops);
+ }
+
+ xops[0] = XEXP (DECL_RTL (function), 0);
+ if (1 || !flag_pic || (*targetm.binds_local_p) (function))
+ output_asm_insn ("jump.l\t%P0", xops);
+}
+
+/* Codes for all the Blackfin builtins. */
+enum bfin_builtins
+{
+ BFIN_BUILTIN_CSYNC,
+ BFIN_BUILTIN_SSYNC,
+ BFIN_BUILTIN_ONES,
+ BFIN_BUILTIN_COMPOSE_2X16,
+ BFIN_BUILTIN_EXTRACTLO,
+ BFIN_BUILTIN_EXTRACTHI,
+
+ BFIN_BUILTIN_SSADD_2X16,
+ BFIN_BUILTIN_SSSUB_2X16,
+ BFIN_BUILTIN_SSADDSUB_2X16,
+ BFIN_BUILTIN_SSSUBADD_2X16,
+ BFIN_BUILTIN_MULT_2X16,
+ BFIN_BUILTIN_MULTR_2X16,
+ BFIN_BUILTIN_NEG_2X16,
+ BFIN_BUILTIN_ABS_2X16,
+ BFIN_BUILTIN_MIN_2X16,
+ BFIN_BUILTIN_MAX_2X16,
+
+ BFIN_BUILTIN_SSADD_1X16,
+ BFIN_BUILTIN_SSSUB_1X16,
+ BFIN_BUILTIN_MULT_1X16,
+ BFIN_BUILTIN_MULTR_1X16,
+ BFIN_BUILTIN_NORM_1X16,
+ BFIN_BUILTIN_NEG_1X16,
+ BFIN_BUILTIN_ABS_1X16,
+ BFIN_BUILTIN_MIN_1X16,
+ BFIN_BUILTIN_MAX_1X16,
+
+ BFIN_BUILTIN_SUM_2X16,
+ BFIN_BUILTIN_DIFFHL_2X16,
+ BFIN_BUILTIN_DIFFLH_2X16,
+
+ BFIN_BUILTIN_SSADD_1X32,
+ BFIN_BUILTIN_SSSUB_1X32,
+ BFIN_BUILTIN_NORM_1X32,
+ BFIN_BUILTIN_ROUND_1X32,
+ BFIN_BUILTIN_NEG_1X32,
+ BFIN_BUILTIN_ABS_1X32,
+ BFIN_BUILTIN_MIN_1X32,
+ BFIN_BUILTIN_MAX_1X32,
+ BFIN_BUILTIN_MULT_1X32,
+ BFIN_BUILTIN_MULT_1X32X32,
+ BFIN_BUILTIN_MULT_1X32X32NS,
+
+ BFIN_BUILTIN_MULHISILL,
+ BFIN_BUILTIN_MULHISILH,
+ BFIN_BUILTIN_MULHISIHL,
+ BFIN_BUILTIN_MULHISIHH,
+
+ BFIN_BUILTIN_LSHIFT_1X16,
+ BFIN_BUILTIN_LSHIFT_2X16,
+ BFIN_BUILTIN_SSASHIFT_1X16,
+ BFIN_BUILTIN_SSASHIFT_2X16,
+ BFIN_BUILTIN_SSASHIFT_1X32,
+
+ BFIN_BUILTIN_CPLX_MUL_16,
+ BFIN_BUILTIN_CPLX_MAC_16,
+ BFIN_BUILTIN_CPLX_MSU_16,
+
+ BFIN_BUILTIN_CPLX_MUL_16_S40,
+ BFIN_BUILTIN_CPLX_MAC_16_S40,
+ BFIN_BUILTIN_CPLX_MSU_16_S40,
+
+ BFIN_BUILTIN_CPLX_SQU,
+
+ BFIN_BUILTIN_LOADBYTES,
+
+ BFIN_BUILTIN_MAX
+};
+
+#define def_builtin(NAME, TYPE, CODE) \
+do { \
+ add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
+ NULL, NULL_TREE); \
+} while (0)
+
+/* Set up all builtin functions for this target. */
+static void
+bfin_init_builtins (void)
+{
+ tree V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
+ tree void_ftype_void
+ = build_function_type (void_type_node, void_list_node);
+ tree short_ftype_short
+ = build_function_type_list (short_integer_type_node, short_integer_type_node,
+ NULL_TREE);
+ tree short_ftype_int_int
+ = build_function_type_list (short_integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_int_int
+ = build_function_type_list (integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_int
+ = build_function_type_list (integer_type_node, integer_type_node,
+ NULL_TREE);
+ tree short_ftype_int
+ = build_function_type_list (short_integer_type_node, integer_type_node,
+ NULL_TREE);
+ tree int_ftype_v2hi_v2hi
+ = build_function_type_list (integer_type_node, V2HI_type_node,
+ V2HI_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi_v2hi
+ = build_function_type_list (V2HI_type_node, V2HI_type_node,
+ V2HI_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi_v2hi_v2hi
+ = build_function_type_list (V2HI_type_node, V2HI_type_node,
+ V2HI_type_node, V2HI_type_node, NULL_TREE);
+ tree v2hi_ftype_int_int
+ = build_function_type_list (V2HI_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi_int
+ = build_function_type_list (V2HI_type_node, V2HI_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_short_short
+ = build_function_type_list (integer_type_node, short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi
+ = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
+ tree short_ftype_v2hi
+ = build_function_type_list (short_integer_type_node, V2HI_type_node,
+ NULL_TREE);
+ tree int_ftype_pint
+ = build_function_type_list (integer_type_node,
+ build_pointer_type (integer_type_node),
+ NULL_TREE);
+
+  /* Define the Blackfin builtin functions. */
+ def_builtin ("__builtin_bfin_csync", void_ftype_void, BFIN_BUILTIN_CSYNC);
+ def_builtin ("__builtin_bfin_ssync", void_ftype_void, BFIN_BUILTIN_SSYNC);
+
+ def_builtin ("__builtin_bfin_ones", short_ftype_int, BFIN_BUILTIN_ONES);
+
+ def_builtin ("__builtin_bfin_compose_2x16", v2hi_ftype_int_int,
+ BFIN_BUILTIN_COMPOSE_2X16);
+ def_builtin ("__builtin_bfin_extract_hi", short_ftype_v2hi,
+ BFIN_BUILTIN_EXTRACTHI);
+ def_builtin ("__builtin_bfin_extract_lo", short_ftype_v2hi,
+ BFIN_BUILTIN_EXTRACTLO);
+
+ def_builtin ("__builtin_bfin_min_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MIN_2X16);
+ def_builtin ("__builtin_bfin_max_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MAX_2X16);
+
+ def_builtin ("__builtin_bfin_add_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSADD_2X16);
+ def_builtin ("__builtin_bfin_sub_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSSUB_2X16);
+ def_builtin ("__builtin_bfin_dspaddsubsat", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSADDSUB_2X16);
+ def_builtin ("__builtin_bfin_dspsubaddsat", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSSUBADD_2X16);
+ def_builtin ("__builtin_bfin_mult_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULT_2X16);
+ def_builtin ("__builtin_bfin_multr_fr2x16", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULTR_2X16);
+ def_builtin ("__builtin_bfin_negate_fr2x16", v2hi_ftype_v2hi,
+ BFIN_BUILTIN_NEG_2X16);
+ def_builtin ("__builtin_bfin_abs_fr2x16", v2hi_ftype_v2hi,
+ BFIN_BUILTIN_ABS_2X16);
+
+ def_builtin ("__builtin_bfin_min_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_MIN_1X16);
+ def_builtin ("__builtin_bfin_max_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_MAX_1X16);
+
+ def_builtin ("__builtin_bfin_add_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_SSADD_1X16);
+ def_builtin ("__builtin_bfin_sub_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_SSSUB_1X16);
+ def_builtin ("__builtin_bfin_mult_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_MULT_1X16);
+ def_builtin ("__builtin_bfin_multr_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_MULTR_1X16);
+ def_builtin ("__builtin_bfin_negate_fr1x16", short_ftype_short,
+ BFIN_BUILTIN_NEG_1X16);
+ def_builtin ("__builtin_bfin_abs_fr1x16", short_ftype_short,
+ BFIN_BUILTIN_ABS_1X16);
+ def_builtin ("__builtin_bfin_norm_fr1x16", short_ftype_int,
+ BFIN_BUILTIN_NORM_1X16);
+
+ def_builtin ("__builtin_bfin_sum_fr2x16", short_ftype_v2hi,
+ BFIN_BUILTIN_SUM_2X16);
+ def_builtin ("__builtin_bfin_diff_hl_fr2x16", short_ftype_v2hi,
+ BFIN_BUILTIN_DIFFHL_2X16);
+ def_builtin ("__builtin_bfin_diff_lh_fr2x16", short_ftype_v2hi,
+ BFIN_BUILTIN_DIFFLH_2X16);
+
+ def_builtin ("__builtin_bfin_mulhisill", int_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULHISILL);
+ def_builtin ("__builtin_bfin_mulhisihl", int_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULHISIHL);
+ def_builtin ("__builtin_bfin_mulhisilh", int_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULHISILH);
+ def_builtin ("__builtin_bfin_mulhisihh", int_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_MULHISIHH);
+
+ def_builtin ("__builtin_bfin_min_fr1x32", int_ftype_int_int,
+ BFIN_BUILTIN_MIN_1X32);
+ def_builtin ("__builtin_bfin_max_fr1x32", int_ftype_int_int,
+ BFIN_BUILTIN_MAX_1X32);
+
+ def_builtin ("__builtin_bfin_add_fr1x32", int_ftype_int_int,
+ BFIN_BUILTIN_SSADD_1X32);
+ def_builtin ("__builtin_bfin_sub_fr1x32", int_ftype_int_int,
+ BFIN_BUILTIN_SSSUB_1X32);
+ def_builtin ("__builtin_bfin_negate_fr1x32", int_ftype_int,
+ BFIN_BUILTIN_NEG_1X32);
+ def_builtin ("__builtin_bfin_abs_fr1x32", int_ftype_int,
+ BFIN_BUILTIN_ABS_1X32);
+ def_builtin ("__builtin_bfin_norm_fr1x32", short_ftype_int,
+ BFIN_BUILTIN_NORM_1X32);
+ def_builtin ("__builtin_bfin_round_fr1x32", short_ftype_int,
+ BFIN_BUILTIN_ROUND_1X32);
+ def_builtin ("__builtin_bfin_mult_fr1x32", int_ftype_short_short,
+ BFIN_BUILTIN_MULT_1X32);
+ def_builtin ("__builtin_bfin_mult_fr1x32x32", int_ftype_int_int,
+ BFIN_BUILTIN_MULT_1X32X32);
+ def_builtin ("__builtin_bfin_mult_fr1x32x32NS", int_ftype_int_int,
+ BFIN_BUILTIN_MULT_1X32X32NS);
+
+ /* Shifts. */
+ def_builtin ("__builtin_bfin_shl_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_SSASHIFT_1X16);
+ def_builtin ("__builtin_bfin_shl_fr2x16", v2hi_ftype_v2hi_int,
+ BFIN_BUILTIN_SSASHIFT_2X16);
+ def_builtin ("__builtin_bfin_lshl_fr1x16", short_ftype_int_int,
+ BFIN_BUILTIN_LSHIFT_1X16);
+ def_builtin ("__builtin_bfin_lshl_fr2x16", v2hi_ftype_v2hi_int,
+ BFIN_BUILTIN_LSHIFT_2X16);
+ def_builtin ("__builtin_bfin_shl_fr1x32", int_ftype_int_int,
+ BFIN_BUILTIN_SSASHIFT_1X32);
+
+ /* Complex numbers. */
+ def_builtin ("__builtin_bfin_cmplx_add", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSADD_2X16);
+ def_builtin ("__builtin_bfin_cmplx_sub", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_SSSUB_2X16);
+ def_builtin ("__builtin_bfin_cmplx_mul", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MUL_16);
+ def_builtin ("__builtin_bfin_cmplx_mac", v2hi_ftype_v2hi_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MAC_16);
+ def_builtin ("__builtin_bfin_cmplx_msu", v2hi_ftype_v2hi_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MSU_16);
+ def_builtin ("__builtin_bfin_cmplx_mul_s40", v2hi_ftype_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MUL_16_S40);
+ def_builtin ("__builtin_bfin_cmplx_mac_s40", v2hi_ftype_v2hi_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MAC_16_S40);
+ def_builtin ("__builtin_bfin_cmplx_msu_s40", v2hi_ftype_v2hi_v2hi_v2hi,
+ BFIN_BUILTIN_CPLX_MSU_16_S40);
+ def_builtin ("__builtin_bfin_csqu_fr16", v2hi_ftype_v2hi,
+ BFIN_BUILTIN_CPLX_SQU);
+
+ /* "Unaligned" load. */
+ def_builtin ("__builtin_bfin_loadbytes", int_ftype_pint,
+ BFIN_BUILTIN_LOADBYTES);
+
+}
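+
+/* Usage sketch (assumed user code, not part of this file):
+     typedef short v2hi __attribute__ ((vector_size (4)));
+     v2hi x = __builtin_bfin_compose_2x16 (1, 2);
+     short s = __builtin_bfin_sum_fr2x16 (x);  */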
+
+
+struct builtin_description
+{
+ const enum insn_code icode;
+ const char *const name;
+ const enum bfin_builtins code;
+ int macflag;
+};
+
+static const struct builtin_description bdesc_2arg[] =
+{
+ { CODE_FOR_composev2hi, "__builtin_bfin_compose_2x16", BFIN_BUILTIN_COMPOSE_2X16, -1 },
+
+ { CODE_FOR_ssashiftv2hi3, "__builtin_bfin_shl_fr2x16", BFIN_BUILTIN_SSASHIFT_2X16, -1 },
+ { CODE_FOR_ssashifthi3, "__builtin_bfin_shl_fr1x16", BFIN_BUILTIN_SSASHIFT_1X16, -1 },
+ { CODE_FOR_lshiftv2hi3, "__builtin_bfin_lshl_fr2x16", BFIN_BUILTIN_LSHIFT_2X16, -1 },
+ { CODE_FOR_lshifthi3, "__builtin_bfin_lshl_fr1x16", BFIN_BUILTIN_LSHIFT_1X16, -1 },
+ { CODE_FOR_ssashiftsi3, "__builtin_bfin_shl_fr1x32", BFIN_BUILTIN_SSASHIFT_1X32, -1 },
+
+ { CODE_FOR_sminhi3, "__builtin_bfin_min_fr1x16", BFIN_BUILTIN_MIN_1X16, -1 },
+ { CODE_FOR_smaxhi3, "__builtin_bfin_max_fr1x16", BFIN_BUILTIN_MAX_1X16, -1 },
+ { CODE_FOR_ssaddhi3, "__builtin_bfin_add_fr1x16", BFIN_BUILTIN_SSADD_1X16, -1 },
+ { CODE_FOR_sssubhi3, "__builtin_bfin_sub_fr1x16", BFIN_BUILTIN_SSSUB_1X16, -1 },
+
+ { CODE_FOR_sminsi3, "__builtin_bfin_min_fr1x32", BFIN_BUILTIN_MIN_1X32, -1 },
+ { CODE_FOR_smaxsi3, "__builtin_bfin_max_fr1x32", BFIN_BUILTIN_MAX_1X32, -1 },
+ { CODE_FOR_ssaddsi3, "__builtin_bfin_add_fr1x32", BFIN_BUILTIN_SSADD_1X32, -1 },
+ { CODE_FOR_sssubsi3, "__builtin_bfin_sub_fr1x32", BFIN_BUILTIN_SSSUB_1X32, -1 },
+
+ { CODE_FOR_sminv2hi3, "__builtin_bfin_min_fr2x16", BFIN_BUILTIN_MIN_2X16, -1 },
+ { CODE_FOR_smaxv2hi3, "__builtin_bfin_max_fr2x16", BFIN_BUILTIN_MAX_2X16, -1 },
+ { CODE_FOR_ssaddv2hi3, "__builtin_bfin_add_fr2x16", BFIN_BUILTIN_SSADD_2X16, -1 },
+ { CODE_FOR_sssubv2hi3, "__builtin_bfin_sub_fr2x16", BFIN_BUILTIN_SSSUB_2X16, -1 },
+ { CODE_FOR_ssaddsubv2hi3, "__builtin_bfin_dspaddsubsat", BFIN_BUILTIN_SSADDSUB_2X16, -1 },
+ { CODE_FOR_sssubaddv2hi3, "__builtin_bfin_dspsubaddsat", BFIN_BUILTIN_SSSUBADD_2X16, -1 },
+
+ { CODE_FOR_flag_mulhisi, "__builtin_bfin_mult_fr1x32", BFIN_BUILTIN_MULT_1X32, MACFLAG_NONE },
+ { CODE_FOR_flag_mulhi, "__builtin_bfin_mult_fr1x16", BFIN_BUILTIN_MULT_1X16, MACFLAG_T },
+ { CODE_FOR_flag_mulhi, "__builtin_bfin_multr_fr1x16", BFIN_BUILTIN_MULTR_1X16, MACFLAG_NONE },
+ { CODE_FOR_flag_mulv2hi, "__builtin_bfin_mult_fr2x16", BFIN_BUILTIN_MULT_2X16, MACFLAG_T },
+ { CODE_FOR_flag_mulv2hi, "__builtin_bfin_multr_fr2x16", BFIN_BUILTIN_MULTR_2X16, MACFLAG_NONE },
+
+ { CODE_FOR_mulhisi_ll, "__builtin_bfin_mulhisill", BFIN_BUILTIN_MULHISILL, -1 },
+ { CODE_FOR_mulhisi_lh, "__builtin_bfin_mulhisilh", BFIN_BUILTIN_MULHISILH, -1 },
+ { CODE_FOR_mulhisi_hl, "__builtin_bfin_mulhisihl", BFIN_BUILTIN_MULHISIHL, -1 },
+ { CODE_FOR_mulhisi_hh, "__builtin_bfin_mulhisihh", BFIN_BUILTIN_MULHISIHH, -1 }
+
+};
+
+static const struct builtin_description bdesc_1arg[] =
+{
+ { CODE_FOR_loadbytes, "__builtin_bfin_loadbytes", BFIN_BUILTIN_LOADBYTES, 0 },
+
+ { CODE_FOR_ones, "__builtin_bfin_ones", BFIN_BUILTIN_ONES, 0 },
+
+ { CODE_FOR_signbitshi2, "__builtin_bfin_norm_fr1x16", BFIN_BUILTIN_NORM_1X16, 0 },
+ { CODE_FOR_ssneghi2, "__builtin_bfin_negate_fr1x16", BFIN_BUILTIN_NEG_1X16, 0 },
+ { CODE_FOR_abshi2, "__builtin_bfin_abs_fr1x16", BFIN_BUILTIN_ABS_1X16, 0 },
+
+ { CODE_FOR_signbitssi2, "__builtin_bfin_norm_fr1x32", BFIN_BUILTIN_NORM_1X32, 0 },
+ { CODE_FOR_ssroundsi2, "__builtin_bfin_round_fr1x32", BFIN_BUILTIN_ROUND_1X32, 0 },
+ { CODE_FOR_ssnegsi2, "__builtin_bfin_negate_fr1x32", BFIN_BUILTIN_NEG_1X32, 0 },
+ { CODE_FOR_ssabssi2, "__builtin_bfin_abs_fr1x32", BFIN_BUILTIN_ABS_1X32, 0 },
+
+ { CODE_FOR_movv2hi_hi_low, "__builtin_bfin_extract_lo", BFIN_BUILTIN_EXTRACTLO, 0 },
+ { CODE_FOR_movv2hi_hi_high, "__builtin_bfin_extract_hi", BFIN_BUILTIN_EXTRACTHI, 0 },
+ { CODE_FOR_ssnegv2hi2, "__builtin_bfin_negate_fr2x16", BFIN_BUILTIN_NEG_2X16, 0 },
+ { CODE_FOR_ssabsv2hi2, "__builtin_bfin_abs_fr2x16", BFIN_BUILTIN_ABS_2X16, 0 }
+};
+
+/* Errors in the source file can cause expand_expr to return const0_rtx
+ where we expect a vector. To avoid crashing, use one of the vector
+ clear instructions. */
+static rtx
+safe_vector_operand (rtx x, enum machine_mode mode)
+{
+ if (x != const0_rtx)
+ return x;
+ x = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
+ return gen_lowpart (mode, x);
+}
+
+/* Subroutine of bfin_expand_builtin to take care of binop insns. MACFLAG is -1
+ if this is a normal binary op, or one of the MACFLAG_xxx constants. */
+
+static rtx
+bfin_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
+ int macflag)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode op0mode = GET_MODE (op0);
+ enum machine_mode op1mode = GET_MODE (op1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if ((op0mode == SImode || op0mode == VOIDmode) && mode0 == HImode)
+ {
+ op0mode = HImode;
+ op0 = gen_lowpart (HImode, op0);
+ }
+ if ((op1mode == SImode || op1mode == VOIDmode) && mode1 == HImode)
+ {
+ op1mode = HImode;
+ op1 = gen_lowpart (HImode, op1);
+ }
+ /* In case the insn wants input operands in modes different from
+ the result, abort. */
+ gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
+ && (op1mode == mode1 || op1mode == VOIDmode));
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ if (macflag == -1)
+ pat = GEN_FCN (icode) (target, op0, op1);
+ else
+ pat = GEN_FCN (icode) (target, op0, op1, GEN_INT (macflag));
+ if (! pat)
+ return 0;
+
+ emit_insn (pat);
+ return target;
+}
+
+/* Subroutine of bfin_expand_builtin to take care of unop insns. */
+
+static rtx
+bfin_expand_unop_builtin (enum insn_code icode, tree exp,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ enum machine_mode op0mode = GET_MODE (op0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ if (op0mode == SImode && mode0 == HImode)
+ {
+ op0mode = HImode;
+ op0 = gen_lowpart (HImode, op0);
+ }
+ gcc_assert (op0mode == mode0 || op0mode == VOIDmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+bfin_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ size_t i;
+ enum insn_code icode;
+ const struct builtin_description *d;
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, arg2;
+ rtx op0, op1, op2, accvec, pat, tmp1, tmp2, a0reg, a1reg;
+ enum machine_mode tmode, mode0;
+
+ switch (fcode)
+ {
+ case BFIN_BUILTIN_CSYNC:
+ emit_insn (gen_csync ());
+ return 0;
+ case BFIN_BUILTIN_SSYNC:
+ emit_insn (gen_ssync ());
+ return 0;
+
+ case BFIN_BUILTIN_DIFFHL_2X16:
+ case BFIN_BUILTIN_DIFFLH_2X16:
+ case BFIN_BUILTIN_SUM_2X16:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ icode = (fcode == BFIN_BUILTIN_DIFFHL_2X16 ? CODE_FOR_subhilov2hi3
+ : fcode == BFIN_BUILTIN_DIFFLH_2X16 ? CODE_FOR_sublohiv2hi3
+ : CODE_FOR_ssaddhilov2hi3);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case BFIN_BUILTIN_MULT_1X32X32:
+ case BFIN_BUILTIN_MULT_1X32X32NS:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ if (! target
+ || !register_operand (target, SImode))
+ target = gen_reg_rtx (SImode);
+ if (! register_operand (op0, SImode))
+ op0 = copy_to_mode_reg (SImode, op0);
+ if (! register_operand (op1, SImode))
+ op1 = copy_to_mode_reg (SImode, op1);
+
+ a1reg = gen_rtx_REG (PDImode, REG_A1);
+ a0reg = gen_rtx_REG (PDImode, REG_A0);
+ tmp1 = gen_lowpart (V2HImode, op0);
+ tmp2 = gen_lowpart (V2HImode, op1);
+ emit_insn (gen_flag_macinit1hi (a1reg,
+ gen_lowpart (HImode, op0),
+ gen_lowpart (HImode, op1),
+ GEN_INT (MACFLAG_FU)));
+ emit_insn (gen_lshrpdi3 (a1reg, a1reg, GEN_INT (16)));
+
+ if (fcode == BFIN_BUILTIN_MULT_1X32X32)
+ emit_insn (gen_flag_mul_macv2hi_parts_acconly (a0reg, a1reg, tmp1, tmp2,
+ const1_rtx, const1_rtx,
+ const1_rtx, const0_rtx, a1reg,
+ const0_rtx, GEN_INT (MACFLAG_NONE),
+ GEN_INT (MACFLAG_M)));
+ else
+ {
+	  /* For saturating multiplication, there's exactly one special case
+	     to be handled: multiplying the smallest negative value with
+	     itself.  Due to shift correction in fractional multiplies, this
+	     can overflow (in 1.31 format, 0x80000000 represents -1.0, and
+	     the exact product +1.0 is not representable).  Iff this happens,
+	     OP2 will contain 1, which, when added in 32 bits to the smallest
+	     negative, wraps to the largest positive, which is the result we
+	     want.  */
+ op2 = gen_reg_rtx (V2HImode);
+ emit_insn (gen_packv2hi (op2, tmp1, tmp2, const0_rtx, const0_rtx));
+ emit_insn (gen_movsibi (gen_rtx_REG (BImode, REG_CC),
+ gen_lowpart (SImode, op2)));
+ emit_insn (gen_flag_mul_macv2hi_parts_acconly_andcc0 (a0reg, a1reg, tmp1, tmp2,
+ const1_rtx, const1_rtx,
+ const1_rtx, const0_rtx, a1reg,
+ const0_rtx, GEN_INT (MACFLAG_NONE),
+ GEN_INT (MACFLAG_M)));
+ op2 = gen_reg_rtx (SImode);
+ emit_insn (gen_movbisi (op2, gen_rtx_REG (BImode, REG_CC)));
+ }
+ emit_insn (gen_flag_machi_parts_acconly (a1reg, tmp2, tmp1,
+ const1_rtx, const0_rtx,
+ a1reg, const0_rtx, GEN_INT (MACFLAG_M)));
+ emit_insn (gen_ashrpdi3 (a1reg, a1reg, GEN_INT (15)));
+ emit_insn (gen_sum_of_accumulators (target, a0reg, a0reg, a1reg));
+ if (fcode == BFIN_BUILTIN_MULT_1X32X32NS)
+ emit_insn (gen_addsi3 (target, target, op2));
+ return target;
+
+ case BFIN_BUILTIN_CPLX_MUL_16:
+ case BFIN_BUILTIN_CPLX_MUL_16_S40:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ accvec = gen_reg_rtx (V2PDImode);
+ icode = CODE_FOR_flag_macv2hi_parts;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (! target
+ || GET_MODE (target) != V2HImode
+ || ! (*insn_data[icode].operand[0].predicate) (target, V2HImode))
+ target = gen_reg_rtx (tmode);
+ if (! register_operand (op0, GET_MODE (op0)))
+ op0 = copy_to_mode_reg (GET_MODE (op0), op0);
+ if (! register_operand (op1, GET_MODE (op1)))
+ op1 = copy_to_mode_reg (GET_MODE (op1), op1);
+
+ if (fcode == BFIN_BUILTIN_CPLX_MUL_16)
+ emit_insn (gen_flag_macinit1v2hi_parts (accvec, op0, op1, const0_rtx,
+ const0_rtx, const0_rtx,
+ const1_rtx, GEN_INT (MACFLAG_W32)));
+ else
+ emit_insn (gen_flag_macinit1v2hi_parts (accvec, op0, op1, const0_rtx,
+ const0_rtx, const0_rtx,
+ const1_rtx, GEN_INT (MACFLAG_NONE)));
+ emit_insn (gen_flag_macv2hi_parts (target, op0, op1, const1_rtx,
+ const1_rtx, const1_rtx,
+ const0_rtx, accvec, const1_rtx, const0_rtx,
+ GEN_INT (MACFLAG_NONE), accvec));
+
+ return target;
+
+ case BFIN_BUILTIN_CPLX_MAC_16:
+ case BFIN_BUILTIN_CPLX_MSU_16:
+ case BFIN_BUILTIN_CPLX_MAC_16_S40:
+ case BFIN_BUILTIN_CPLX_MSU_16_S40:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ accvec = gen_reg_rtx (V2PDImode);
+ icode = CODE_FOR_flag_macv2hi_parts;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (! target
+ || GET_MODE (target) != V2HImode
+ || ! (*insn_data[icode].operand[0].predicate) (target, V2HImode))
+ target = gen_reg_rtx (tmode);
+ if (! register_operand (op1, GET_MODE (op1)))
+ op1 = copy_to_mode_reg (GET_MODE (op1), op1);
+ if (! register_operand (op2, GET_MODE (op2)))
+ op2 = copy_to_mode_reg (GET_MODE (op2), op2);
+
+ tmp1 = gen_reg_rtx (SImode);
+ tmp2 = gen_reg_rtx (SImode);
+ emit_insn (gen_ashlsi3 (tmp1, gen_lowpart (SImode, op0), GEN_INT (16)));
+ emit_move_insn (tmp2, gen_lowpart (SImode, op0));
+ emit_insn (gen_movstricthi_1 (gen_lowpart (HImode, tmp2), const0_rtx));
+ emit_insn (gen_load_accumulator_pair (accvec, tmp1, tmp2));
+ if (fcode == BFIN_BUILTIN_CPLX_MAC_16
+ || fcode == BFIN_BUILTIN_CPLX_MSU_16)
+ emit_insn (gen_flag_macv2hi_parts_acconly (accvec, op1, op2, const0_rtx,
+ const0_rtx, const0_rtx,
+ const1_rtx, accvec, const0_rtx,
+ const0_rtx,
+ GEN_INT (MACFLAG_W32)));
+ else
+ emit_insn (gen_flag_macv2hi_parts_acconly (accvec, op1, op2, const0_rtx,
+ const0_rtx, const0_rtx,
+ const1_rtx, accvec, const0_rtx,
+ const0_rtx,
+ GEN_INT (MACFLAG_NONE)));
+ if (fcode == BFIN_BUILTIN_CPLX_MAC_16
+ || fcode == BFIN_BUILTIN_CPLX_MAC_16_S40)
+ {
+ tmp1 = const1_rtx;
+ tmp2 = const0_rtx;
+ }
+ else
+ {
+ tmp1 = const0_rtx;
+ tmp2 = const1_rtx;
+ }
+ emit_insn (gen_flag_macv2hi_parts (target, op1, op2, const1_rtx,
+ const1_rtx, const1_rtx,
+ const0_rtx, accvec, tmp1, tmp2,
+ GEN_INT (MACFLAG_NONE), accvec));
+
+ return target;
+
+ case BFIN_BUILTIN_CPLX_SQU:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ accvec = gen_reg_rtx (V2PDImode);
+ icode = CODE_FOR_flag_mulv2hi;
+ tmp1 = gen_reg_rtx (V2HImode);
+ tmp2 = gen_reg_rtx (V2HImode);
+
+ if (! target
+ || GET_MODE (target) != V2HImode
+ || ! (*insn_data[icode].operand[0].predicate) (target, V2HImode))
+ target = gen_reg_rtx (V2HImode);
+ if (! register_operand (op0, GET_MODE (op0)))
+ op0 = copy_to_mode_reg (GET_MODE (op0), op0);
+
+ emit_insn (gen_flag_mulv2hi (tmp1, op0, op0, GEN_INT (MACFLAG_NONE)));
+
+ emit_insn (gen_flag_mulhi_parts (gen_lowpart (HImode, tmp2), op0, op0,
+ const0_rtx, const1_rtx,
+ GEN_INT (MACFLAG_NONE)));
+
+ emit_insn (gen_ssaddhi3_high_parts (target, tmp2, tmp2, tmp2, const0_rtx,
+ const0_rtx));
+ emit_insn (gen_sssubhi3_low_parts (target, target, tmp1, tmp1,
+ const0_rtx, const1_rtx));
+
+ return target;
+
+ default:
+ break;
+ }
+
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ return bfin_expand_binop_builtin (d->icode, exp, target,
+ d->macflag);
+
+ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return bfin_expand_unop_builtin (d->icode, exp, target);
+
+ gcc_unreachable ();
+}
+
+static void
+bfin_conditional_register_usage (void)
+{
+  /* Initialize rtx for the condition code flag and RETS registers.  */
+ bfin_cc_rtx = gen_rtx_REG (BImode, REG_CC);
+ bfin_rets_rtx = gen_rtx_REG (Pmode, REG_RETS);
+ if (TARGET_FDPIC)
+ call_used_regs[FDPIC_REGNO] = 1;
+ if (!TARGET_FDPIC && flag_pic)
+ {
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ }
+}
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS bfin_init_builtins
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN bfin_expand_builtin
+
+#undef TARGET_ASM_GLOBALIZE_LABEL
+#define TARGET_ASM_GLOBALIZE_LABEL bfin_globalize_label
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START output_file_start
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE bfin_attribute_table
+
+#undef TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES bfin_comp_type_attributes
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS bfin_rtx_costs
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST bfin_address_cost
+
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER bfin_assemble_integer
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG bfin_reorg
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL bfin_function_ok_for_sibcall
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK bfin_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST bfin_adjust_cost
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE bfin_issue_rate
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
+
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES bfin_arg_partial_bytes
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG bfin_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE bfin_function_arg_advance
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE bfin_pass_by_reference
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX bfin_struct_value_rtx
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P bfin_vector_mode_supported_p
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION bfin_handle_option
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE bfin_option_override
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD bfin_secondary_reload
+
+#undef TARGET_CLASS_LIKELY_SPILLED_P
+#define TARGET_CLASS_LIKELY_SPILLED_P bfin_class_likely_spilled_p
+
+#undef TARGET_DELEGITIMIZE_ADDRESS
+#define TARGET_DELEGITIMIZE_ADDRESS bfin_delegitimize_address
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM bfin_cannot_force_const_mem
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY bfin_return_in_memory
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P bfin_legitimate_address_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED bfin_frame_pointer_required
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE bfin_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE bfin_conditional_register_usage
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE bfin_asm_trampoline_template
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT bfin_trampoline_init
+
+/* Passes after sched2 can break the helpful TImode annotations that
+ haifa-sched puts on every insn. Just do scheduling in reorg. */
+#undef TARGET_DELAY_SCHED2
+#define TARGET_DELAY_SCHED2 true
+
+/* Variable tracking should be run after all optimizations which
+   change the order of insns.  It also needs a valid CFG.  */
+#undef TARGET_DELAY_VARTRACK
+#define TARGET_DELAY_VARTRACK true
+
+struct gcc_target targetm = TARGET_INITIALIZER;
diff --git a/gcc/config/bfin/bfin.h b/gcc/config/bfin/bfin.h
new file mode 100644
index 000000000..c26b41cc5
--- /dev/null
+++ b/gcc/config/bfin/bfin.h
@@ -0,0 +1,1220 @@
+/* Definitions for the Blackfin port.
+ Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _BFIN_CONFIG
+#define _BFIN_CONFIG
+
+#define OBJECT_FORMAT_ELF
+
+#define BRT 1
+#define BRF 0
+
+/* CPU type. */
+typedef enum bfin_cpu_type
+{
+ BFIN_CPU_UNKNOWN,
+ BFIN_CPU_BF512,
+ BFIN_CPU_BF514,
+ BFIN_CPU_BF516,
+ BFIN_CPU_BF518,
+ BFIN_CPU_BF522,
+ BFIN_CPU_BF523,
+ BFIN_CPU_BF524,
+ BFIN_CPU_BF525,
+ BFIN_CPU_BF526,
+ BFIN_CPU_BF527,
+ BFIN_CPU_BF531,
+ BFIN_CPU_BF532,
+ BFIN_CPU_BF533,
+ BFIN_CPU_BF534,
+ BFIN_CPU_BF536,
+ BFIN_CPU_BF537,
+ BFIN_CPU_BF538,
+ BFIN_CPU_BF539,
+ BFIN_CPU_BF542,
+ BFIN_CPU_BF542M,
+ BFIN_CPU_BF544,
+ BFIN_CPU_BF544M,
+ BFIN_CPU_BF547,
+ BFIN_CPU_BF547M,
+ BFIN_CPU_BF548,
+ BFIN_CPU_BF548M,
+ BFIN_CPU_BF549,
+ BFIN_CPU_BF549M,
+ BFIN_CPU_BF561
+} bfin_cpu_t;
+
+/* Value of -mcpu= */
+extern bfin_cpu_t bfin_cpu_type;
+
+/* Value of -msi-revision= */
+extern int bfin_si_revision;
+
+extern unsigned int bfin_workarounds;
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION fprintf (stderr, " (BlackFin bfin)")
+
+/* Macros predefined in the preprocessor for this target machine.  */
+#ifndef TARGET_CPU_CPP_BUILTINS
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("bfin"); \
+ builtin_define_std ("BFIN"); \
+ builtin_define ("__ADSPBLACKFIN__"); \
+ builtin_define ("__ADSPLPBLACKFIN__"); \
+ \
+ switch (bfin_cpu_type) \
+ { \
+ case BFIN_CPU_BF512: \
+ builtin_define ("__ADSPBF512__"); \
+ builtin_define ("__ADSPBF51x__"); \
+ break; \
+ case BFIN_CPU_BF514: \
+ builtin_define ("__ADSPBF514__"); \
+ builtin_define ("__ADSPBF51x__"); \
+ break; \
+ case BFIN_CPU_BF516: \
+ builtin_define ("__ADSPBF516__"); \
+ builtin_define ("__ADSPBF51x__"); \
+ break; \
+ case BFIN_CPU_BF518: \
+ builtin_define ("__ADSPBF518__"); \
+ builtin_define ("__ADSPBF51x__"); \
+ break; \
+ case BFIN_CPU_BF522: \
+ builtin_define ("__ADSPBF522__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF523: \
+ builtin_define ("__ADSPBF523__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF524: \
+ builtin_define ("__ADSPBF524__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF525: \
+ builtin_define ("__ADSPBF525__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF526: \
+ builtin_define ("__ADSPBF526__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF527: \
+ builtin_define ("__ADSPBF527__"); \
+ builtin_define ("__ADSPBF52x__"); \
+ break; \
+ case BFIN_CPU_BF531: \
+ builtin_define ("__ADSPBF531__"); \
+ break; \
+ case BFIN_CPU_BF532: \
+ builtin_define ("__ADSPBF532__"); \
+ break; \
+ case BFIN_CPU_BF533: \
+ builtin_define ("__ADSPBF533__"); \
+ break; \
+ case BFIN_CPU_BF534: \
+ builtin_define ("__ADSPBF534__"); \
+ break; \
+ case BFIN_CPU_BF536: \
+ builtin_define ("__ADSPBF536__"); \
+ break; \
+ case BFIN_CPU_BF537: \
+ builtin_define ("__ADSPBF537__"); \
+ break; \
+ case BFIN_CPU_BF538: \
+ builtin_define ("__ADSPBF538__"); \
+ break; \
+ case BFIN_CPU_BF539: \
+ builtin_define ("__ADSPBF539__"); \
+ break; \
+ case BFIN_CPU_BF542M: \
+ builtin_define ("__ADSPBF542M__"); \
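+	/* Fall through: also define the BF542 macros.  */	\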
+ case BFIN_CPU_BF542: \
+ builtin_define ("__ADSPBF542__"); \
+ builtin_define ("__ADSPBF54x__"); \
+ break; \
+ case BFIN_CPU_BF544M: \
+ builtin_define ("__ADSPBF544M__"); \
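+	/* Fall through: also define the BF544 macros.  */	\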
+ case BFIN_CPU_BF544: \
+ builtin_define ("__ADSPBF544__"); \
+ builtin_define ("__ADSPBF54x__"); \
+ break; \
+ case BFIN_CPU_BF547M: \
+ builtin_define ("__ADSPBF547M__"); \
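+	/* Fall through: also define the BF547 macros.  */	\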
+ case BFIN_CPU_BF547: \
+ builtin_define ("__ADSPBF547__"); \
+ builtin_define ("__ADSPBF54x__"); \
+ break; \
+ case BFIN_CPU_BF548M: \
+ builtin_define ("__ADSPBF548M__"); \
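+	/* Fall through: also define the BF548 macros.  */	\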
+ case BFIN_CPU_BF548: \
+ builtin_define ("__ADSPBF548__"); \
+ builtin_define ("__ADSPBF54x__"); \
+ break; \
+ case BFIN_CPU_BF549M: \
+ builtin_define ("__ADSPBF549M__"); \
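+	/* Fall through: also define the BF549 macros.  */	\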
+ case BFIN_CPU_BF549: \
+ builtin_define ("__ADSPBF549__"); \
+ builtin_define ("__ADSPBF54x__"); \
+ break; \
+ case BFIN_CPU_BF561: \
+ builtin_define ("__ADSPBF561__"); \
+ break; \
+ } \
+ \
+ if (bfin_si_revision != -1) \
+ { \
+	  /* Space for "0xnnnn" and a NUL.  */			\
+ char *buf = XALLOCAVEC (char, 7); \
+ \
+ sprintf (buf, "0x%04x", bfin_si_revision); \
+ builtin_define_with_value ("__SILICON_REVISION__", buf, 0); \
+ } \
+ \
+ if (bfin_workarounds) \
+ builtin_define ("__WORKAROUNDS_ENABLED"); \
+ if (ENABLE_WA_SPECULATIVE_LOADS) \
+ builtin_define ("__WORKAROUND_SPECULATIVE_LOADS"); \
+ if (ENABLE_WA_SPECULATIVE_SYNCS) \
+ builtin_define ("__WORKAROUND_SPECULATIVE_SYNCS"); \
+ if (ENABLE_WA_INDIRECT_CALLS) \
+ builtin_define ("__WORKAROUND_INDIRECT_CALLS"); \
+ if (ENABLE_WA_RETS) \
+ builtin_define ("__WORKAROUND_RETS"); \
+ \
+ if (TARGET_FDPIC) \
+ { \
+ builtin_define ("__BFIN_FDPIC__"); \
+ builtin_define ("__FDPIC__"); \
+ } \
+ if (TARGET_ID_SHARED_LIBRARY \
+ && !TARGET_SEP_DATA) \
+ builtin_define ("__ID_SHARED_LIB__"); \
+ if (flag_no_builtin) \
+ builtin_define ("__NO_BUILTIN"); \
+ if (TARGET_MULTICORE) \
+ builtin_define ("__BFIN_MULTICORE"); \
+ if (TARGET_COREA) \
+ builtin_define ("__BFIN_COREA"); \
+ if (TARGET_COREB) \
+ builtin_define ("__BFIN_COREB"); \
+ if (TARGET_SDRAM) \
+ builtin_define ("__BFIN_SDRAM"); \
+ } \
+ while (0)
+#endif
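+
+/* Illustrative use of the predefines above (a sketch, not from this file):
+
+     #if defined (__ADSPBF54x__) && defined (__WORKAROUND_SPECULATIVE_LOADS)
+     ...part-specific workaround code...
+     #endif
+
+   Exactly which macros are visible depends on -mcpu and the workaround
+   flags handled above.  */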
+
+#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS "\
+ %{mleaf-id-shared-library:%{!mid-shared-library:-mid-shared-library}} \
+ %{mfdpic:%{!fpic:%{!fpie:%{!fPIC:%{!fPIE:\
+ %{!fno-pic:%{!fno-pie:%{!fno-PIC:%{!fno-PIE:-fpie}}}}}}}}} \
+"
+#ifndef SUBTARGET_DRIVER_SELF_SPECS
+# define SUBTARGET_DRIVER_SELF_SPECS
+#endif
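+
+/* For example (an illustrative reading of DRIVER_SELF_SPECS above):
+   compiling with `-mfdpic' and no explicit -f{no-,}pi{c,e} option behaves
+   as if `-fpie' had also been passed.  */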
+
+#define LINK_GCC_C_SEQUENCE_SPEC "\
+ %{mfast-fp:-lbffastfp} %G %L %{mfast-fp:-lbffastfp} %G \
+"
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+ %{mno-fdpic:-mnopic} %{mfdpic}"
+
+#define LINK_SPEC "\
+%{h*} %{v:-V} \
+%{mfdpic:-melf32bfinfd -z text} \
+%{static:-dn -Bstatic} \
+%{shared:-G -Bdynamic} \
+%{symbolic:-Bsymbolic} \
+-init __init -fini __fini "
+
+/* Generate DSP instructions, like DSP halfword loads */
+#define TARGET_DSP (1)
+
+#define TARGET_DEFAULT 0
+
+/* Maximum number of library ids we permit */
+#define MAX_LIBRARY_ID 255
+
+extern const char *bfin_library_id_string;
+
+#define FUNCTION_MODE SImode
+#define Pmode SImode
+
+/* Store-condition-code instructions store 0 for false.
+   This is the value stored for true.  */
+#define STORE_FLAG_VALUE 1
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+#define STACK_PUSH_CODE PRE_DEC
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* We define a dummy ARGP register; the parameters start at offset 0 from
+ it. */
+#define FIRST_PARM_OFFSET(DECL) 0
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM REG_P6
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM REG_P7
+
+/* A dummy register that will be eliminated to either FP or SP. */
+#define ARG_POINTER_REGNUM REG_ARGP
+
+/* `PIC_OFFSET_TABLE_REGNUM'
+ The register number of the register used to address a table of
+ static data addresses in memory. In some cases this register is
+ defined by a processor's "application binary interface" (ABI).
+ When this macro is defined, RTL is generated for this register
+ once, as with the stack pointer and frame pointer registers. If
+ this macro is not defined, it is up to the machine-dependent files
+ to allocate such a register (if necessary). */
+#define PIC_OFFSET_TABLE_REGNUM (REG_P5)
+
+#define FDPIC_FPTR_REGNO REG_P1
+#define FDPIC_REGNO REG_P3
+#define OUR_FDPIC_REG get_hard_reg_initial_val (SImode, FDPIC_REGNO)
+
+/* A static chain register for nested functions. We need to use a
+ call-clobbered register for this. */
+#define STATIC_CHAIN_REGNUM REG_P2
+
+/* Define this if functions should assume that stack space has been
+ allocated for arguments even when their values are passed in
+ registers.
+
+ The value of this macro is the size, in bytes, of the area reserved for
+ arguments passed in registers.
+
+ This space can either be allocated by the caller or be a part of the
+ machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE'
+ says which. */
+#define FIXED_STACK_AREA 12
+#define REG_PARM_STACK_SPACE(FNDECL) FIXED_STACK_AREA
+
+/* Define this if the above stack space is to be considered part of the
+   space allocated by the caller.  */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable crtl->outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* DATA_ALIGNMENT (TYPE, BASIC_ALIGN) could be defined for arrays.  */
+
+/* If defined, a C expression to compute the alignment for a local
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. */
+
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) bfin_local_alignment ((TYPE), (ALIGN))
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define TRAMPOLINE_SIZE (TARGET_FDPIC ? 30 : 18)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+   There are two registers that can always be eliminated on the Blackfin.
+   The arg pointer can be replaced by either the frame pointer or the
+   stack pointer, depending upon the circumstances; the frame pointer
+   itself can be eliminated in favor of the stack pointer.  */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} \
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = bfin_initial_elimination_offset ((FROM), (TO)))
+
+/* This processor has
+   8 data registers for doing arithmetic
+   8 pointer registers for doing addressing, including
+      1 stack pointer P6
+      1 frame pointer P7
+   4 sets of indexing registers (I0-3, B0-3, L0-3, M0-3)
+   1 condition code flag register CC
+   5 return address registers RETS/I/X/N/E
+   1 arithmetic status register (ASTAT).  */
+
+#define FIRST_PSEUDO_REGISTER 50
+
+#define D_REGNO_P(X) ((X) <= REG_R7)
+#define P_REGNO_P(X) ((X) >= REG_P0 && (X) <= REG_P7)
+#define I_REGNO_P(X) ((X) >= REG_I0 && (X) <= REG_I3)
+#define DP_REGNO_P(X) (D_REGNO_P (X) || P_REGNO_P (X))
+#define ADDRESS_REGNO_P(X) ((X) >= REG_P0 && (X) <= REG_M3)
+#define DREG_P(X) (REG_P (X) && D_REGNO_P (REGNO (X)))
+#define PREG_P(X) (REG_P (X) && P_REGNO_P (REGNO (X)))
+#define IREG_P(X) (REG_P (X) && I_REGNO_P (REGNO (X)))
+#define DPREG_P(X) (REG_P (X) && DP_REGNO_P (REGNO (X)))
+
+#define REGISTER_NAMES { \
+ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", \
+ "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP", \
+ "I0", "I1", "I2", "I3", "B0", "B1", "B2", "B3", \
+ "L0", "L1", "L2", "L3", "M0", "M1", "M2", "M3", \
+ "A0", "A1", \
+ "CC", \
+ "RETS", "RETI", "RETX", "RETN", "RETE", "ASTAT", "SEQSTAT", "USP", \
+ "ARGP", \
+ "LT0", "LT1", "LC0", "LC1", "LB0", "LB1" \
+}
+
+#define SHORT_REGISTER_NAMES { \
+ "R0.L", "R1.L", "R2.L", "R3.L", "R4.L", "R5.L", "R6.L", "R7.L", \
+ "P0.L", "P1.L", "P2.L", "P3.L", "P4.L", "P5.L", "SP.L", "FP.L", \
+ "I0.L", "I1.L", "I2.L", "I3.L", "B0.L", "B1.L", "B2.L", "B3.L", \
+ "L0.L", "L1.L", "L2.L", "L3.L", "M0.L", "M1.L", "M2.L", "M3.L", }
+
+#define HIGH_REGISTER_NAMES { \
+ "R0.H", "R1.H", "R2.H", "R3.H", "R4.H", "R5.H", "R6.H", "R7.H", \
+ "P0.H", "P1.H", "P2.H", "P3.H", "P4.H", "P5.H", "SP.H", "FP.H", \
+ "I0.H", "I1.H", "I2.H", "I3.H", "B0.H", "B1.H", "B2.H", "B3.H", \
+ "L0.H", "L1.H", "L2.H", "L3.H", "M0.H", "M1.H", "M2.H", "M3.H", }
+
+#define DREGS_PAIR_NAMES { \
+ "R1:0.p", 0, "R3:2.p", 0, "R5:4.p", 0, "R7:6.p", 0, }
+
+#define BYTE_REGISTER_NAMES { \
+ "R0.B", "R1.B", "R2.B", "R3.B", "R4.B", "R5.B", "R6.B", "R7.B", }
+
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+
+#define FIXED_REGISTERS \
+/*r0 r1 r2 r3 r4 r5 r6 r7 p0 p1 p2 p3 p4 p5 p6 p7 */ \
+{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \
+/*i0 i1 i2 i3 b0 b1 b2 b3 l0 l1 l2 l3 m0 m1 m2 m3 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, \
+/*a0 a1 cc rets/i/x/n/e astat seqstat usp argp lt0/1 lc0/1 */ \
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+/*lb0/1 */ \
+ 1, 1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+/*r0 r1 r2 r3 r4 r5 r6 r7 p0 p1 p2 p3 p4 p5 p6 p7 */ \
+{ 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, \
+/*i0 i1 i2 i3 b0 b1 b2 b3 l0 l1 l2 l3 m0 m1 m2 m3 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+/*a0 a1 cc rets/i/x/n/e astat seqstat usp argp lt0/1 lc0/1 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+/*lb0/1 */ \
+ 1, 1 \
+}
+
+/* Order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS. List frame pointer
+ late and fixed registers last. Note that, in general, we prefer
+ registers listed in CALL_USED_REGISTERS, keeping the others
+ available for storage of persistent values. */
+
+#define REG_ALLOC_ORDER \
+{ REG_R0, REG_R1, REG_R2, REG_R3, REG_R7, REG_R6, REG_R5, REG_R4, \
+ REG_P2, REG_P1, REG_P0, REG_P5, REG_P4, REG_P3, REG_P6, REG_P7, \
+ REG_A0, REG_A1, \
+ REG_I0, REG_I1, REG_I2, REG_I3, REG_B0, REG_B1, REG_B2, REG_B3, \
+ REG_L0, REG_L1, REG_L2, REG_L3, REG_M0, REG_M1, REG_M2, REG_M3, \
+ REG_RETS, REG_RETI, REG_RETX, REG_RETN, REG_RETE, \
+ REG_ASTAT, REG_SEQSTAT, REG_USP, \
+ REG_CC, REG_ARGP, \
+ REG_LT0, REG_LT1, REG_LC0, REG_LC1, REG_LB0, REG_LB1 \
+}
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+
+enum reg_class
+{
+ NO_REGS,
+ IREGS,
+ BREGS,
+ LREGS,
+ MREGS,
+  CIRCREGS, /* Circular buffering registers; an Ix, Bx, Lx triple together
+	       forms one circular buffer.  See Automatic Circular
+	       Buffering.  */
+ DAGREGS,
+ EVEN_AREGS,
+ ODD_AREGS,
+ AREGS,
+ CCREGS,
+ EVEN_DREGS,
+ ODD_DREGS,
+ D0REGS,
+ D1REGS,
+ D2REGS,
+ D3REGS,
+ D4REGS,
+ D5REGS,
+ D6REGS,
+ D7REGS,
+ DREGS,
+ P0REGS,
+ FDPIC_REGS,
+ FDPIC_FPTR_REGS,
+ PREGS_CLOBBERED,
+ PREGS,
+ IPREGS,
+ DPREGS,
+ MOST_REGS,
+ LT_REGS,
+ LC_REGS,
+ LB_REGS,
+ PROLOGUE_REGS,
+ NON_A_CC_REGS,
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES ((int)LIM_REG_CLASSES)
+
+#define GENERAL_REGS DPREGS
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", \
+ "IREGS", \
+ "BREGS", \
+ "LREGS", \
+ "MREGS", \
+ "CIRCREGS", \
+ "DAGREGS", \
+ "EVEN_AREGS", \
+ "ODD_AREGS", \
+ "AREGS", \
+ "CCREGS", \
+ "EVEN_DREGS", \
+ "ODD_DREGS", \
+ "D0REGS", \
+ "D1REGS", \
+ "D2REGS", \
+ "D3REGS", \
+ "D4REGS", \
+ "D5REGS", \
+ "D6REGS", \
+ "D7REGS", \
+ "DREGS", \
+ "P0REGS", \
+ "FDPIC_REGS", \
+ "FDPIC_FPTR_REGS", \
+ "PREGS_CLOBBERED", \
+ "PREGS", \
+ "IPREGS", \
+ "DPREGS", \
+ "MOST_REGS", \
+ "LT_REGS", \
+ "LC_REGS", \
+ "LB_REGS", \
+ "PROLOGUE_REGS", \
+ "NON_A_CC_REGS", \
+ "ALL_REGS" }
+
+/* An initializer containing the contents of the register classes, as integers
+ which are bit masks. The Nth integer specifies the contents of class N.
+ The way the integer MASK is interpreted is that register R is in the class
+ if `MASK & (1 << R)' is 1.
+
+ When the machine has more than 32 registers, an integer does not suffice.
+ Then the integers are replaced by sub-initializers, braced groupings
+ containing several integers. Each sub-initializer must be suitable as an
+ initializer for the type `HARD_REG_SET' which is defined in
+ `hard-reg-set.h'. */
+
+/* NOTE: DSP registers, IREGS - AREGS, are not GENERAL_REGS. We use
+ MOST_REGS as the union of DPREGS and DAGREGS. */
+
+#define REG_CLASS_CONTENTS \
+ /* 31 - 0 63-32 */ \
+{ { 0x00000000, 0 }, /* NO_REGS */ \
+ { 0x000f0000, 0 }, /* IREGS */ \
+ { 0x00f00000, 0 }, /* BREGS */ \
+ { 0x0f000000, 0 }, /* LREGS */ \
+ { 0xf0000000, 0 }, /* MREGS */ \
+ { 0x0fff0000, 0 }, /* CIRCREGS */ \
+ { 0xffff0000, 0 }, /* DAGREGS */ \
+ { 0x00000000, 0x1 }, /* EVEN_AREGS */ \
+ { 0x00000000, 0x2 }, /* ODD_AREGS */ \
+ { 0x00000000, 0x3 }, /* AREGS */ \
+ { 0x00000000, 0x4 }, /* CCREGS */ \
+ { 0x00000055, 0 }, /* EVEN_DREGS */ \
+ { 0x000000aa, 0 }, /* ODD_DREGS */ \
+ { 0x00000001, 0 }, /* D0REGS */ \
+ { 0x00000002, 0 }, /* D1REGS */ \
+ { 0x00000004, 0 }, /* D2REGS */ \
+ { 0x00000008, 0 }, /* D3REGS */ \
+ { 0x00000010, 0 }, /* D4REGS */ \
+ { 0x00000020, 0 }, /* D5REGS */ \
+ { 0x00000040, 0 }, /* D6REGS */ \
+ { 0x00000080, 0 }, /* D7REGS */ \
+ { 0x000000ff, 0 }, /* DREGS */ \
+ { 0x00000100, 0x000 }, /* P0REGS */ \
+ { 0x00000800, 0x000 }, /* FDPIC_REGS */ \
+ { 0x00000200, 0x000 }, /* FDPIC_FPTR_REGS */ \
+ { 0x00004700, 0x800 }, /* PREGS_CLOBBERED */ \
+ { 0x0000ff00, 0x800 }, /* PREGS */ \
+ { 0x000fff00, 0x800 }, /* IPREGS */ \
+ { 0x0000ffff, 0x800 }, /* DPREGS */ \
+ { 0xffffffff, 0x800 }, /* MOST_REGS */\
+ { 0x00000000, 0x3000 }, /* LT_REGS */\
+ { 0x00000000, 0xc000 }, /* LC_REGS */\
+ { 0x00000000, 0x30000 }, /* LB_REGS */\
+ { 0x00000000, 0x3f7f8 }, /* PROLOGUE_REGS */\
+ { 0xffffffff, 0x3fff8 }, /* NON_A_CC_REGS */\
+ { 0xffffffff, 0x3ffff }} /* ALL_REGS */
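+
+/* Worked example (illustrative): assuming REG_R0 is hard register 0, as
+   the ordering of REGISTER_NAMES above suggests, the DREGS entry
+   { 0x000000ff, 0 } has bits 0-7 set, so `MASK & (1 << R)' is 1 exactly
+   for R0-R7.  */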
+
+#define IREG_POSSIBLE_P(OUTER) \
+ ((OUTER) == POST_INC || (OUTER) == PRE_INC \
+ || (OUTER) == POST_DEC || (OUTER) == PRE_DEC \
+ || (OUTER) == MEM || (OUTER) == ADDRESS)
+
+#define MODE_CODE_BASE_REG_CLASS(MODE, OUTER, INDEX) \
+ ((MODE) == HImode && IREG_POSSIBLE_P (OUTER) ? IPREGS : PREGS)
+
+#define INDEX_REG_CLASS PREGS
+
+#define REGNO_OK_FOR_BASE_STRICT_P(X, MODE, OUTER, INDEX) \
+ (P_REGNO_P (X) || (X) == REG_ARGP \
+ || (IREG_POSSIBLE_P (OUTER) && (MODE) == HImode \
+ && I_REGNO_P (X)))
+
+#define REGNO_OK_FOR_BASE_NONSTRICT_P(X, MODE, OUTER, INDEX) \
+ ((X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO_OK_FOR_BASE_STRICT_P (X, MODE, OUTER, INDEX))
+
+#ifdef REG_OK_STRICT
+#define REGNO_MODE_CODE_OK_FOR_BASE_P(X, MODE, OUTER, INDEX) \
+ REGNO_OK_FOR_BASE_STRICT_P (X, MODE, OUTER, INDEX)
+#else
+#define REGNO_MODE_CODE_OK_FOR_BASE_P(X, MODE, OUTER, INDEX) \
+ REGNO_OK_FOR_BASE_NONSTRICT_P (X, MODE, OUTER, INDEX)
+#endif
+
+#define REGNO_OK_FOR_INDEX_P(X) 0
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+((REGNO) == REG_R0 ? D0REGS \
+ : (REGNO) == REG_R1 ? D1REGS \
+ : (REGNO) == REG_R2 ? D2REGS \
+ : (REGNO) == REG_R3 ? D3REGS \
+ : (REGNO) == REG_R4 ? D4REGS \
+ : (REGNO) == REG_R5 ? D5REGS \
+ : (REGNO) == REG_R6 ? D6REGS \
+ : (REGNO) == REG_R7 ? D7REGS \
+ : (REGNO) == REG_P0 ? P0REGS \
+ : (REGNO) < REG_I0 ? PREGS \
+ : (REGNO) == REG_ARGP ? PREGS \
+ : (REGNO) >= REG_I0 && (REGNO) <= REG_I3 ? IREGS \
+ : (REGNO) >= REG_L0 && (REGNO) <= REG_L3 ? LREGS \
+ : (REGNO) >= REG_B0 && (REGNO) <= REG_B3 ? BREGS \
+ : (REGNO) >= REG_M0 && (REGNO) <= REG_M3 ? MREGS \
+ : (REGNO) == REG_A0 || (REGNO) == REG_A1 ? AREGS \
+ : (REGNO) == REG_LT0 || (REGNO) == REG_LT1 ? LT_REGS \
+ : (REGNO) == REG_LC0 || (REGNO) == REG_LC1 ? LC_REGS \
+ : (REGNO) == REG_LB0 || (REGNO) == REG_LB1 ? LB_REGS \
+ : (REGNO) == REG_CC ? CCREGS \
+ : (REGNO) >= REG_RETS ? PROLOGUE_REGS \
+ : NO_REGS)
+
+/* The following macro defines cover classes for the Integrated Register
+   Allocator.  Cover classes are a set of non-intersecting register
+   classes covering all hard registers used for register allocation
+   purposes.  Any move between two registers of a cover class should be
+   cheaper than a load or store of the registers.  The macro value is an
+   array of register classes with LIM_REG_CLASSES used as the end
+   marker.  */
+
+#define IRA_COVER_CLASSES \
+{ \
+ MOST_REGS, AREGS, CCREGS, LIM_REG_CLASSES \
+}
+
+/* When this hook returns true for MODE, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
+
+/* Do not allow storing a value in REG_CC for any mode.
+   Do not allow storing a value in the P registers unless the mode is
+   SImode.  */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) hard_regno_mode_ok((REGNO), (MODE))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((MODE) == V2PDImode && (CLASS) == AREGS ? 2 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((MODE) == PDImode && ((REGNO) == REG_A0 || (REGNO) == REG_A1) ? 1 \
+ : (MODE) == V2PDImode && ((REGNO) == REG_A0 || (REGNO) == REG_A1) ? 2 \
+ : CLASS_MAX_NREGS (GENERAL_REGS, MODE))
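+
+/* Reading of the above (illustrative): a 40-bit PDImode value occupies a
+   single accumulator, a V2PDImode value occupies the A0/A1 pair, and any
+   other mode takes GET_MODE_SIZE / UNITS_PER_WORD registers, rounded up.  */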
+
+/* A C expression that is nonzero if hard register TO can be
+ considered for use as a rename register for FROM register */
+#define HARD_REGNO_RENAME_OK(FROM, TO) bfin_hard_regno_rename_ok (FROM, TO)
+
+/* A C expression that is nonzero if it is desirable to choose
+ register allocation so as to avoid move instructions between a
+ value of mode MODE1 and a value of mode MODE2.
+
+ If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
+ MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
+ MODE2)' must be zero. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) \
+ || ((GET_MODE_CLASS (MODE1) == MODE_INT \
+ || GET_MODE_CLASS (MODE1) == MODE_FLOAT) \
+ && (GET_MODE_CLASS (MODE2) == MODE_INT \
+ || GET_MODE_CLASS (MODE2) == MODE_FLOAT) \
+ && (MODE1) != BImode && (MODE2) != BImode \
+ && GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
+ && GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD))
+
+/* `PREFERRED_RELOAD_CLASS (X, CLASS)'
+ A C expression that places additional restrictions on the register
+ class to use when it is necessary to copy value X into a register
+ in class CLASS. The value is a register class; perhaps CLASS, or
+ perhaps another, smaller class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) \
+ (GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == POST_DEC \
+ || GET_CODE (X) == PRE_DEC ? PREGS : (CLASS))
+
+/* Function Calling Conventions. */
+
+/* The type of the current function; normal functions are of type
+ SUBROUTINE. */
+typedef enum {
+ SUBROUTINE, INTERRUPT_HANDLER, EXCPT_HANDLER, NMI_HANDLER
+} e_funkind;
+#define FUNCTION_RETURN_REGISTERS { REG_RETS, REG_RETI, REG_RETX, REG_RETN }
+
+#define FUNCTION_ARG_REGISTERS { REG_R0, REG_R1, REG_R2, -1 }
+
+/* Flags for the call/call_value rtl operations set up by function_arg */
+#define CALL_NORMAL 0x00000000 /* no special processing */
+#define CALL_LONG 0x00000001 /* always call indirect */
+#define CALL_SHORT 0x00000002 /* always call by symbol */
+
+typedef struct {
+ int words; /* # words passed so far */
+ int nregs; /* # registers available for passing */
+  int *arg_regs; /* array of registers, -1 terminated */
+ int call_cookie; /* Do special things for this call */
+} CUMULATIVE_ARGS;
+
+#define FUNCTION_ARG_REGNO_P(REGNO) function_arg_regno_p (REGNO)
+
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT, N_NAMED_ARGS) \
+ (init_cumulative_args (&CUM, FNTYPE, LIBNAME))
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+*/
+
+#define VALUE_REGNO(MODE) (REG_R0)
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (TYPE_MODE (VALTYPE), \
+ VALUE_REGNO(TYPE_MODE(VALTYPE)))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, VALUE_REGNO(MODE))
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == REG_R0)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Before the prologue, the return address is in the RETS register. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, REG_RETS)
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) bfin_return_addr_rtx (COUNT)
+
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (REG_RETS)
+
+/* Call instructions don't modify the stack pointer on the Blackfin. */
+#define INCOMING_FRAME_SP_OFFSET 0
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, REG_P2)
+#define EH_RETURN_HANDLER_RTX \
+ gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx, UNITS_PER_WORD))
+
+/* Addressing Modes */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+   SYMBOL_REFs are not legitimate and will be put into the constant pool.
+ See force_const_mem().
+ If -mno-pool, all constants are legitimate.
+ */
+#define LEGITIMATE_CONSTANT_P(X) bfin_legitimate_constant_p (X)
+
+/* A number, the maximum number of registers that can appear in a
+ valid memory address. Note that it is up to you to specify a
+ value equal to the maximum number that `TARGET_LEGITIMATE_ADDRESS_P'
+ would ever accept. */
+#define MAX_REGS_PER_ADDRESS 1
+
+#define LEGITIMATE_MODE_FOR_AUTOINC_P(MODE) \
+ (GET_MODE_SIZE (MODE) <= 4 || (MODE) == PDImode)
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* `LEGITIMATE_PIC_OPERAND_P (X)'
+ A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent
+ code. You can assume that X satisfies `CONSTANT_P', so you need
+ not check this. You can also assume FLAG_PIC is true, so you need
+ not check it either. You need not define this macro if all
+ constants (including `SYMBOL_REF') can be immediate operands when
+ generating position independent code. */
+#define LEGITIMATE_PIC_OPERAND_P(X) ! SYMBOLIC_CONST (X)
+
+#define SYMBOLIC_CONST(X) \
+(GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
+
+#define NOTICE_UPDATE_CC(EXPR, INSN) 0
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX UNITS_PER_WORD
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction pairs, we will do a movmem or libcall instead. */
+
+#define MOVE_RATIO(speed) 5
+
+/* STORAGE LAYOUT: target machine storage layout
+ Define this macro as a C expression which is nonzero if accessing
+ less than a word of memory (i.e. a `char' or a `short') is no
+   faster than accessing a word of memory, i.e., if such accesses
+ require more than one instruction or if there is no difference in
+ cost between byte and (aligned) word loads.
+
+ When this macro is not defined, the compiler will access a field by
+ finding the smallest containing object; when it is defined, a
+   fullword load will be used if alignment permits.  Unless byte
+   accesses are faster than word accesses, using word accesses is
+   preferable since it may eliminate subsequent memory accesses if
+ subsequent accesses occur to other fields in the same word of the
+ structure, but to different bytes. */
+#define SLOW_BYTE_ACCESS 0
+#define SLOW_SHORT_ACCESS 0
+
+/* Define this if the most significant bit is the lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+   numbered.  We can't access bytes individually, but if we could, we
+   would access them in big-endian order.  */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if the most significant word of a multiword number is the
+   lowest numbered.  */
+#define WORDS_BIG_ENDIAN 0
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+   See also the macro `Pmode' defined above.  */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing pointers in memory. */
+#define POINTER_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY BITS_PER_WORD
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Never define PCC_BITFIELD_TYPE_MATTERS; it really causes alignment
+   problems.  */
+
+#define UNITS_PER_FLOAT ((FLOAT_TYPE_SIZE + BITS_PER_UNIT - 1) / \
+ BITS_PER_UNIT)
+
+#define UNITS_PER_DOUBLE ((DOUBLE_TYPE_SIZE + BITS_PER_UNIT - 1) / \
+ BITS_PER_UNIT)
+
+
+/* The C type to use for `size_t'.  */
+#define SIZE_TYPE "long unsigned int"
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#define SHORT_TYPE_SIZE 16
+#define CHAR_TYPE_SIZE 8
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+/* Note: Fix this to depend on target switch. -- lev */
+
+/* Note: Try to implement double and force long double. -- tonyko
+ * #define __DOUBLES_ARE_FLOATS__
+ * #define DOUBLE_TYPE_SIZE FLOAT_TYPE_SIZE
+ * #define LONG_DOUBLE_TYPE_SIZE DOUBLE_TYPE_SIZE
+ * #define DOUBLES_ARE_FLOATS 1
+ */
+
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* `PROMOTE_MODE (M, UNSIGNEDP, TYPE)'
+ A macro to update M and UNSIGNEDP when an object whose type is
+ TYPE and which has the specified mode and signedness is to be
+ stored in a register. This macro is only called when TYPE is a
+ scalar type.
+
+ On most RISC machines, which only have operations that operate on
+ a full register, define this macro to set M to `word_mode' if M is
+ an integer mode narrower than `BITS_PER_WORD'. In most cases,
+ only integer modes should be widened because wider-precision
+ floating-point operations are usually more expensive than their
+ narrower counterparts.
+
+ For most machines, the macro definition does not change UNSIGNEDP.
+   However, some machines have instructions that preferentially
+ handle either signed or unsigned quantities of certain modes. For
+ example, on the DEC Alpha, 32-bit loads from memory and 32-bit add
+ instructions sign-extend the result to 64 bits. On such machines,
+ set UNSIGNEDP according to which kind of extension is more
+ efficient.
+
+   Do not define this macro if it would never modify M.  */
+
+#define BFIN_PROMOTE_MODE_P(MODE) \
+ (!TARGET_DSP && GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD)
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (BFIN_PROMOTE_MODE_P(MODE)) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = 0; \
+ (MODE) = SImode; \
+ }
+
+/* Describing Relative Costs of Operations */
+
+/* Do not put function addresses into the constant pool.  */
+#define NO_FUNCTION_CSE 1
+
+/* A C expression for the cost of moving data from a register in class FROM to
+ one in class TO. The classes are expressed using the enumeration values
+ such as `GENERAL_REGS'. A value of 2 is the default; other values are
+ interpreted relative to that.
+
+ It is not required that the cost always equal 2 when FROM is the same as TO;
+ on some machines it is expensive to move between registers if they are not
+ general registers. */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+ bfin_register_move_cost ((MODE), (CLASS1), (CLASS2))
+
+/* A C expression for the cost of moving data of mode M between a
+ register and memory. A value of 2 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ If moving between registers and memory is more expensive than
+ between two registers, you should define this macro to express the
+ relative cost. */
+
+#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
+ bfin_memory_move_cost ((MODE), (CLASS), (IN))
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+#define JUMP_TABLES_IN_TEXT_SECTION flag_pic
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified.
+#define WORD_REGISTER_OPERATIONS
+*/
+
+/* Evaluates to true if A and B are mac flags that can be used
+ together in a single multiply insn. That is the case if they are
+ both the same flag not involving M, or if one is a combination of
+ the other with M. */
+#define MACFLAGS_MATCH_P(A, B) \
+ ((A) == (B) \
+ || ((A) == MACFLAG_NONE && (B) == MACFLAG_M) \
+ || ((A) == MACFLAG_M && (B) == MACFLAG_NONE) \
+ || ((A) == MACFLAG_IS && (B) == MACFLAG_IS_M) \
+ || ((A) == MACFLAG_IS_M && (B) == MACFLAG_IS))
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+
+#define PRINT_OPERAND(FILE, RTX, CODE) print_operand (FILE, RTX, CODE)
+#define PRINT_OPERAND_ADDRESS(FILE, RTX) print_address_operand (FILE, RTX)
+
+typedef enum sections {
+ CODE_DIR,
+ DATA_DIR,
+ LAST_SECT_NM
+} SECT_ENUM_T;
+
+typedef enum directives {
+ LONG_CONST_DIR,
+ SHORT_CONST_DIR,
+ BYTE_CONST_DIR,
+ SPACE_DIR,
+ INIT_DIR,
+ LAST_DIR_NM
+} DIR_ENUM_T;
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) \
+ ((C) == ';' \
+ || ((C) == '|' && (STR)[1] == '|'))
+
+#define TEXT_SECTION_ASM_OP ".text;"
+#define DATA_SECTION_ASM_OP ".data;"
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+
+#define ASM_GLOBALIZE_LABEL1(FILE, NAME) \
+ do { fputs (".global ", FILE); \
+ assemble_name (FILE, NAME); \
+ fputc (';',FILE); \
+ fputc ('\n',FILE); \
+ } while (0)
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+ do { \
+ fputs (".type ", FILE); \
+ assemble_name (FILE, NAME); \
+ fputs (", STT_FUNC", FILE); \
+ fputc (';',FILE); \
+ fputc ('\n',FILE); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+#define ASM_OUTPUT_LABEL(FILE, NAME) \
+ do { assemble_name (FILE, NAME); \
+ fputs (":\n",FILE); \
+ } while (0)
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { fprintf (FILE, "_%s", NAME); \
+ } while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { char __buf[256]; \
+ fprintf (FILE, "\t.dd\t"); \
+ ASM_GENERATE_INTERNAL_LABEL (__buf, "L", VALUE); \
+ assemble_name (FILE, __buf); \
+ fputc (';', FILE); \
+ fputc ('\n', FILE); \
+ } while (0)
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ MY_ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL)
+
+#define MY_ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ do { \
+ char __buf[256]; \
+ fprintf (FILE, "\t.dd\t"); \
+ ASM_GENERATE_INTERNAL_LABEL (__buf, "L", VALUE); \
+ assemble_name (FILE, __buf); \
+ fputs (" - ", FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (__buf, "L", REL); \
+ assemble_name (FILE, __buf); \
+ fputc (';', FILE); \
+ fputc ('\n', FILE); \
+ } while (0)
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ do { \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", 1 << (LOG)); \
+ } while (0)
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ do { \
+ asm_output_skip (FILE, SIZE); \
+ } while (0)
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+do { \
+ switch_to_section (data_section); \
+ if ((SIZE) >= (unsigned int) 4 ) ASM_OUTPUT_ALIGN(FILE,2); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ fprintf (FILE, "%s %ld;\n", ASM_SPACE, \
+ (ROUNDED) > (unsigned int) 1 ? (ROUNDED) : 1); \
+} while (0)
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ ASM_GLOBALIZE_LABEL1(FILE,NAME); \
+ ASM_OUTPUT_LOCAL (FILE, NAME, SIZE, ROUNDED); } while(0)
+
+#define ASM_COMMENT_START "//"
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ fprintf (FILE, "\tCALL __mcount;\n"); \
+ } while(0)
+
+#undef NO_PROFILE_COUNTERS
+#define NO_PROFILE_COUNTERS 1
+
+#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) fprintf (FILE, "[SP--] = %s;\n", reg_names[REGNO])
+#define ASM_OUTPUT_REG_POP(FILE, REGNO) fprintf (FILE, "%s = [SP++];\n", reg_names[REGNO])
+
+extern struct rtx_def *bfin_cc_rtx, *bfin_rets_rtx;
+
+/* This works for GAS and some other assemblers. */
+#define SET_ASM_OP ".set "
+
+/* DBX register number for a given compiler register number.  */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+#define SIZE_ASM_OP "\t.size\t"
+
+extern int splitting_for_sched, splitting_loops;
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) ((CHAR) == '!')
+
+#ifndef TARGET_SUPPORTS_SYNC_CALLS
+#define TARGET_SUPPORTS_SYNC_CALLS 0
+#endif
+
+#endif /* _BFIN_CONFIG */
diff --git a/gcc/config/bfin/bfin.md b/gcc/config/bfin/bfin.md
new file mode 100644
index 000000000..3fac01ca5
--- /dev/null
+++ b/gcc/config/bfin/bfin.md
@@ -0,0 +1,4211 @@
+;;- Machine description for Blackfin for GNU compiler
+;; Copyright 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+;; Contributed by Analog Devices.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+; operand punctuation marks:
+;
+; X -- integer value printed as log2
+; Y -- integer value printed as log2(~value) - for bitclear
+; h -- print half word register, low part
+; d -- print half word register, high part
+; D -- print operand as dregs pairs
+; w -- print operand as accumulator register word (a0w, a1w)
+; H -- high part of double mode operand
+; T -- byte register representation
+
+; constant operand classes
+;
+; J 2**N 5bit imm scaled
+; Ks7 -64 .. 63 signed 7bit imm
+; Ku5 0..31 unsigned 5bit imm
+; Ks4 -8 .. 7 signed 4bit imm
+; Ks3 -4 .. 3 signed 3bit imm
+; Ku3 0 .. 7 unsigned 3bit imm
+; Pn 0, 1, 2 constants 0, 1 or 2, corresponding to n
+;
+; register operands
+; d (r0..r7)
+; a (p0..p5,fp,sp)
+; e (a0, a1)
+; b (i0..i3)
+; f (m0..m3)
+; v (b0..b3)
+; c (i0..i3,m0..m3) CIRCREGS
+; C (CC) CCREGS
+; t (lt0,lt1)
+; k (lc0,lc1)
+; u (lb0,lb1)
+;
+
+;; Define constants for hard registers.
+
+(define_constants
+ [(REG_R0 0)
+ (REG_R1 1)
+ (REG_R2 2)
+ (REG_R3 3)
+ (REG_R4 4)
+ (REG_R5 5)
+ (REG_R6 6)
+ (REG_R7 7)
+
+ (REG_P0 8)
+ (REG_P1 9)
+ (REG_P2 10)
+ (REG_P3 11)
+ (REG_P4 12)
+ (REG_P5 13)
+ (REG_P6 14)
+ (REG_P7 15)
+
+ (REG_SP 14)
+ (REG_FP 15)
+
+ (REG_I0 16)
+ (REG_I1 17)
+ (REG_I2 18)
+ (REG_I3 19)
+
+ (REG_B0 20)
+ (REG_B1 21)
+ (REG_B2 22)
+ (REG_B3 23)
+
+ (REG_L0 24)
+ (REG_L1 25)
+ (REG_L2 26)
+ (REG_L3 27)
+
+ (REG_M0 28)
+ (REG_M1 29)
+ (REG_M2 30)
+ (REG_M3 31)
+
+ (REG_A0 32)
+ (REG_A1 33)
+
+ (REG_CC 34)
+ (REG_RETS 35)
+ (REG_RETI 36)
+ (REG_RETX 37)
+ (REG_RETN 38)
+ (REG_RETE 39)
+
+ (REG_ASTAT 40)
+ (REG_SEQSTAT 41)
+ (REG_USP 42)
+
+ (REG_ARGP 43)
+
+ (REG_LT0 44)
+ (REG_LT1 45)
+ (REG_LC0 46)
+ (REG_LC1 47)
+ (REG_LB0 48)
+ (REG_LB1 49)])
+
+;; Constants used in UNSPECs and UNSPEC_VOLATILEs.
+
+(define_constants
+ [(UNSPEC_CBRANCH_TAKEN 0)
+ (UNSPEC_CBRANCH_NOPS 1)
+ (UNSPEC_RETURN 2)
+ (UNSPEC_MOVE_PIC 3)
+ (UNSPEC_LIBRARY_OFFSET 4)
+ (UNSPEC_PUSH_MULTIPLE 5)
+ ;; Multiply or MAC with extra CONST_INT operand specifying the macflag
+ (UNSPEC_MUL_WITH_FLAG 6)
+ (UNSPEC_MAC_WITH_FLAG 7)
+ (UNSPEC_MOVE_FDPIC 8)
+ (UNSPEC_FUNCDESC_GOT17M4 9)
+ (UNSPEC_LSETUP_END 10)
+ ;; Distinguish a 32-bit version of an insn from a 16-bit version.
+ (UNSPEC_32BIT 11)
+ (UNSPEC_NOP 12)
+ (UNSPEC_ONES 13)
+ (UNSPEC_ATOMIC 14)])
+
+(define_constants
+ [(UNSPEC_VOLATILE_CSYNC 1)
+ (UNSPEC_VOLATILE_SSYNC 2)
+ (UNSPEC_VOLATILE_LOAD_FUNCDESC 3)
+ (UNSPEC_VOLATILE_STORE_EH_HANDLER 4)
+ (UNSPEC_VOLATILE_DUMMY 5)
+ (UNSPEC_VOLATILE_STALL 6)])
+
+(define_constants
+ [(MACFLAG_NONE 0)
+ (MACFLAG_T 1)
+ (MACFLAG_FU 2)
+ (MACFLAG_TFU 3)
+ (MACFLAG_IS 4)
+ (MACFLAG_IU 5)
+ (MACFLAG_W32 6)
+ (MACFLAG_M 7)
+ (MACFLAG_IS_M 8)
+ (MACFLAG_S2RND 9)
+ (MACFLAG_ISS2 10)
+ (MACFLAG_IH 11)])
+
+(define_attr "type"
+ "move,movcc,mvi,mcld,mcst,dsp32,dsp32shiftimm,mult,alu0,shft,brcc,br,call,misc,sync,compare,dummy,stall"
+ (const_string "misc"))
+
+(define_attr "addrtype" "32bit,preg,spreg,ireg"
+ (cond [(and (eq_attr "type" "mcld")
+ (and (match_operand 0 "dp_register_operand" "")
+ (match_operand 1 "mem_p_address_operand" "")))
+ (const_string "preg")
+ (and (eq_attr "type" "mcld")
+ (and (match_operand 0 "dp_register_operand" "")
+ (match_operand 1 "mem_spfp_address_operand" "")))
+ (const_string "spreg")
+ (and (eq_attr "type" "mcld")
+ (and (match_operand 0 "dp_register_operand" "")
+ (match_operand 1 "mem_i_address_operand" "")))
+ (const_string "ireg")
+ (and (eq_attr "type" "mcst")
+ (and (match_operand 1 "dp_register_operand" "")
+ (match_operand 0 "mem_p_address_operand" "")))
+ (const_string "preg")
+ (and (eq_attr "type" "mcst")
+ (and (match_operand 1 "dp_register_operand" "")
+ (match_operand 0 "mem_spfp_address_operand" "")))
+ (const_string "spreg")
+ (and (eq_attr "type" "mcst")
+ (and (match_operand 1 "dp_register_operand" "")
+ (match_operand 0 "mem_i_address_operand" "")))
+ (const_string "ireg")]
+ (const_string "32bit")))
+
+(define_attr "storereg" "preg,other"
+ (cond [(and (eq_attr "type" "mcst")
+ (match_operand 1 "p_register_operand" ""))
+ (const_string "preg")]
+ (const_string "other")))
+
+;; Scheduling definitions
+
+(define_automaton "bfin")
+
+(define_cpu_unit "slot0" "bfin")
+(define_cpu_unit "slot1" "bfin")
+(define_cpu_unit "slot2" "bfin")
+
+;; Two units used to enforce parallel issue restrictions:
+;; only one of the 16-bit slots can use a P register in an address,
+;; and only one of them can be a store.
+(define_cpu_unit "store" "bfin")
+(define_cpu_unit "pregs" "bfin")
+
+;; A dummy unit used to delay scheduling of loads after a conditional
+;; branch.
+(define_cpu_unit "load" "bfin")
+
+;; A logical unit used to work around anomaly 05000074.
+(define_cpu_unit "anomaly_05000074" "bfin")
+
+(define_reservation "core" "slot0+slot1+slot2")
+
+(define_insn_reservation "alu" 1
+ (eq_attr "type" "move,movcc,mvi,alu0,shft,brcc,br,call,misc,sync,compare")
+ "core")
+
+(define_insn_reservation "imul" 3
+ (eq_attr "type" "mult")
+ "core*3")
+
+(define_insn_reservation "dsp32" 1
+ (eq_attr "type" "dsp32")
+ "slot0")
+
+(define_insn_reservation "dsp32shiftimm" 1
+ (and (eq_attr "type" "dsp32shiftimm")
+ (eq (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0)))
+ "slot0")
+
+(define_insn_reservation "dsp32shiftimm_anomaly_05000074" 1
+ (and (eq_attr "type" "dsp32shiftimm")
+ (ne (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0)))
+ "slot0+anomaly_05000074")
+
+(define_insn_reservation "load32" 1
+ (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcld") (eq_attr "addrtype" "32bit")))
+ "core+load")
+
+(define_insn_reservation "loadp" 1
+ (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcld") (eq_attr "addrtype" "preg")))
+ "slot1+pregs+load")
+
+(define_insn_reservation "loadsp" 1
+ (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcld") (eq_attr "addrtype" "spreg")))
+ "slot1+pregs")
+
+(define_insn_reservation "loadi" 1
+ (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcld") (eq_attr "addrtype" "ireg")))
+ "(slot1|slot2)+load")
+
+(define_insn_reservation "store32" 1
+ (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcst") (eq_attr "addrtype" "32bit")))
+ "core")
+
+(define_insn_reservation "storep" 1
+ (and (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcst")
+ (ior (eq_attr "addrtype" "preg")
+ (eq_attr "addrtype" "spreg"))))
+ (ior (eq (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0))
+ (eq_attr "storereg" "other")))
+ "slot1+pregs+store")
+
+(define_insn_reservation "storep_anomaly_05000074" 1
+ (and (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcst")
+ (ior (eq_attr "addrtype" "preg")
+ (eq_attr "addrtype" "spreg"))))
+ (and (ne (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0))
+ (eq_attr "storereg" "preg")))
+ "slot1+anomaly_05000074+pregs+store")
+
+(define_insn_reservation "storei" 1
+ (and (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcst") (eq_attr "addrtype" "ireg")))
+ (ior (eq (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0))
+ (eq_attr "storereg" "other")))
+ "(slot1|slot2)+store")
+
+(define_insn_reservation "storei_anomaly_05000074" 1
+ (and (and (not (eq_attr "seq_insns" "multi"))
+ (and (eq_attr "type" "mcst") (eq_attr "addrtype" "ireg")))
+ (and (ne (symbol_ref "ENABLE_WA_05000074")
+ (const_int 0))
+ (eq_attr "storereg" "preg")))
+ "((slot1+anomaly_05000074)|slot2)+store")
+
+(define_insn_reservation "multi" 2
+ (eq_attr "seq_insns" "multi")
+ "core")
+
+(define_insn_reservation "load_stall1" 1
+ (and (eq_attr "type" "stall")
+ (match_operand 0 "const1_operand" ""))
+ "core+load*2")
+
+(define_insn_reservation "load_stall3" 1
+ (and (eq_attr "type" "stall")
+ (match_operand 0 "const3_operand" ""))
+ "core+load*4")
+
+(absence_set "slot0" "slot1,slot2")
+(absence_set "slot1" "slot2")
+
+;; Make sure genautomata knows about the maximum latency that can be produced
+;; by the adjust_cost function.
+(define_insn_reservation "dummy" 5
+ (eq_attr "type" "dummy")
+ "core")
+
+;; Operand and operator predicates
+
+(include "predicates.md")
+(include "constraints.md")
+
+;;; FRIO branches have been optimized for code density.
+;;; This comes at a slight cost of complexity when
+;;; a compiler needs to generate branches in the general
+;;; case.  In order to generate the correct branching
+;;; mechanisms the compiler needs to keep track of instruction
+;;; lengths.  The following table describes how to count instructions
+;;; for the FRIO architecture.
+;;;
+;;; unconditional br are 12-bit imm pcrelative branches *2
+;;; conditional br are 10-bit imm pcrelative branches *2
+;;; brcc 10-bit:
+;;; 1024 10-bit imm *2 is 2048 (-1024..1022)
+;;; br 12-bit :
+;;; 4096 12-bit imm *2 is 8192 (-4096..4094)
+;;; NOTE: For brcc we generate instruction sequences such as
+;;;    if cc jmp; jump.[sl] offset
+;;; The offset of jump.[sl] is measured from the jump instruction itself,
+;;; but gcc calculates the length from the "if cc jmp" instruction.
+;;; Furthermore, gcc takes the end address of the branch instruction
+;;; as (pc) for a forward branch.
+;;; Hence our range is (-4094, 4092) instead of (-4096, 4094) for a br.
+;;;
+;;; The way the (pc) rtx works in these calculations is somewhat odd;
+;;; for backward branches it's the address of the current instruction,
+;;; for forward branches it's the previously known address of the following
+;;; instruction - we have to take this into account by reducing the range
+;;; for a forward branch.
+
+;; Lengths for type "mvi" insns are always defined by the instructions
+;; themselves.
+(define_attr "length" ""
+ (cond [(eq_attr "type" "mcld")
+ (if_then_else (match_operand 1 "effective_address_32bit_p" "")
+ (const_int 4) (const_int 2))
+
+ (eq_attr "type" "mcst")
+ (if_then_else (match_operand 0 "effective_address_32bit_p" "")
+ (const_int 4) (const_int 2))
+
+ (eq_attr "type" "move") (const_int 2)
+
+ (eq_attr "type" "dsp32") (const_int 4)
+ (eq_attr "type" "dsp32shiftimm") (const_int 4)
+ (eq_attr "type" "call") (const_int 4)
+
+ (eq_attr "type" "br")
+ (if_then_else (and
+ (le (minus (match_dup 0) (pc)) (const_int 4092))
+ (ge (minus (match_dup 0) (pc)) (const_int -4096)))
+ (const_int 2)
+ (const_int 4))
+
+ (eq_attr "type" "brcc")
+ (cond [(and
+ (le (minus (match_dup 3) (pc)) (const_int 1020))
+ (ge (minus (match_dup 3) (pc)) (const_int -1024)))
+ (const_int 2)
+ (and
+ (le (minus (match_dup 3) (pc)) (const_int 4092))
+ (ge (minus (match_dup 3) (pc)) (const_int -4094)))
+ (const_int 4)]
+ (const_int 6))
+ ]
+
+ (const_int 2)))
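+
+;; Purely illustrative, in plain C: the short-form test the "br" entry
+;; above encodes, with the forward range already reduced as the
+;; preceding comment explains.
+;;
+;;   int short_jump_p (long offset)
+;;   {
+;;     return offset >= -4096 && offset <= 4092;
+;;   }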
+
+;; Classify the insns into those that are one instruction and those that
+;; are more than one in sequence.
+(define_attr "seq_insns" "single,multi"
+ (const_string "single"))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "misc")
+ (set_attr "seq_insns" "multi")
+ (set_attr "length" "4")])
+
+;; Conditional moves
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")))]
+ ""
+{
+ operands[1] = bfin_gen_compare (operands[1], SImode);
+})
+
+(define_insn "*movsicc_insn1"
+ [(set (match_operand:SI 0 "register_operand" "=da,da,da")
+ (if_then_else:SI
+ (eq:BI (match_operand:BI 3 "register_operand" "C,C,C")
+ (const_int 0))
+ (match_operand:SI 1 "register_operand" "da,0,da")
+ (match_operand:SI 2 "register_operand" "0,da,da")))]
+ ""
+ "@
+ if !cc %0 =%1; /* movsicc-1a */
+ if cc %0 =%2; /* movsicc-1b */
+ if !cc %0 =%1; if cc %0=%2; /* movsicc-1 */"
+ [(set_attr "length" "2,2,4")
+ (set_attr "type" "movcc")
+ (set_attr "seq_insns" "*,*,multi")])
+
+(define_insn "*movsicc_insn2"
+ [(set (match_operand:SI 0 "register_operand" "=da,da,da")
+ (if_then_else:SI
+ (ne:BI (match_operand:BI 3 "register_operand" "C,C,C")
+ (const_int 0))
+ (match_operand:SI 1 "register_operand" "0,da,da")
+ (match_operand:SI 2 "register_operand" "da,0,da")))]
+ ""
+ "@
+ if !cc %0 =%2; /* movsicc-2b */
+ if cc %0 =%1; /* movsicc-2a */
+ if cc %0 =%1; if !cc %0=%2; /* movsicc-1 */"
+ [(set_attr "length" "2,2,4")
+ (set_attr "type" "movcc")
+ (set_attr "seq_insns" "*,*,multi")])
+
+;; Insns to load HIGH and LO_SUM
+
+(define_insn "movsi_high"
+ [(set (match_operand:SI 0 "register_operand" "=x")
+ (high:SI (match_operand:SI 1 "immediate_operand" "i")))]
+ "reload_completed"
+ "%d0 = %d1;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+(define_insn "movstricthi_high"
+ [(set (match_operand:SI 0 "register_operand" "+x")
+ (ior:SI (and:SI (match_dup 0) (const_int 65535))
+ (match_operand:SI 1 "immediate_operand" "i")))]
+ "reload_completed"
+ "%d0 = %d1;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_low"
+ [(set (match_operand:SI 0 "register_operand" "=x")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "reload_completed"
+ "%h0 = %h2;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_high_pic"
+ [(set (match_operand:SI 0 "register_operand" "=x")
+ (high:SI (unspec:SI [(match_operand:SI 1 "" "")]
+ UNSPEC_MOVE_PIC)))]
+ ""
+ "%d0 = %1@GOT_LOW;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_low_pic"
+ [(set (match_operand:SI 0 "register_operand" "=x")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "0")
+ (unspec:SI [(match_operand:SI 2 "" "")]
+ UNSPEC_MOVE_PIC)))]
+ ""
+ "%h0 = %h2@GOT_HIGH;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+;;; Move instructions
+
+(define_insn_and_split "movdi_insn"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=x,mx,r")
+ (match_operand:DI 1 "general_operand" "iFx,r,mx"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ rtx lo_half[2], hi_half[2];
+ split_di (operands, 2, lo_half, hi_half);
+
+ if (reg_overlap_mentioned_p (lo_half[0], hi_half[1]))
+ {
+ operands[2] = hi_half[0];
+ operands[3] = hi_half[1];
+ operands[4] = lo_half[0];
+ operands[5] = lo_half[1];
+ }
+ else
+ {
+ operands[2] = lo_half[0];
+ operands[3] = lo_half[1];
+ operands[4] = hi_half[0];
+ operands[5] = hi_half[1];
+ }
+})
+
+(define_insn "movbi"
+ [(set (match_operand:BI 0 "nonimmediate_operand" "=x,x,d,md,C,d,C,P1")
+ (match_operand:BI 1 "general_operand" "x,xKs3,md,d,d,C,P0,P1"))]
+
+ ""
+ "@
+ %0 = %1;
+ %0 = %1 (X);
+ %0 = B %1 (Z)%!
+ B %0 = %1;
+ CC = %1;
+ %0 = CC;
+ CC = R0 < R0;
+ CC = R0 == R0;"
+ [(set_attr "type" "move,mvi,mcld,mcst,compare,compare,compare,compare")
+ (set_attr "length" "2,2,*,*,2,2,2,2")
+ (set_attr "seq_insns" "*,*,*,*,*,*,*,*")])
+
+(define_insn "movpdi"
+ [(set (match_operand:PDI 0 "nonimmediate_operand" "=e,<,e")
+ (match_operand:PDI 1 "general_operand" " e,e,>"))]
+ ""
+ "@
+ %0 = %1;
+ %0 = %x1; %0 = %w1;
+ %w0 = %1; %x0 = %1;"
+ [(set_attr "type" "move,mcst,mcld")
+ (set_attr "seq_insns" "*,multi,multi")])
+
+(define_insn "load_accumulator"
+ [(set (match_operand:PDI 0 "register_operand" "=e")
+ (sign_extend:PDI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = %1;"
+ [(set_attr "type" "move")])
+
+(define_insn_and_split "load_accumulator_pair"
+ [(set (match_operand:V2PDI 0 "register_operand" "=e")
+ (sign_extend:V2PDI (vec_concat:V2SI
+ (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d"))))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 3) (sign_extend:PDI (match_dup 1)))
+ (set (match_dup 4) (sign_extend:PDI (match_dup 2)))]
+{
+ operands[3] = gen_rtx_REG (PDImode, REGNO (operands[0]));
+ operands[4] = gen_rtx_REG (PDImode, REGNO (operands[0]) + 1);
+})
+
+(define_insn "*pushsi_insn"
+ [(set (mem:SI (pre_dec:SI (reg:SI REG_SP)))
+ (match_operand:SI 0 "register_operand" "xy"))]
+ ""
+ "[--SP] = %0;"
+ [(set_attr "type" "mcst")
+ (set_attr "addrtype" "32bit")
+ (set_attr "length" "2")])
+
+(define_insn "*popsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=d,xy")
+ (mem:SI (post_inc:SI (reg:SI REG_SP))))]
+ ""
+ "%0 = [SP++]%!"
+ [(set_attr "type" "mcld")
+ (set_attr "addrtype" "preg,32bit")
+ (set_attr "length" "2")])
+
+;; The first alternative is used to make reload choose a limited register
+;; class when faced with a movsi_insn that had its input operand replaced
+;; with a PLUS. We generally require fewer secondary reloads this way.
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=da,x,da,y,da,x,x,x,da,mr")
+ (match_operand:SI 1 "general_operand" "da,x,y,da,xKs7,xKsh,xKuh,ix,mr,da"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "@
+ %0 = %1;
+ %0 = %1;
+ %0 = %1;
+ %0 = %1;
+ %0 = %1 (X);
+ %0 = %1 (X);
+ %0 = %1 (Z);
+ #
+ %0 = %1%!
+ %0 = %1%!"
+ [(set_attr "type" "move,move,move,move,mvi,mvi,mvi,*,mcld,mcst")
+ (set_attr "length" "2,2,2,2,2,4,4,*,*,*")])
+
+(define_insn "*movsi_insn32"
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:SI 1 "nonmemory_operand" "d,P0")] UNSPEC_32BIT))]
+ ""
+ "@
+ %0 = ROT %1 BY 0%!
+ %0 = %0 -|- %0%!"
+ [(set_attr "type" "dsp32shiftimm,dsp32")])
+
+(define_split
+ [(set (match_operand:SI 0 "d_register_operand" "")
+ (const_int 0))]
+ "splitting_for_sched && !optimize_size"
+ [(set (match_dup 0) (unspec:SI [(const_int 0)] UNSPEC_32BIT))])
+
+(define_split
+ [(set (match_operand:SI 0 "d_register_operand" "")
+ (match_operand:SI 1 "d_register_operand" ""))]
+ "splitting_for_sched && !optimize_size"
+ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_32BIT))])
+
+(define_insn_and_split "*movv2hi_insn"
+ [(set (match_operand:V2HI 0 "nonimmediate_operand" "=da,da,d,dm")
+ (match_operand:V2HI 1 "general_operand" "i,di,md,d"))]
+
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "@
+ #
+ %0 = %1;
+ %0 = %1%!
+ %0 = %1%!"
+ "reload_completed && GET_CODE (operands[1]) == CONST_VECTOR"
+ [(set (match_dup 0) (high:SI (match_dup 2)))
+ (set (match_dup 0) (lo_sum:SI (match_dup 0) (match_dup 3)))]
+{
+ HOST_WIDE_INT intval = INTVAL (XVECEXP (operands[1], 0, 1)) << 16;
+ intval |= INTVAL (XVECEXP (operands[1], 0, 0)) & 0xFFFF;
+
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]));
+ operands[2] = operands[3] = GEN_INT (trunc_int_for_mode (intval, SImode));
+}
+ [(set_attr "type" "move,move,mcld,mcst")
+ (set_attr "length" "2,2,*,*")])
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=x,da,x,d,mr")
+ (match_operand:HI 1 "general_operand" "x,xKs7,xKsh,mr,d"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+{
+ static const char *templates[] = {
+ "%0 = %1;",
+ "%0 = %1 (X);",
+ "%0 = %1 (X);",
+ "%0 = W %1 (X)%!",
+ "W %0 = %1%!",
+ "%h0 = W %1%!",
+ "W %0 = %h1%!"
+ };
+ int alt = which_alternative;
+ rtx mem = (MEM_P (operands[0]) ? operands[0]
+ : MEM_P (operands[1]) ? operands[1] : NULL_RTX);
+ if (mem && bfin_dsp_memref_p (mem))
+ alt += 2;
+ return templates[alt];
+}
+ [(set_attr "type" "move,mvi,mvi,mcld,mcst")
+ (set_attr "length" "2,2,4,*,*")])
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=x,da,x,d,mr")
+ (match_operand:QI 1 "general_operand" "x,xKs7,xKsh,mr,d"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "@
+ %0 = %1;
+ %0 = %1 (X);
+ %0 = %1 (X);
+ %0 = B %1 (X)%!
+ B %0 = %1%!"
+ [(set_attr "type" "move,mvi,mvi,mcld,mcst")
+ (set_attr "length" "2,2,4,*,*")])
+
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x,x,da,mr")
+ (match_operand:SF 1 "general_operand" "x,Fx,mr,da"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "@
+ %0 = %1;
+ #
+ %0 = %1%!
+ %0 = %1%!"
+ [(set_attr "type" "move,*,mcld,mcst")])
+
+(define_insn_and_split "movdf_insn"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=x,mx,r")
+ (match_operand:DF 1 "general_operand" "iFx,r,mx"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ rtx lo_half[2], hi_half[2];
+ split_di (operands, 2, lo_half, hi_half);
+
+ if (reg_overlap_mentioned_p (lo_half[0], hi_half[1]))
+ {
+ operands[2] = hi_half[0];
+ operands[3] = hi_half[1];
+ operands[4] = lo_half[0];
+ operands[5] = lo_half[1];
+ }
+ else
+ {
+ operands[2] = lo_half[0];
+ operands[3] = lo_half[1];
+ operands[4] = hi_half[0];
+ operands[5] = hi_half[1];
+ }
+})
+
+;; Storing halfwords.
+(define_insn "*movsi_insv"
+ [(set (zero_extract:SI (match_operand 0 "register_operand" "+d,x")
+ (const_int 16)
+ (const_int 16))
+ (match_operand:SI 1 "nonmemory_operand" "d,n"))]
+ ""
+ "@
+ %d0 = %h1 << 0%!
+ %d0 = %1;"
+ [(set_attr "type" "dsp32shiftimm,mvi")])
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "immediate_operand" "")
+ (match_operand:SI 2 "immediate_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+{
+ if (INTVAL (operands[1]) != 16 || INTVAL (operands[2]) != 16)
+ FAIL;
+
+ /* From mips.md: insert_bit_field doesn't verify that our source
+ matches the predicate, so check it again here. */
+ if (! register_operand (operands[0], VOIDmode))
+ FAIL;
+})
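+
+;; Purely illustrative, in plain C: the only bit-field insert this
+;; expander accepts is a 16-bit value stored into the high halfword
+;; of a register.
+;;
+;;   unsigned insv_sketch (unsigned r, unsigned short v)
+;;   {
+;;     return (r & 0xffffu) | ((unsigned) v << 16);
+;;   }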
+
+;; This is the main "hook" for PIC code. When generating
+;; PIC, movsi is responsible for determining when the source address
+;; needs PIC relocation and appropriately calling legitimize_pic_address
+;; to perform the actual relocation.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+{
+ if (expand_move (operands, SImode))
+ DONE;
+})
+
+(define_expand "movv2hi"
+ [(set (match_operand:V2HI 0 "nonimmediate_operand" "")
+ (match_operand:V2HI 1 "general_operand" ""))]
+ ""
+ "expand_move (operands, V2HImode);")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "expand_move (operands, DImode);")
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "expand_move (operands, SFmode);")
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "expand_move (operands, DFmode);")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "expand_move (operands, HImode);")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ " expand_move (operands, QImode); ")
+
+;; Some define_splits to break up SI/SFmode loads of immediate constants.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "symbolic_or_const_operand" ""))]
+ "reload_completed
+ /* Always split symbolic operands; split integer constants that are
+ too large for a single instruction. */
+ && (GET_CODE (operands[1]) != CONST_INT
+ || (INTVAL (operands[1]) < -32768
+ || INTVAL (operands[1]) >= 65536
+ || (INTVAL (operands[1]) >= 32768 && PREG_P (operands[0]))))"
+ [(set (match_dup 0) (high:SI (match_dup 1)))
+ (set (match_dup 0) (lo_sum:SI (match_dup 0) (match_dup 1)))]
+{
+ if (GET_CODE (operands[1]) == CONST_INT
+ && split_load_immediate (operands))
+ DONE;
+ /* ??? Do something about TARGET_LOW_64K. */
+})
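+
+;; Purely illustrative, in plain C: the constant ranges the condition
+;; above treats as loadable by a single instruction (everything else,
+;; and every symbolic operand, is split into HIGH/LO_SUM).
+;;
+;;   int single_insn_const_p (long v, int is_preg)
+;;   {
+;;     if (v >= -32768 && v < 32768)
+;;       return 1;                    /* sign-extended load immediate */
+;;     return !is_preg && v >= 32768 && v < 65536;  /* zero-extended form */
+;;   }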
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "immediate_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (high:SI (match_dup 3)))
+ (set (match_dup 2) (lo_sum:SI (match_dup 2) (match_dup 3)))]
+{
+ long values;
+ REAL_VALUE_TYPE value;
+
+ gcc_assert (GET_CODE (operands[1]) == CONST_DOUBLE);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (value, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (value, values);
+
+ operands[2] = gen_rtx_REG (SImode, true_regnum (operands[0]));
+ operands[3] = GEN_INT (trunc_int_for_mode (values, SImode));
+ if (values >= -32768 && values < 65536)
+ {
+ emit_move_insn (operands[2], operands[3]);
+ DONE;
+ }
+ if (split_load_immediate (operands + 2))
+ DONE;
+})
+
+;; Sadly, this can't be a proper named movstrict pattern, since the compiler
+;; expects to be able to use registers for operand 1.
+;; Note that the asm instruction is defined by the manual to take an unsigned
+;; constant, but it doesn't matter to the assembler, and the compiler only
+;; deals with sign-extended constants. Hence "Ksh".
+(define_insn "movstricthi_1"
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+x"))
+ (match_operand:HI 1 "immediate_operand" "Ksh"))]
+ ""
+ "%h0 = %1;"
+ [(set_attr "type" "mvi")
+ (set_attr "length" "4")])
+
+;; Sign and zero extensions
+
+(define_insn_and_split "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d, d")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "d, m")))]
+ ""
+ "@
+ %0 = %h1 (X);
+ %0 = W %h1 (X)%!"
+ "reload_completed && bfin_dsp_memref_p (operands[1])"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+{
+ operands[2] = gen_lowpart (HImode, operands[0]);
+}
+ [(set_attr "type" "alu0,mcld")])
+
+(define_insn_and_split "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d, d")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "d, m")))]
+ ""
+ "@
+ %0 = %h1 (Z);
+ %0 = W %h1 (Z)%!"
+ "reload_completed && bfin_dsp_memref_p (operands[1])"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (zero_extend:SI (match_dup 2)))]
+{
+ operands[2] = gen_lowpart (HImode, operands[0]);
+}
+ [(set_attr "type" "alu0,mcld")])
+
+(define_insn "zero_extendbisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:BI 1 "nonimmediate_operand" "C")))]
+ ""
+ "%0 = %1;"
+ [(set_attr "type" "compare")])
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=d, d")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m, d")))]
+ ""
+ "@
+ %0 = B %1 (X)%!
+ %0 = %T1 (X);"
+ [(set_attr "type" "mcld,alu0")])
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d, d")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m, d")))]
+ ""
+ "@
+ %0 = B %1 (X)%!
+ %0 = %T1 (X);"
+ [(set_attr "type" "mcld,alu0")])
+
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=d, d")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m, d")))]
+ ""
+ "@
+ %0 = B %1 (Z)%!
+ %0 = %T1 (Z);"
+ [(set_attr "type" "mcld,alu0")])
+
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d, d")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m, d")))]
+ ""
+ "@
+ %0 = B %1 (Z)%!
+ %0 = %T1 (Z);"
+ [(set_attr "type" "mcld,alu0")])
+
+;; DImode logical operations
+
+(define_code_iterator any_logical [and ior xor])
+(define_code_attr optab [(and "and")
+ (ior "ior")
+ (xor "xor")])
+(define_code_attr op [(and "&")
+ (ior "|")
+ (xor "^")])
+(define_code_attr high_result [(and "0")
+ (ior "%H1")
+ (xor "%H1")])
+
+;; Keep this pattern around to avoid generating NO_CONFLICT blocks.
+(define_expand "<optab>di3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (any_logical:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "general_operand" "d")))]
+ ""
+{
+ rtx hi_half[3], lo_half[3];
+ enum insn_code icode = CODE_FOR_<optab>si3;
+ if (!reg_overlap_mentioned_p (operands[0], operands[1])
+ && !reg_overlap_mentioned_p (operands[0], operands[2]))
+ emit_clobber (operands[0]);
+ split_di (operands, 3, lo_half, hi_half);
+ if (!(*insn_data[icode].operand[2].predicate) (lo_half[2], SImode))
+ lo_half[2] = force_reg (SImode, lo_half[2]);
+ emit_insn (GEN_FCN (icode) (lo_half[0], lo_half[1], lo_half[2]));
+ if (!(*insn_data[icode].operand[2].predicate) (hi_half[2], SImode))
+ hi_half[2] = force_reg (SImode, hi_half[2]);
+ emit_insn (GEN_FCN (icode) (hi_half[0], hi_half[1], hi_half[2]));
+ DONE;
+})
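+
+;; Purely illustrative, in plain C: the expander simply applies the
+;; operation word by word, e.g. for anddi3:
+;;
+;;   void anddi3_sketch (unsigned dst[2], unsigned a[2], unsigned b[2])
+;;   {
+;;     dst[0] = a[0] & b[0];    /* low words */
+;;     dst[1] = a[1] & b[1];    /* high words */
+;;   }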
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "d")))]
+ ""
+ "%0 = %T1 (Z);\\n\\t%H0 = 0;"
+ [(set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+(define_insn "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "d")))]
+ ""
+ "%0 = %h1 (Z);\\n\\t%H0 = 0;"
+ [(set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+(define_insn_and_split "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 3) (ashiftrt:SI (match_dup 3) (const_int 31)))]
+{
+ split_di (operands, 1, operands + 2, operands + 3);
+ if (REGNO (operands[0]) != REGNO (operands[1]))
+ emit_move_insn (operands[2], operands[1]);
+})
+
+(define_insn_and_split "extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "d")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (sign_extend:SI (match_dup 1)))
+ (set (match_dup 3) (sign_extend:SI (match_dup 1)))
+ (set (match_dup 3) (ashiftrt:SI (match_dup 3) (const_int 31)))]
+{
+ split_di (operands, 1, operands + 2, operands + 3);
+})
+
+(define_insn_and_split "extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "d")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (sign_extend:SI (match_dup 1)))
+ (set (match_dup 3) (sign_extend:SI (match_dup 1)))
+ (set (match_dup 3) (ashiftrt:SI (match_dup 3) (const_int 31)))]
+{
+ split_di (operands, 1, operands + 2, operands + 3);
+})
+
+;; DImode arithmetic operations
+
+(define_insn "add_with_carry"
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,d")
+ (match_operand:SI 2 "nonmemory_operand" "Ks7,d")))
+ (set (match_operand:BI 3 "register_operand" "=C,C")
+ (ltu:BI (not:SI (match_dup 1)) (match_dup 2)))]
+ ""
+ "@
+ %0 += %2; cc = ac0;
+ %0 = %1 + %2; cc = ac0;"
+ [(set_attr "type" "alu0")
+ (set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+(define_insn "sub_with_carry"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (minus:SI (match_operand:SI 1 "register_operand" "%d")
+ (match_operand:SI 2 "nonmemory_operand" "d")))
+ (set (match_operand:BI 3 "register_operand" "=C")
+ (leu:BI (match_dup 2) (match_dup 1)))]
+ ""
+ "%0 = %1 - %2; cc = ac0;"
+ [(set_attr "type" "alu0")
+ (set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonmemory_operand" "")))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (reg:CC 34))]
+ ""
+{
+ rtx xops[8];
+ xops[0] = gen_lowpart (SImode, operands[0]);
+ xops[1] = simplify_gen_subreg (SImode, operands[0], DImode, 4);
+ xops[2] = gen_lowpart (SImode, operands[1]);
+ xops[3] = simplify_gen_subreg (SImode, operands[1], DImode, 4);
+ xops[4] = gen_lowpart (SImode, operands[2]);
+ xops[5] = simplify_gen_subreg (SImode, operands[2], DImode, 4);
+ xops[6] = gen_reg_rtx (SImode);
+ xops[7] = gen_rtx_REG (BImode, REG_CC);
+ if (!register_operand (xops[4], SImode)
+ && (GET_CODE (xops[4]) != CONST_INT
+ || !satisfies_constraint_Ks7 (xops[4])))
+ xops[4] = force_reg (SImode, xops[4]);
+ if (!reg_overlap_mentioned_p (operands[0], operands[1])
+ && !reg_overlap_mentioned_p (operands[0], operands[2]))
+ emit_clobber (operands[0]);
+ emit_insn (gen_add_with_carry (xops[0], xops[2], xops[4], xops[7]));
+ emit_insn (gen_movbisi (xops[6], xops[7]));
+ if (!register_operand (xops[5], SImode)
+ && (GET_CODE (xops[5]) != CONST_INT
+ || !satisfies_constraint_Ks7 (xops[5])))
+ xops[5] = force_reg (SImode, xops[5]);
+ if (xops[5] != const0_rtx)
+ emit_insn (gen_addsi3 (xops[1], xops[3], xops[5]));
+ else
+ emit_move_insn (xops[1], xops[3]);
+ emit_insn (gen_addsi3 (xops[1], xops[1], xops[6]));
+ DONE;
+})
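+
+;; Purely illustrative, in plain C: the double-word addition this
+;; expander emits, with CC playing the role of the carry flag.
+;;
+;;   unsigned long long adddi_sketch (unsigned long long a,
+;;                                    unsigned long long b)
+;;   {
+;;     unsigned lo = (unsigned) a + (unsigned) b; /* add_with_carry */
+;;     unsigned carry = lo < (unsigned) a;        /* CC = AC0, via movbisi */
+;;     unsigned hi = (unsigned) (a >> 32)
+;;                   + (unsigned) (b >> 32) + carry;
+;;     return ((unsigned long long) hi << 32) | lo;
+;;   }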
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:CC 34))]
+ ""
+{
+ rtx xops[8];
+ xops[0] = gen_lowpart (SImode, operands[0]);
+ xops[1] = simplify_gen_subreg (SImode, operands[0], DImode, 4);
+ xops[2] = gen_lowpart (SImode, operands[1]);
+ xops[3] = simplify_gen_subreg (SImode, operands[1], DImode, 4);
+ xops[4] = gen_lowpart (SImode, operands[2]);
+ xops[5] = simplify_gen_subreg (SImode, operands[2], DImode, 4);
+ xops[6] = gen_reg_rtx (SImode);
+ xops[7] = gen_rtx_REG (BImode, REG_CC);
+ if (!reg_overlap_mentioned_p (operands[0], operands[1])
+ && !reg_overlap_mentioned_p (operands[0], operands[2]))
+ emit_clobber (operands[0]);
+ emit_insn (gen_sub_with_carry (xops[0], xops[2], xops[4], xops[7]));
+ emit_insn (gen_notbi (xops[7], xops[7]));
+ emit_insn (gen_movbisi (xops[6], xops[7]));
+ emit_insn (gen_subsi3 (xops[1], xops[3], xops[5]));
+ emit_insn (gen_subsi3 (xops[1], xops[1], xops[6]));
+ DONE;
+})
+
+;; Combined shift/add instructions
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a,d")
+ (ashift:SI (plus:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "register_operand" "a,d"))
+ (match_operand:SI 3 "pos_scale_operand" "P1P2,P1P2")))]
+ ""
+ "%0 = (%0 + %2) << %3;" /* "shadd %0,%2,%3;" */
+ [(set_attr "type" "alu0")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (match_operand:SI 1 "register_operand" "a")
+ (mult:SI (match_operand:SI 2 "register_operand" "a")
+ (match_operand:SI 3 "scale_by_operand" "i"))))]
+ ""
+ "%0 = %1 + (%2 << %X3);"
+ [(set_attr "type" "alu0")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (match_operand:SI 1 "register_operand" "a")
+ (ashift:SI (match_operand:SI 2 "register_operand" "a")
+ (match_operand:SI 3 "pos_scale_operand" "i"))))]
+ ""
+ "%0 = %1 + (%2 << %3);"
+ [(set_attr "type" "alu0")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "a")
+ (match_operand:SI 2 "scale_by_operand" "i"))
+ (match_operand:SI 3 "register_operand" "a")))]
+ ""
+ "%0 = %3 + (%1 << %X2);"
+ [(set_attr "type" "alu0")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "a")
+ (match_operand:SI 2 "pos_scale_operand" "i"))
+ (match_operand:SI 3 "register_operand" "a")))]
+ ""
+ "%0 = %3 + (%1 << %2);"
+ [(set_attr "type" "alu0")])
+
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%d"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "d"))))]
+ ""
+ "%0 = %h1 * %h2 (IS)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "umulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "%d"))
+ (zero_extend:SI (match_operand:HI 2 "register_operand" "d"))))]
+ ""
+ "%0 = %h1 * %h2 (FU)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=W")
+ (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "W"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "W"))))]
+ ""
+ "%0 = %h2 * %h1 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+;; The processor also supports ireg += mreg or ireg -= mreg, but these
+;; are unusable if we don't ensure that the corresponding lreg is zero.
+;; The same applies to the add/subtract constant versions involving
+;; iregs.
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=ad,a,d")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0, a,d")
+ (match_operand:SI 2 "reg_or_7bit_operand" "Ks7, a,d")))]
+ ""
+ "@
+ %0 += %2;
+ %0 = %1 + %2;
+ %0 = %1 + %2;"
+ [(set_attr "type" "alu0")
+ (set_attr "length" "2,2,2")])
+
+(define_insn "ssaddsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ss_plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 + %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=da,d,a")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,d,0")
+ (match_operand:SI 2 "reg_or_neg7bit_operand" "KN7,d,a")))]
+ ""
+{
+ static const char *const strings_subsi3[] = {
+ "%0 += -%2;",
+ "%0 = %1 - %2;",
+ "%0 -= %2;",
+ };
+
+ if (CONSTANT_P (operands[2]) && INTVAL (operands[2]) < 0) {
+ rtx tmp_op = operands[2];
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ output_asm_insn ("%0 += %2;", operands);
+ operands[2] = tmp_op;
+ return "";
+ }
+
+ return strings_subsi3[which_alternative];
+}
+ [(set_attr "type" "alu0")])
+
+(define_insn "sssubsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ss_minus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 - %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Accumulator addition
+
+(define_insn "addpdi3"
+ [(set (match_operand:PDI 0 "register_operand" "=A")
+ (ss_plus:PDI (match_operand:PDI 1 "register_operand" "%0")
+ (match_operand:PDI 2 "nonmemory_operand" "B")))]
+ ""
+ "A0 += A1%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sum_of_accumulators"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ss_truncate:SI
+ (ss_plus:PDI (match_operand:PDI 2 "register_operand" "1")
+ (match_operand:PDI 3 "register_operand" "B"))))
+ (set (match_operand:PDI 1 "register_operand" "=A")
+ (ss_plus:PDI (match_dup 2) (match_dup 3)))]
+ ""
+ "%0 = (A0 += A1)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "us_truncpdisi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,W")
+ (us_truncate:SI (match_operand:PDI 1 "register_operand" "A,B")))]
+ ""
+ "%0 = %1 (FU)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Bit test instructions
+
+(define_insn "*not_bittst"
+ [(set (match_operand:BI 0 "register_operand" "=C")
+ (eq:BI (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "Ku5"))
+ (const_int 0)))]
+ ""
+ "cc = !BITTST (%1,%2);"
+ [(set_attr "type" "alu0")])
+
+(define_insn "*bittst"
+ [(set (match_operand:BI 0 "register_operand" "=C")
+ (ne:BI (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "Ku5"))
+ (const_int 0)))]
+ ""
+ "cc = BITTST (%1,%2);"
+ [(set_attr "type" "alu0")])
+
+(define_insn_and_split "*bit_extract"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "Ku5")))
+ (clobber (reg:BI REG_CC))]
+ ""
+ "#"
+ ""
+ [(set (reg:BI REG_CC)
+ (ne:BI (zero_extract:SI (match_dup 1) (const_int 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0)
+ (ne:SI (reg:BI REG_CC) (const_int 0)))])
+
+(define_insn_and_split "*not_bit_extract"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extract:SI (not:SI (match_operand:SI 1 "register_operand" "d"))
+ (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "Ku5")))
+ (clobber (reg:BI REG_CC))]
+ ""
+ "#"
+ ""
+ [(set (reg:BI REG_CC)
+ (eq:BI (zero_extract:SI (match_dup 1) (const_int 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0)
+ (ne:SI (reg:BI REG_CC) (const_int 0)))])
+
+(define_insn "*andsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=d,d,d,d")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,d,d,d")
+ (match_operand:SI 2 "rhs_andsi3_operand" "L,M1,M2,d")))]
+ ""
+ "@
+ BITCLR (%0,%Y2);
+ %0 = %T1 (Z);
+ %0 = %h1 (Z);
+ %0 = %1 & %2;"
+ [(set_attr "type" "alu0")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+{
+ if (highbits_operand (operands[2], SImode))
+ {
+ operands[2] = GEN_INT (exact_log2 (-INTVAL (operands[2])));
+ emit_insn (gen_ashrsi3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], operands[2]));
+ DONE;
+ }
+ if (! rhs_andsi3_operand (operands[2], SImode))
+ operands[2] = force_reg (SImode, operands[2]);
+})
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,d")
+ (match_operand:SI 2 "regorlog2_operand" "J,d")))]
+ ""
+ "@
+ BITSET (%0, %X2);
+ %0 = %1 | %2;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,d")
+ (match_operand:SI 2 "regorlog2_operand" "J,d")))]
+ ""
+ "@
+ BITTGL (%0, %X2);
+ %0 = %1 ^ %2;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "ones"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (unspec:HI [(match_operand:SI 1 "register_operand" "d")]
+ UNSPEC_ONES))]
+ ""
+ "%h0 = ONES %1;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (smax:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "%0 = max(%1,%2)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (smin:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "%0 = min(%1,%2)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (abs:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = abs %1%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssabssi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ss_abs:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = abs %1%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (neg:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = -%1;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "ssnegsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ss_neg:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = -%1 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (not:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "%0 = ~%1;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "signbitssi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (if_then_else:HI
+ (lt (match_operand:SI 1 "register_operand" "d") (const_int 0))
+ (clz:HI (not:SI (match_dup 1)))
+ (clz:HI (match_dup 1))))]
+ ""
+ "%h0 = signbits %1%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssroundsi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (truncate:HI
+ (lshiftrt:SI (ss_plus:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 32768))
+ (const_int 16))))]
+ ""
+ "%h0 = %1 (RND)%!"
+ [(set_attr "type" "dsp32")])
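+
+;; Purely illustrative, in plain C, and ignoring the saturation that
+;; ss_plus provides on overflow: rounding the high halfword out of a
+;; 32-bit value.
+;;
+;;   short ssround_sketch (int x)
+;;   {
+;;     return (short) (((unsigned) x + 0x8000u) >> 16);
+;;   }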
+
+(define_insn "smaxhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (smax:HI (match_operand:HI 1 "register_operand" "d")
+ (match_operand:HI 2 "register_operand" "d")))]
+ ""
+ "%0 = max(%1,%2) (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sminhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (smin:HI (match_operand:HI 1 "register_operand" "d")
+ (match_operand:HI 2 "register_operand" "d")))]
+ ""
+ "%0 = min(%1,%2) (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "abshi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (abs:HI (match_operand:HI 1 "register_operand" "d")))]
+ ""
+ "%0 = abs %1 (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (neg:HI (match_operand:HI 1 "register_operand" "d")))]
+ ""
+ "%0 = -%1;"
+ [(set_attr "type" "alu0")])
+
+(define_insn "ssneghi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ss_neg:HI (match_operand:HI 1 "register_operand" "d")))]
+ ""
+ "%0 = -%1 (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "signbitshi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (if_then_else:HI
+ (lt (match_operand:HI 1 "register_operand" "d") (const_int 0))
+ (clz:HI (not:HI (match_dup 1)))
+ (clz:HI (match_dup 1))))]
+ ""
+ "%h0 = signbits %h1%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "d")))]
+ ""
+ "%0 *= %2;"
+ [(set_attr "type" "mult")])
+
+(define_expand "umulsi3_highpart"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "")))
+ (const_int 32))))
+ (clobber (reg:PDI REG_A0))
+ (clobber (reg:PDI REG_A1))])]
+ ""
+{
+ if (!optimize_size)
+ {
+ rtx a1reg = gen_rtx_REG (PDImode, REG_A1);
+ rtx a0reg = gen_rtx_REG (PDImode, REG_A0);
+ emit_insn (gen_flag_macinit1hi (a1reg,
+ gen_lowpart (HImode, operands[1]),
+ gen_lowpart (HImode, operands[2]),
+ GEN_INT (MACFLAG_FU)));
+ emit_insn (gen_lshrpdi3 (a1reg, a1reg, GEN_INT (16)));
+ emit_insn (gen_flag_mul_macv2hi_parts_acconly (a0reg, a1reg,
+ gen_lowpart (V2HImode, operands[1]),
+ gen_lowpart (V2HImode, operands[2]),
+ const1_rtx, const1_rtx,
+ const1_rtx, const0_rtx, a1reg,
+ const0_rtx, GEN_INT (MACFLAG_FU),
+ GEN_INT (MACFLAG_FU)));
+ emit_insn (gen_flag_machi_parts_acconly (a1reg,
+ gen_lowpart (V2HImode, operands[2]),
+ gen_lowpart (V2HImode, operands[1]),
+ const1_rtx, const0_rtx,
+ a1reg, const0_rtx, GEN_INT (MACFLAG_FU)));
+ emit_insn (gen_lshrpdi3 (a1reg, a1reg, GEN_INT (16)));
+ emit_insn (gen_addpdi3 (a0reg, a0reg, a1reg));
+ emit_insn (gen_us_truncpdisi2 (operands[0], a0reg));
+ }
+ else
+ {
+ rtx umulsi3_highpart_libfunc
+ = init_one_libfunc ("__umulsi3_highpart");
+
+ emit_library_call_value (umulsi3_highpart_libfunc,
+ operands[0], LCT_NORMAL, SImode,
+ 2, operands[1], SImode, operands[2], SImode);
+ }
+ DONE;
+})
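+
+;; Purely illustrative, in plain C: the 16x16 decomposition the MAC
+;; sequence above computes for the unsigned high part (the real
+;; sequence keeps the partial sums in the A0/A1 accumulators rather
+;; than a 64-bit temporary).
+;;
+;;   unsigned umulhi_sketch (unsigned a, unsigned b)
+;;   {
+;;     unsigned long long al = a & 0xffff, ah = a >> 16;
+;;     unsigned long long bl = b & 0xffff, bh = b >> 16;
+;;     unsigned long long mid = ((al * bl) >> 16) + al * bh + ah * bl;
+;;     return (unsigned) (ah * bh + (mid >> 16));
+;;   }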
+
+(define_expand "smulsi3_highpart"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "")))
+ (const_int 32))))
+ (clobber (reg:PDI REG_A0))
+ (clobber (reg:PDI REG_A1))])]
+ ""
+{
+ if (!optimize_size)
+ {
+ rtx a1reg = gen_rtx_REG (PDImode, REG_A1);
+ rtx a0reg = gen_rtx_REG (PDImode, REG_A0);
+ emit_insn (gen_flag_macinit1hi (a1reg,
+ gen_lowpart (HImode, operands[1]),
+ gen_lowpart (HImode, operands[2]),
+ GEN_INT (MACFLAG_FU)));
+ emit_insn (gen_lshrpdi3 (a1reg, a1reg, GEN_INT (16)));
+ emit_insn (gen_flag_mul_macv2hi_parts_acconly (a0reg, a1reg,
+ gen_lowpart (V2HImode, operands[1]),
+ gen_lowpart (V2HImode, operands[2]),
+ const1_rtx, const1_rtx,
+ const1_rtx, const0_rtx, a1reg,
+ const0_rtx, GEN_INT (MACFLAG_IS),
+ GEN_INT (MACFLAG_IS_M)));
+ emit_insn (gen_flag_machi_parts_acconly (a1reg,
+ gen_lowpart (V2HImode, operands[2]),
+ gen_lowpart (V2HImode, operands[1]),
+ const1_rtx, const0_rtx,
+ a1reg, const0_rtx, GEN_INT (MACFLAG_IS_M)));
+ emit_insn (gen_ashrpdi3 (a1reg, a1reg, GEN_INT (16)));
+ emit_insn (gen_sum_of_accumulators (operands[0], a0reg, a0reg, a1reg));
+ }
+ else
+ {
+ rtx smulsi3_highpart_libfunc
+ = init_one_libfunc ("__smulsi3_highpart");
+
+ emit_library_call_value (smulsi3_highpart_libfunc,
+ operands[0], LCT_NORMAL, SImode,
+ 2, operands[1], SImode, operands[2], SImode);
+ }
+ DONE;
+})
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+})
+
+(define_insn_and_split "*ashlsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=d,d,a,a,a")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,d,a,a,a")
+ (match_operand:SI 2 "nonmemory_operand" "dKu5,Ku5,P1,P2,?P3P4")))]
+ ""
+ "@
+ %0 <<= %2;
+ %0 = %1 << %2%!
+ %0 = %1 + %1;
+ %0 = %1 << %2;
+ #"
+ "PREG_P (operands[0]) && INTVAL (operands[2]) > 2"
+ [(set (match_dup 0) (ashift:SI (match_dup 1) (const_int 2)))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 3)))]
+ "operands[3] = GEN_INT (INTVAL (operands[2]) - 2);"
+ [(set_attr "type" "shft,dsp32shiftimm,shft,shft,*")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,d")
+ (match_operand:SI 2 "nonmemory_operand" "dKu5,Ku5")))]
+ ""
+ "@
+ %0 >>>= %2;
+ %0 = %1 >>> %2%!"
+ [(set_attr "type" "shft,dsp32shiftimm")])
+
+(define_insn "rotl16"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (rotate:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 16)))]
+ ""
+ "%0 = PACK (%h1, %d1)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (rotate:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")))]
+ ""
+{
+ if (INTVAL (operands[2]) != 16)
+ FAIL;
+})
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")))]
+ ""
+{
+ if (INTVAL (operands[2]) != 16)
+ FAIL;
+ emit_insn (gen_rotl16 (operands[0], operands[1]));
+ DONE;
+})
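+
+;; Purely illustrative, in plain C: rotating a 32-bit value by 16 in
+;; either direction swaps its halfwords, which is what the PACK insn
+;; in "rotl16" above does.
+;;
+;;   unsigned rot16_sketch (unsigned x)
+;;   {
+;;     return (x >> 16) | (x << 16);
+;;   }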
+
+
+(define_insn "ror_one"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (lshiftrt:SI (match_operand:SI 1 "register_operand" "d") (const_int 1))
+ (ashift:SI (zero_extend:SI (reg:BI REG_CC)) (const_int 31))))
+ (set (reg:BI REG_CC)
+ (zero_extract:BI (match_dup 1) (const_int 1) (const_int 0)))]
+ ""
+ "%0 = ROT %1 BY -1%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+(define_insn "rol_one"
+ [(set (match_operand:SI 0 "register_operand" "+d")
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "d") (const_int 1))
+ (zero_extend:SI (reg:BI REG_CC))))
+ (set (reg:BI REG_CC)
+ (zero_extract:BI (match_dup 1) (const_int 31) (const_int 0)))]
+ ""
+ "%0 = ROT %1 BY 1%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "general_operand" "")))]
+ ""
+{
+ rtx lo_half[2], hi_half[2];
+
+ if (operands[2] != const1_rtx)
+ FAIL;
+ if (! rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ split_di (operands, 2, lo_half, hi_half);
+
+ emit_move_insn (bfin_cc_rtx, const0_rtx);
+ emit_insn (gen_ror_one (hi_half[0], hi_half[0]));
+ emit_insn (gen_ror_one (lo_half[0], lo_half[0]));
+ DONE;
+})
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "general_operand" "")))]
+ ""
+{
+ rtx lo_half[2], hi_half[2];
+
+ if (operands[2] != const1_rtx)
+ FAIL;
+ if (! rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ split_di (operands, 2, lo_half, hi_half);
+
+ emit_insn (gen_compare_lt (gen_rtx_REG (BImode, REG_CC),
+ hi_half[1], const0_rtx));
+ emit_insn (gen_ror_one (hi_half[0], hi_half[0]));
+ emit_insn (gen_ror_one (lo_half[0], lo_half[0]));
+ DONE;
+})
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "general_operand" "")))]
+ ""
+{
+ rtx lo_half[2], hi_half[2];
+
+ if (operands[2] != const1_rtx)
+ FAIL;
+ if (! rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ split_di (operands, 2, lo_half, hi_half);
+
+ emit_move_insn (bfin_cc_rtx, const0_rtx);
+ emit_insn (gen_rol_one (lo_half[0], lo_half[0]));
+ emit_insn (gen_rol_one (hi_half[0], hi_half[0]));
+ DONE;
+})
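+
+;; The DI-mode shift expanders above handle only shifts by one, chaining
+;; the two 32-bit halves through CC via ror_one/rol_one.  As a hedged
+;; sketch, lshrdi3 by 1 emits roughly:
+;;
+;;   CC = 0;              /* bit to shift into the vacated position */
+;;   hi = ROT hi BY -1;   /* old bit 0 of hi is left in CC */
+;;   lo = ROT lo BY -1;   /* CC enters at bit 31 of lo */
+;;
+;; ashrdi3 differs only in seeding CC with the sign bit of the high
+;; word, and ashldi3 rotates the halves left in the opposite order.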
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,d,a")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,d,a")
+ (match_operand:SI 2 "nonmemory_operand" "dKu5,Ku5,P1P2")))]
+ ""
+ "@
+ %0 >>= %2;
+ %0 = %1 >> %2%!
+ %0 = %1 >> %2;"
+ [(set_attr "type" "shft,dsp32shiftimm,shft")])
+
+(define_insn "lshrpdi3"
+ [(set (match_operand:PDI 0 "register_operand" "=e")
+ (lshiftrt:PDI (match_operand:PDI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "Ku5")))]
+ ""
+ "%0 = %1 >> %2%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+(define_insn "ashrpdi3"
+ [(set (match_operand:PDI 0 "register_operand" "=e")
+ (ashiftrt:PDI (match_operand:PDI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "Ku5")))]
+ ""
+ "%0 = %1 >>> %2%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+;; A pattern to reload the equivalent of
+;; (set (Dreg) (plus (FP) (large_constant)))
+;; or
+;; (set (dagreg) (plus (FP) (arbitrary_constant)))
+;; using a scratch register.
+(define_expand "reload_insi"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=w")
+ (match_operand:SI 1 "fp_plus_const_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))])]
+ ""
+{
+ rtx fp_op = XEXP (operands[1], 0);
+ rtx const_op = XEXP (operands[1], 1);
+ rtx primary = operands[0];
+ rtx scratch = operands[2];
+
+ emit_move_insn (scratch, const_op);
+ emit_insn (gen_addsi3 (scratch, scratch, fp_op));
+ emit_move_insn (primary, scratch);
+ DONE;
+})
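+
+;; For illustration (a sketch, not from the original sources): reloading
+;; R0 = FP + <large constant> with P1 as the scratch would emit roughly
+;;
+;;   P1 = <large constant>;  /* movsi, possibly split into P1.L/P1.H */
+;;   P1 = P1 + FP;           /* addsi3 */
+;;   R0 = P1;                /* copy into the primary reload register */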
+
+(define_mode_iterator AREG [PDI V2PDI])
+
+(define_insn "reload_in<mode>"
+ [(set (match_operand:AREG 0 "register_operand" "=e")
+ (match_operand:AREG 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "=d"))]
+ ""
+{
+ rtx xops[4];
+ xops[0] = operands[0];
+ xops[1] = operands[2];
+ split_di (operands + 1, 1, xops + 2, xops + 3);
+ output_asm_insn ("%1 = %2;", xops);
+ output_asm_insn ("%w0 = %1;", xops);
+ output_asm_insn ("%1 = %3;", xops);
+ output_asm_insn ("%x0 = %1;", xops);
+ return "";
+}
+ [(set_attr "seq_insns" "multi")
+ (set_attr "type" "mcld")
+ (set_attr "length" "12")])
+
+(define_insn "reload_out<mode>"
+ [(set (match_operand:AREG 0 "memory_operand" "=m")
+ (match_operand:AREG 1 "register_operand" "e"))
+ (clobber (match_operand:SI 2 "register_operand" "=d"))]
+ ""
+{
+ rtx xops[4];
+ xops[0] = operands[1];
+ xops[1] = operands[2];
+ split_di (operands, 1, xops + 2, xops + 3);
+ output_asm_insn ("%1 = %w0;", xops);
+ output_asm_insn ("%2 = %1;", xops);
+ output_asm_insn ("%1 = %x0;", xops);
+ output_asm_insn ("%3 = %1;", xops);
+ return "";
+}
+ [(set_attr "seq_insns" "multi")
+ (set_attr "type" "mcld")
+ (set_attr "length" "12")])
+
+;; Jump instructions
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+{
+ if (get_attr_length (insn) == 2)
+ return "jump.s %0;";
+ else
+ return "jump.l %0;";
+}
+ [(set_attr "type" "br")])
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "a"))]
+ ""
+ "jump (%0);"
+ [(set_attr "type" "misc")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand:SI 0 "register_operand" "a"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+{
+ /* In PIC mode, the table entries are stored PC relative.
+ Convert the relative address to an absolute address. */
+ if (flag_pic)
+ {
+ rtx op1 = gen_rtx_LABEL_REF (Pmode, operands[1]);
+
+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
+ op1, NULL_RTX, 0, OPTAB_DIRECT);
+ }
+})
+
+(define_insn "*tablejump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "a"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jump (%0);"
+ [(set_attr "type" "misc")])
+
+;; Hardware loop
+
+; operand 0 is the loop count pseudo register
+; operand 1 is the number of loop iterations or 0 if it is unknown
+; operand 2 is the maximum number of loop iterations
+; operand 3 is the number of levels of enclosed loops
+; operand 4 is the label to jump to at the top of the loop
+(define_expand "doloop_end"
+ [(parallel [(set (pc) (if_then_else
+ (ne (match_operand:SI 0 "" "")
+ (const_int 1))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (unspec [(const_int 0)] UNSPEC_LSETUP_END)
+ (clobber (match_scratch:SI 5 ""))])]
+ ""
+{
+ /* The loop optimizer doesn't check the predicates... */
+ if (GET_MODE (operands[0]) != SImode)
+ FAIL;
+  /* Due to limitations in the hardware (an initial loop count of 0
+     does not loop 2^32 times), we must avoid generating a hardware
+     loop when we cannot rule out this case.  */
+ if (!flag_unsafe_loop_optimizations
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 0xFFFFFFFF)
+ FAIL;
+ bfin_hardware_loop ();
+})
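+
+;; As a hedged example (assumed C source, not from the original files),
+;; a counted loop such as
+;;
+;;   void scale (int *a, int n)
+;;   {
+;;     int i;
+;;     for (i = 0; i < n; i++)
+;;       a[i] *= 2;
+;;   }
+;;
+;; is a doloop_end candidate and can end up as an LSETUP hardware loop
+;; with the trip count held in an LC register.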
+
+(define_insn "loop_end"
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "nonimmediate_operand" "+a*d,*b*v*f,m")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus (match_dup 0)
+ (const_int -1)))
+ (unspec [(const_int 0)] UNSPEC_LSETUP_END)
+ (clobber (match_scratch:SI 2 "=X,&r,&r"))]
+ ""
+ "@
+ /* loop end %0 %l1 */
+ #
+ #"
+ [(set_attr "length" "6,10,14")])
+
+(define_split
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "nondp_reg_or_memory_operand" "")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus (match_dup 0)
+ (const_int -1)))
+ (unspec [(const_int 0)] UNSPEC_LSETUP_END)
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "splitting_loops"
+ [(set (match_dup 2) (match_dup 0))
+ (set (match_dup 2) (plus:SI (match_dup 2) (const_int -1)))
+ (set (match_dup 0) (match_dup 2))
+ (set (reg:BI REG_CC) (eq:BI (match_dup 2) (const_int 0)))
+ (set (pc)
+ (if_then_else (eq (reg:BI REG_CC)
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "")
+
+(define_insn "lsetup_with_autoinit"
+ [(set (match_operand:SI 0 "lt_register_operand" "=t")
+ (label_ref (match_operand 1 "" "")))
+ (set (match_operand:SI 2 "lb_register_operand" "=u")
+ (label_ref (match_operand 3 "" "")))
+ (set (match_operand:SI 4 "lc_register_operand" "=k")
+ (match_operand:SI 5 "register_operand" "a"))]
+ ""
+ "LSETUP (%1, %3) %4 = %5;"
+ [(set_attr "length" "4")])
+
+(define_insn "lsetup_without_autoinit"
+ [(set (match_operand:SI 0 "lt_register_operand" "=t")
+ (label_ref (match_operand 1 "" "")))
+ (set (match_operand:SI 2 "lb_register_operand" "=u")
+ (label_ref (match_operand 3 "" "")))
+ (use (match_operand:SI 4 "lc_register_operand" "k"))]
+ ""
+ "LSETUP (%1, %3) %4;"
+ [(set_attr "length" "4")])
+
+;; Call instructions.
+
+;; The explicit MEM inside the UNSPEC prevents the compiler from moving
+;; the load before a branch after a NULL test, or before a store that
+;; initializes a function descriptor.
+
+(define_insn_and_split "load_funcdescsi"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec_volatile:SI [(mem:SI (match_operand:SI 1 "address_operand" "p"))]
+ UNSPEC_VOLATILE_LOAD_FUNCDESC))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 0) (mem:SI (match_dup 1)))])
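+
+;; In the FDPIC ABI a function "address" points at a two-word descriptor.
+;; Rough sketch of its layout (an assumption for illustration; see the
+;; FDPIC ABI documents for the authoritative definition):
+;;
+;;   struct funcdesc
+;;   {
+;;     void *entry_point;  /* address of the code */
+;;     void *got_value;    /* GOT pointer, commonly loaded into P3 */
+;;   };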
+
+(define_expand "call"
+ [(parallel [(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ ""
+{
+ bfin_expand_call (NULL_RTX, operands[0], operands[1], operands[2], 0);
+ DONE;
+})
+
+(define_expand "sibcall"
+ [(parallel [(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (return)])]
+ ""
+{
+ bfin_expand_call (NULL_RTX, operands[0], operands[1], operands[2], 1);
+ DONE;
+})
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ ""
+{
+ bfin_expand_call (operands[0], operands[1], operands[2], operands[3], 0);
+ DONE;
+})
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (return)])]
+ ""
+{
+ bfin_expand_call (operands[0], operands[1], operands[2], operands[3], 1);
+ DONE;
+})
+
+(define_insn "*call_symbol_fdpic"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "Q"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand:SI 2 "register_operand" "Z"))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[0], INTVAL (operands[3]))"
+ "call %0;"
+ [(set_attr "type" "call")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_symbol_fdpic"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "Q"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand:SI 2 "register_operand" "Z"))
+ (use (match_operand 3 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[0], INTVAL (operands[3]))"
+ "jump.l %0;"
+ [(set_attr "type" "br")
+ (set_attr "length" "4")])
+
+(define_insn "*call_value_symbol_fdpic"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "Q"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand:SI 3 "register_operand" "Z"))
+ (use (match_operand 4 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[1], INTVAL (operands[4]))"
+ "call %1;"
+ [(set_attr "type" "call")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbol_fdpic"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "Q"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand:SI 3 "register_operand" "Z"))
+ (use (match_operand 4 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[1], INTVAL (operands[4]))"
+ "jump.l %1;"
+ [(set_attr "type" "br")
+ (set_attr "length" "4")])
+
+(define_insn "*call_insn_fdpic"
+ [(call (mem:SI (match_operand:SI 0 "register_no_elim_operand" "Y"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand:SI 2 "register_operand" "Z"))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)"
+ "call (%0);"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*sibcall_insn_fdpic"
+ [(call (mem:SI (match_operand:SI 0 "register_no_elim_operand" "Y"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand:SI 2 "register_operand" "Z"))
+ (use (match_operand 3 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)"
+ "jump (%0);"
+ [(set_attr "type" "br")
+ (set_attr "length" "2")])
+
+(define_insn "*call_value_insn_fdpic"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "register_no_elim_operand" "Y"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand:SI 3 "register_operand" "Z"))
+ (use (match_operand 4 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)"
+ "call (%1);"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*sibcall_value_insn_fdpic"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "register_no_elim_operand" "Y"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand:SI 3 "register_operand" "Z"))
+ (use (match_operand 4 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)"
+ "jump (%1);"
+ [(set_attr "type" "br")
+ (set_attr "length" "2")])
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "Q"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)
+ && (!TARGET_ID_SHARED_LIBRARY || TARGET_LEAF_ID_SHARED_LIBRARY)
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[0], INTVAL (operands[2]))"
+ "call %0;"
+ [(set_attr "type" "call")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_symbol"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "Q"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)
+ && (!TARGET_ID_SHARED_LIBRARY || TARGET_LEAF_ID_SHARED_LIBRARY)
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[0], INTVAL (operands[2]))"
+ "jump.l %0;"
+ [(set_attr "type" "br")
+ (set_attr "length" "4")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "Q"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)
+ && (!TARGET_ID_SHARED_LIBRARY || TARGET_LEAF_ID_SHARED_LIBRARY)
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[1], INTVAL (operands[3]))"
+ "call %1;"
+ [(set_attr "type" "call")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbol"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "Q"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)
+ && (!TARGET_ID_SHARED_LIBRARY || TARGET_LEAF_ID_SHARED_LIBRARY)
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !bfin_longcall_p (operands[1], INTVAL (operands[3]))"
+ "jump.l %1;"
+ [(set_attr "type" "br")
+ (set_attr "length" "4")])
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "register_no_elim_operand" "a"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)"
+ "call (%0);"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*sibcall_insn"
+ [(call (mem:SI (match_operand:SI 0 "register_no_elim_operand" "z"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)"
+ "jump (%0);"
+ [(set_attr "type" "br")
+ (set_attr "length" "2")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "register_no_elim_operand" "a"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI REG_RETS))]
+ "! SIBLING_CALL_P (insn)"
+ "call (%1);"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*sibcall_value_insn"
+ [(set (match_operand 0 "register_operand" "=d")
+ (call (mem:SI (match_operand:SI 1 "register_no_elim_operand" "z"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (return)]
+ "SIBLING_CALL_P (insn)"
+ "jump (%1);"
+ [(set_attr "type" "br")
+ (set_attr "length" "2")])
+
+;; Block move patterns
+
+;; We cheat. This copies one more word than operand 2 indicates.
+
+(define_insn "rep_movsi"
+ [(set (match_operand:SI 0 "register_operand" "=&a")
+ (plus:SI (plus:SI (match_operand:SI 3 "register_operand" "0")
+ (ashift:SI (match_operand:SI 2 "register_operand" "a")
+ (const_int 2)))
+ (const_int 4)))
+ (set (match_operand:SI 1 "register_operand" "=&b")
+ (plus:SI (plus:SI (match_operand:SI 4 "register_operand" "1")
+ (ashift:SI (match_dup 2) (const_int 2)))
+ (const_int 4)))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 2))
+ (clobber (match_scratch:HI 5 "=&d"))
+ (clobber (reg:SI REG_LT1))
+ (clobber (reg:SI REG_LC1))
+ (clobber (reg:SI REG_LB1))]
+ ""
+ "%5 = [%4++]; lsetup (1f, 1f) LC1 = %2; 1: MNOP || [%3++] = %5 || %5 = [%4++]; [%3++] = %5;"
+ [(set_attr "type" "misc")
+ (set_attr "length" "16")
+ (set_attr "seq_insns" "multi")])
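+
+;; Behavioral sketch of rep_movsi in C (illustrative, not from the
+;; sources); note the extra word described in the comment above:
+;;
+;;   void rep_movsi_equiv (int *dst, const int *src, unsigned int count)
+;;   {
+;;     unsigned int i;
+;;     for (i = 0; i <= count; i++)  /* count + 1 words */
+;;       *dst++ = *src++;
+;;   }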
+
+(define_insn "rep_movhi"
+ [(set (match_operand:SI 0 "register_operand" "=&a")
+ (plus:SI (plus:SI (match_operand:SI 3 "register_operand" "0")
+ (ashift:SI (match_operand:SI 2 "register_operand" "a")
+ (const_int 1)))
+ (const_int 2)))
+ (set (match_operand:SI 1 "register_operand" "=&b")
+ (plus:SI (plus:SI (match_operand:SI 4 "register_operand" "1")
+ (ashift:SI (match_dup 2) (const_int 1)))
+ (const_int 2)))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 2))
+ (clobber (match_scratch:HI 5 "=&d"))
+ (clobber (reg:SI REG_LT1))
+ (clobber (reg:SI REG_LC1))
+ (clobber (reg:SI REG_LB1))]
+ ""
+ "%h5 = W[%4++]; lsetup (1f, 1f) LC1 = %2; 1: MNOP || W [%3++] = %5 || %h5 = W [%4++]; W [%3++] = %5;"
+ [(set_attr "type" "misc")
+ (set_attr "length" "16")
+ (set_attr "seq_insns" "multi")])
+
+(define_expand "movmemsi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+{
+ if (bfin_expand_movmem (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ FAIL;
+})
+
+;; Conditional branch patterns
+;; The Blackfin has only a few condition codes: eq, lt, le, ltu, leu.
+
+(define_insn "compare_eq"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (eq:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKs3,aKs3")))]
+ ""
+ "cc =%1==%2;"
+ [(set_attr "type" "compare")])
+
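+;; Note: the "0" condition disables the pattern below; NE comparisons
+;; appear to be synthesized instead from compare_eq plus an inverted
+;; branch sense (see bfin_gen_compare).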
+(define_insn "compare_ne"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (ne:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKs3,aKs3")))]
+ "0"
+ "cc =%1!=%2;"
+ [(set_attr "type" "compare")])
+
+(define_insn "compare_lt"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (lt:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKs3,aKs3")))]
+ ""
+ "cc =%1<%2;"
+ [(set_attr "type" "compare")])
+
+(define_insn "compare_le"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (le:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKs3,aKs3")))]
+ ""
+ "cc =%1<=%2;"
+ [(set_attr "type" "compare")])
+
+(define_insn "compare_leu"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (leu:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKu3,aKu3")))]
+ ""
+ "cc =%1<=%2 (iu);"
+ [(set_attr "type" "compare")])
+
+(define_insn "compare_ltu"
+ [(set (match_operand:BI 0 "register_operand" "=C,C")
+ (ltu:BI (match_operand:SI 1 "register_operand" "d,a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "dKu3,aKu3")))]
+ ""
+ "cc =%1<%2 (iu);"
+ [(set_attr "type" "compare")])
+
+;; Same as flag_mul_macv2hi_parts_acconly (defined later in this file),
+;; but also ANDs CC with the overflow bit generated by the first
+;; multiplication.
+(define_insn "flag_mul_macv2hi_parts_acconly_andcc0"
+ [(set (match_operand:PDI 0 "register_operand" "=B,e,e")
+ (unspec:PDI [(vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d,d,d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 3 "register_operand" "d,d,d")
+ (parallel [(match_operand 6 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (match_operand 10 "const_int_operand" "PB,PA,PA")]
+ UNSPEC_MUL_WITH_FLAG))
+ (set (match_operand:PDI 1 "register_operand" "=B,e,e")
+ (unspec:PDI [(vec_select:HI
+ (match_dup 2)
+ (parallel [(match_operand 5 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (vec_select:HI
+ (match_dup 3)
+ (parallel [(match_operand 7 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (match_operand:PDI 8 "register_operand" "1,1,1")
+ (match_operand 9 "const01_operand" "P0P1,P0P1,P0P1")
+ (match_operand 11 "const_int_operand" "PA,PB,PA")]
+ UNSPEC_MAC_WITH_FLAG))
+ (set (reg:BI REG_CC)
+ (and:BI (reg:BI REG_CC)
+ (unspec:BI [(vec_select:HI (match_dup 2) (parallel [(match_dup 4)]))
+ (vec_select:HI (match_dup 3) (parallel [(match_dup 6)]))
+ (match_dup 10)]
+ UNSPEC_MUL_WITH_FLAG)))]
+ "MACFLAGS_MATCH_P (INTVAL (operands[10]), INTVAL (operands[11]))"
+{
+ rtx xops[6];
+ const char *templates[] = {
+ "%0 = %h2 * %h3, %1 %b4 %h2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %h3, %1 %b4 %h2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %h3, %1 %b4 %d2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %h3, %1 %b4 %d2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %d3, %1 %b4 %h2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %d3, %1 %b4 %h2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %d3, %1 %b4 %d2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %d3, %1 %b4 %d2 * %h3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %h3, %1 %b4 %h2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %h3, %1 %b4 %h2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %h3, %1 %b4 %d2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %h3, %1 %b4 %d2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %d3, %1 %b4 %h2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %d3, %1 %b4 %h2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %h2 * %d3, %1 %b4 %d2 * %d3 %M5;\n\tCC &= %v0;",
+ "%0 = %d2 * %d3, %1 %b4 %d2 * %d3 %M5;\n\tCC &= %v0;" };
+ int alt = (INTVAL (operands[4]) + (INTVAL (operands[5]) << 1)
+ + (INTVAL (operands[6]) << 2) + (INTVAL (operands[7]) << 3));
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = operands[3];
+ xops[4] = operands[9];
+ xops[5] = which_alternative == 0 ? operands[10] : operands[11];
+ output_asm_insn (templates[alt], xops);
+ return "";
+}
+ [(set_attr "type" "misc")
+ (set_attr "length" "6")
+ (set_attr "seq_insns" "multi")])
+
+(define_expand "cbranchsi4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_const_int_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+{
+ rtx bi_compare = bfin_gen_compare (operands[0], SImode);
+ emit_jump_insn (gen_cbranchbi4 (bi_compare, bfin_cc_rtx, CONST0_RTX (BImode),
+ operands[3]));
+ DONE;
+})
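+
+;; Hedged example of the funneling above: a branch on "a > b" has no
+;; direct condition code, so bfin_gen_compare is expected to rewrite it
+;; as "b < a", giving roughly
+;;
+;;   CC = R1 < R0;
+;;   if CC jump .Ltaken;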
+
+(define_insn "cbranchbi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "bfin_bimode_comparison_operator"
+ [(match_operand:BI 1 "register_operand" "C")
+ (match_operand:BI 2 "immediate_operand" "P0")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+{
+ asm_conditional_branch (insn, operands, 0, 0);
+ return "";
+}
+ [(set_attr "type" "brcc")])
+
+;; Special cbranch patterns to deal with the speculative load problem - see
+;; bfin_reorg for details.
+
+(define_insn "cbranch_predicted_taken"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "bfin_bimode_comparison_operator"
+ [(match_operand:BI 1 "register_operand" "C")
+ (match_operand:BI 2 "immediate_operand" "P0")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (unspec [(const_int 0)] UNSPEC_CBRANCH_TAKEN)]
+ ""
+{
+ asm_conditional_branch (insn, operands, 0, 1);
+ return "";
+}
+ [(set_attr "type" "brcc")])
+
+(define_insn "cbranch_with_nops"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "bfin_bimode_comparison_operator"
+ [(match_operand:BI 1 "register_operand" "C")
+ (match_operand:BI 2 "immediate_operand" "P0")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (unspec [(match_operand 4 "immediate_operand" "")] UNSPEC_CBRANCH_NOPS)]
+ "reload_completed"
+{
+ asm_conditional_branch (insn, operands, INTVAL (operands[4]), 0);
+ return "";
+}
+ [(set_attr "type" "brcc")
+ (set_attr "length" "8")])
+
+;; setcc insns.
+
+(define_expand "cstorebi4"
+ [(set (match_dup 4)
+ (match_operator:BI 1 "bfin_bimode_comparison_operator"
+ [(match_operand:BI 2 "register_operand" "")
+ (match_operand:BI 3 "reg_or_const_int_operand" "")]))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_dup 4) (const_int 0)))]
+ ""
+{
+ /* It could be expanded as a movbisi instruction, but the portable
+ alternative produces better code. */
+ if (GET_CODE (operands[1]) == NE)
+ FAIL;
+
+ operands[4] = bfin_cc_rtx;
+})
+
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "reg_or_const_int_operand" "")]))]
+ ""
+{
+ rtx bi_compare, test;
+
+ if (!bfin_direct_comparison_operator (operands[1], SImode))
+ {
+ if (!register_operand (operands[3], SImode)
+ || GET_CODE (operands[1]) == NE)
+ FAIL;
+ test = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
+ SImode, operands[3], operands[2]);
+ }
+ else
+ test = operands[1];
+
+ bi_compare = bfin_gen_compare (test, SImode);
+ gcc_assert (GET_CODE (bi_compare) == NE);
+ emit_insn (gen_movbisi (operands[0], bfin_cc_rtx));
+ DONE;
+})
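+
+;; Usage sketch (assumed C input): a setcc function such as
+;;
+;;   int le_p (int a, int b) { return a <= b; }
+;;
+;; goes through cstoresi4: the comparison sets CC, and movbisi then
+;; zero-extends CC into the result register.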
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop;")
+
+;; A nop that is not deleted once emitted.
+(define_insn "forced_nop"
+ [(unspec [(const_int 0)] UNSPEC_NOP)]
+ ""
+ "nop;")
+
+(define_insn "mnop"
+ [(unspec [(const_int 0)] UNSPEC_32BIT)]
+ ""
+ "mnop%!"
+ [(set_attr "type" "dsp32")])
+
+;;;;;;;;;;;;;;;;;;;; CC2dreg ;;;;;;;;;;;;;;;;;;;;;;;;;
+(define_insn "movsibi"
+ [(set (match_operand:BI 0 "register_operand" "=C")
+ (ne:BI (match_operand:SI 1 "register_operand" "d")
+ (const_int 0)))]
+ ""
+ "CC = %1;"
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "movbisi"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ne:SI (match_operand:BI 1 "register_operand" "C")
+ (const_int 0)))]
+ ""
+ "#"
+ ""
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:BI 1 "register_operand" "")))]
+ "")
+
+(define_insn "notbi"
+ [(set (match_operand:BI 0 "register_operand" "=C")
+ (eq:BI (match_operand:BI 1 "register_operand" " 0")
+ (const_int 0)))]
+ ""
+  "%0 = ! %0;" /* NOT CC; */
+ [(set_attr "type" "compare")])
+
+;; Vector and DSP insns
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 24))
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "d")
+ (const_int 8))))]
+ ""
+ "%0 = ALIGN8(%1, %2)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 16))
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "d")
+ (const_int 16))))]
+ ""
+ "%0 = ALIGN16(%1, %2)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "d")
+ (const_int 8))
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "d")
+ (const_int 24))))]
+ ""
+ "%0 = ALIGN24(%1, %2)%!"
+ [(set_attr "type" "dsp32")])
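+
+;; The three unnamed ALIGN patterns above let the combiner pick up byte
+;; re-alignment idioms.  Assumed C source that should match the ALIGN8
+;; form (a sketch):
+;;
+;;   unsigned int align8 (unsigned int hi, unsigned int lo)
+;;   {
+;;     return (hi << 24) | (lo >> 8);
+;;   }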
+
+;; Prologue and epilogue.
+
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "bfin_expand_prologue (); DONE;")
+
+(define_expand "epilogue"
+ [(const_int 1)]
+ ""
+ "bfin_expand_epilogue (1, 0, 0); DONE;")
+
+(define_expand "sibcall_epilogue"
+ [(const_int 1)]
+ ""
+ "bfin_expand_epilogue (0, 0, 1); DONE;")
+
+(define_expand "eh_return"
+ [(use (match_operand:SI 0 "register_operand" ""))]
+ ""
+{
+ emit_insn (gen_eh_store_handler (EH_RETURN_HANDLER_RTX, operands[0]));
+ emit_jump_insn (gen_eh_return_internal ());
+ emit_barrier ();
+ DONE;
+})
+
+(define_insn "eh_store_handler"
+ [(unspec_volatile [(match_operand:SI 1 "register_operand" "da")]
+ UNSPEC_VOLATILE_STORE_EH_HANDLER)
+ (clobber (match_operand:SI 0 "memory_operand" "=m"))]
+ ""
+ "%0 = %1%!"
+ [(set_attr "type" "mcst")])
+
+(define_insn_and_split "eh_return_internal"
+ [(eh_return)]
+ ""
+ "#"
+ "epilogue_completed"
+ [(const_int 1)]
+ "bfin_expand_epilogue (1, 1, 0); DONE;")
+
+(define_insn "link"
+ [(set (mem:SI (plus:SI (reg:SI REG_SP) (const_int -4))) (reg:SI REG_RETS))
+ (set (mem:SI (plus:SI (reg:SI REG_SP) (const_int -8))) (reg:SI REG_FP))
+ (set (reg:SI REG_FP)
+ (plus:SI (reg:SI REG_SP) (const_int -8)))
+ (set (reg:SI REG_SP)
+ (plus:SI (reg:SI REG_SP) (match_operand:SI 0 "immediate_operand" "i")))]
+ ""
+ "LINK %Z0;"
+ [(set_attr "length" "4")])
+
+(define_insn "unlink"
+ [(set (reg:SI REG_FP) (mem:SI (reg:SI REG_FP)))
+ (set (reg:SI REG_RETS) (mem:SI (plus:SI (reg:SI REG_FP) (const_int 4))))
+ (set (reg:SI REG_SP) (plus:SI (reg:SI REG_FP) (const_int 8)))]
+ ""
+ "UNLINK;"
+ [(set_attr "length" "4")])
+
+;; This pattern is slightly clumsy. The stack adjust must be the final SET in
+;; the pattern, otherwise dwarf2out becomes very confused about which reg goes
+;; where on the stack, since it goes through all elements of the parallel in
+;; sequence.
+(define_insn "push_multiple"
+ [(match_parallel 0 "push_multiple_operation"
+ [(unspec [(match_operand:SI 1 "immediate_operand" "i")] UNSPEC_PUSH_MULTIPLE)])]
+ ""
+{
+ output_push_multiple (insn, operands);
+ return "";
+})
+
+(define_insn "pop_multiple"
+ [(match_parallel 0 "pop_multiple_operation"
+ [(set (reg:SI REG_SP)
+ (plus:SI (reg:SI REG_SP) (match_operand:SI 1 "immediate_operand" "i")))])]
+ ""
+{
+ output_pop_multiple (insn, operands);
+ return "";
+})
+
+(define_insn "return_internal"
+ [(return)
+ (use (match_operand 0 "register_operand" ""))]
+ "reload_completed"
+{
+ switch (REGNO (operands[0]))
+ {
+ case REG_RETX:
+ return "rtx;";
+ case REG_RETN:
+ return "rtn;";
+ case REG_RETI:
+ return "rti;";
+ case REG_RETS:
+ return "rts;";
+ }
+ gcc_unreachable ();
+})
+
+;; When used at a location where CC contains 1, causes a speculative load
+;; that is later cancelled. This is used for certain workarounds in
+;; interrupt handler prologues.
+(define_insn "dummy_load"
+ [(unspec_volatile [(match_operand 0 "register_operand" "a")
+ (match_operand 1 "register_operand" "C")]
+ UNSPEC_VOLATILE_DUMMY)]
+ ""
+ "if cc jump 4;\n\tr7 = [%0];"
+ [(set_attr "type" "misc")
+ (set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+;; A placeholder insn inserted before the final scheduling pass. It is used
+;; to improve scheduling of loads when workarounds for speculative loads are
+;; needed, by not placing them in the first few cycles after a conditional
+;; branch.
+(define_insn "stall"
+ [(unspec_volatile [(match_operand 0 "const_int_operand" "P1P3")]
+ UNSPEC_VOLATILE_STALL)]
+ ""
+ ""
+ [(set_attr "type" "stall")])
+
+(define_insn "csync"
+ [(unspec_volatile [(const_int 0)] UNSPEC_VOLATILE_CSYNC)]
+ ""
+ "csync;"
+ [(set_attr "type" "sync")])
+
+(define_insn "ssync"
+ [(unspec_volatile [(const_int 0)] UNSPEC_VOLATILE_SSYNC)]
+ ""
+ "ssync;"
+ [(set_attr "type" "sync")])
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 3))]
+ ""
+ "excpt 3;"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
+
+(define_insn "trapifcc"
+ [(trap_if (reg:BI REG_CC) (const_int 3))]
+ ""
+ "if !cc jump 4 (bp); excpt 3;"
+ [(set_attr "type" "misc")
+ (set_attr "length" "4")
+ (set_attr "seq_insns" "multi")])
+
+;;; Vector instructions
+
+;; First, all sorts of move variants
+
+(define_insn "movhiv2hi_low"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (match_operand:HI 2 "register_operand" "d")
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "0")
+ (parallel [(const_int 1)]))))]
+ ""
+ "%h0 = %h2 << 0%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+(define_insn "movhiv2hi_high"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (match_operand:HI 2 "register_operand" "d")))]
+ ""
+ "%d0 = %h2 << 0%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+;; No earlyclobber on alternative two since our sequence ought to be safe.
+;; The order of operands is intentional to match the VDSP builtin (high word
+;; is passed first).
+(define_insn_and_split "composev2hi"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d")
+ (vec_concat:V2HI (match_operand:HI 2 "register_operand" "0,d")
+ (match_operand:HI 1 "register_operand" "d,d")))]
+ ""
+ "@
+ %d0 = %h1 << 0%!
+ #"
+ "reload_completed"
+ [(set (match_dup 0)
+ (vec_concat:V2HI
+ (vec_select:HI (match_dup 0) (parallel [(const_int 0)]))
+ (match_dup 1)))
+ (set (match_dup 0)
+ (vec_concat:V2HI
+ (match_dup 2)
+ (vec_select:HI (match_dup 0) (parallel [(const_int 1)]))))]
+ ""
+ [(set_attr "type" "dsp32shiftimm")])
+
+; Like composev2hi, but operating on elements of V2HI vectors.
+; Useful on its own, and as a combiner bridge for the multiply and
+; mac patterns.
+(define_insn "packv2hi"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d,d,d,d,d,d,d")
+ (vec_concat:V2HI (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "0,0,d,d,d,d,d,d")
+ (parallel [(match_operand 3 "const01_operand" "P0,P0,P0,P1,P0,P1,P0,P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d,d,0,0,d,d,d,d")
+ (parallel [(match_operand 4 "const01_operand" "P0,P1,P1,P1,P0,P0,P1,P1")]))))]
+ ""
+ "@
+ %d0 = %h2 << 0%!
+ %d0 = %d2 << 0%!
+ %h0 = %h1 << 0%!
+ %h0 = %d1 << 0%!
+ %0 = PACK (%h2,%h1)%!
+ %0 = PACK (%h2,%d1)%!
+ %0 = PACK (%d2,%h1)%!
+ %0 = PACK (%d2,%d1)%!"
+ [(set_attr "type" "dsp32shiftimm,dsp32shiftimm,dsp32shiftimm,dsp32shiftimm,dsp32,dsp32,dsp32,dsp32")])
+
+(define_insn "movv2hi_hi"
+ [(set (match_operand:HI 0 "register_operand" "=d,d,d")
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "0,d,d")
+ (parallel [(match_operand 2 "const01_operand" "P0,P0,P1")])))]
+ ""
+ "@
+ /* optimized out */
+ %h0 = %h1 << 0%!
+ %h0 = %d1 << 0%!"
+ [(set_attr "type" "dsp32shiftimm")])
+
+(define_expand "movv2hi_hi_low"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "")
+ (parallel [(const_int 0)])))]
+ ""
+ "")
+
+(define_expand "movv2hi_hi_high"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "")
+ (parallel [(const_int 1)])))]
+ ""
+ "")
+
+;; Unusual arithmetic operations on 16-bit registers.
+
+(define_code_iterator sp_or_sm [ss_plus ss_minus])
+(define_code_attr spm_string [(ss_plus "+") (ss_minus "-")])
+(define_code_attr spm_name [(ss_plus "add") (ss_minus "sub")])
+
+(define_insn "ss<spm_name>hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (sp_or_sm:HI (match_operand:HI 1 "register_operand" "d")
+ (match_operand:HI 2 "register_operand" "d")))]
+ ""
+ "%h0 = %h1 <spm_string> %h2 (S)%!"
+ [(set_attr "type" "dsp32")])
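+
+;; For reference, the code iterator expands the pattern above into two
+;; insns, "ssaddhi3" and "sssubhi3", emitting "%h0 = %h1 + %h2 (S)%!"
+;; and "%h0 = %h1 - %h2 (S)%!" respectively.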
+
+(define_insn "ss<spm_name>hi3_parts"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (sp_or_sm:HI (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1")]))))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = %h1 <spm_string> %h2 (S)%!",
+ "%h0 = %d1 <spm_string> %h2 (S)%!",
+ "%h0 = %h1 <spm_string> %d2 (S)%!",
+ "%h0 = %d1 <spm_string> %d2 (S)%!" };
+ int alt = INTVAL (operands[3]) + (INTVAL (operands[4]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ss<spm_name>hi3_low_parts"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (sp_or_sm:HI (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 3 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")])))))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = %h2 <spm_string> %h3 (S)%!",
+ "%h0 = %d2 <spm_string> %h3 (S)%!",
+ "%h0 = %h2 <spm_string> %d3 (S)%!",
+ "%h0 = %d2 <spm_string> %d3 (S)%!" };
+ int alt = INTVAL (operands[4]) + (INTVAL (operands[5]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ss<spm_name>hi3_high_parts"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (sp_or_sm:HI (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 3 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")])))
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "0")
+ (parallel [(const_int 1)]))))]
+ ""
+{
+ const char *templates[] = {
+ "%d0 = %h2 <spm_string> %h3 (S)%!",
+ "%d0 = %d2 <spm_string> %h3 (S)%!",
+ "%d0 = %h2 <spm_string> %d3 (S)%!",
+ "%d0 = %d2 <spm_string> %d3 (S)%!" };
+ int alt = INTVAL (operands[4]) + (INTVAL (operands[5]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+;; V2HI vector insns
+
+(define_insn "addv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (plus:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 +|+ %2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssaddv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (ss_plus:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 +|+ %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "subv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (minus:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 -|- %2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sssubv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (ss_minus:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = %1 -|- %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "addsubv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (minus:HI (vec_select:HI (match_dup 1) (parallel [(const_int 1)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %1 +|- %2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "subaddv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (plus:HI (vec_select:HI (match_dup 1) (parallel [(const_int 1)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %1 -|+ %2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssaddsubv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (ss_plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (ss_minus:HI (vec_select:HI (match_dup 1) (parallel [(const_int 1)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %1 +|- %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sssubaddv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (vec_concat:V2HI
+ (ss_minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (ss_plus:HI (vec_select:HI (match_dup 1) (parallel [(const_int 1)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %1 -|+ %2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sublohiv2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)]))))]
+ ""
+ "%h0 = %d1 - %h2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "subhilov2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)]))))]
+ ""
+ "%h0 = %h1 - %d2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sssublohiv2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ss_minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)]))))]
+ ""
+ "%h0 = %d1 - %h2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sssubhilov2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ss_minus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)]))))]
+ ""
+ "%h0 = %h1 - %d2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "addlohiv2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)]))))]
+ ""
+ "%h0 = %d1 + %h2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "addhilov2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)]))))]
+ ""
+ "%h0 = %h1 + %d2%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssaddlohiv2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ss_plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)]))))]
+ ""
+ "%h0 = %d1 + %h2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssaddhilov2hi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ss_plus:HI (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)]))))]
+ ""
+ "%h0 = %h1 + %d2 (S)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "sminv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (smin:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = MIN (%1, %2) (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "smaxv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (smax:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%0 = MAX (%1, %2) (V)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Multiplications.
+
+;; The Blackfin allows a lot of different options, and we need many patterns to
+;; cover most of the hardware's abilities.
+;; There are a few simple patterns using MULT rtx codes, but most of them use
+;; an unspec with a const_int operand that determines which flag to use in the
+;; instruction.
+;; There are variants for single and parallel multiplications.
+;; There are variants which just use 16-bit lowparts as inputs, and variants
+;; which allow the user to choose just which halves to use as input values.
+;; There are variants which set D registers, variants which set accumulators,
+;; variants which set both, some of them optionally using the accumulators as
+;; inputs for multiply-accumulate operations.
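+
+;; Hedged illustration of the %M macflag operand: flag_mulhi below emits
+;; "%h0 = %h1 * %h2 %M3%!", and with the integer-signed flag %M3 prints
+;; as "(IS)" -- the same suffix visible literally in the mulv2hi3
+;; template further down.  The result is an instruction like
+;;
+;;   R0.L = R1.L * R2.L (IS);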
+
+(define_insn "flag_mulhi"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (unspec:HI [(match_operand:HI 1 "register_operand" "d")
+ (match_operand:HI 2 "register_operand" "d")
+ (match_operand 3 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+ "%h0 = %h1 * %h2 %M3%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_mulhi_parts"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (unspec:HI [(vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1")]))
+ (match_operand 5 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = %h1 * %h2 %M5%!",
+ "%h0 = %d1 * %h2 %M5%!",
+ "%h0 = %h1 * %d2 %M5%!",
+ "%h0 = %d1 * %d2 %M5%!" };
+ int alt = INTVAL (operands[3]) + (INTVAL (operands[4]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_mulhisi"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:HI 1 "register_operand" "d")
+ (match_operand:HI 2 "register_operand" "d")
+ (match_operand 3 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+ "%0 = %h1 * %h2 %M3%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_mulhisi_parts"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1")]))
+ (match_operand 5 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%0 = %h1 * %h2 %M5%!",
+ "%0 = %d1 * %h2 %M5%!",
+ "%0 = %h1 * %d2 %M5%!",
+ "%0 = %d1 * %d2 %M5%!" };
+ int alt = INTVAL (operands[3]) + (INTVAL (operands[4]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+;; Three alternatives here to cover all possible allocations:
+;; 0. mac flag is usable only for accumulator 1 - use A1 and odd DREG
+;; 1. mac flag is usable for accumulator 0 - use A0 and even DREG
+;; 2. mac flag is usable in any accumulator - use A1 and odd DREG
+;; Other patterns which don't have a DREG destination can collapse cases
+;; 1 and 2 into one.
+(define_insn "flag_machi"
+ [(set (match_operand:HI 0 "register_operand" "=W,D,W")
+ (unspec:HI [(match_operand:HI 2 "register_operand" "d,d,d")
+ (match_operand:HI 3 "register_operand" "d,d,d")
+ (match_operand 4 "register_operand" "1,1,1")
+ (match_operand 5 "const01_operand" "P0P1,P0P1,P0P1")
+ (match_operand 6 "const_int_operand" "PB,PA,PA")]
+ UNSPEC_MAC_WITH_FLAG))
+ (set (match_operand:PDI 1 "register_operand" "=B,A,B")
+ (unspec:PDI [(match_dup 1) (match_dup 2) (match_dup 3)
+ (match_dup 4) (match_dup 5)]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+ "%h0 = (%1 %b5 %h2 * %h3) %M6%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_machi_acconly"
+ [(set (match_operand:PDI 0 "register_operand" "=B,e")
+ (unspec:PDI [(match_operand:HI 1 "register_operand" "d,d")
+ (match_operand:HI 2 "register_operand" "d,d")
+ (match_operand 3 "register_operand" "0,0")
+ (match_operand 4 "const01_operand" "P0P1,P0P1")
+ (match_operand 5 "const_int_operand" "PB,PA")]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+ "%0 %b4 %h1 * %h2 %M5%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_machi_parts_acconly"
+ [(set (match_operand:PDI 0 "register_operand" "=B,e")
+ (unspec:PDI [(vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d,d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1,P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d,d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1,P0P1")]))
+ (match_operand:PDI 5 "register_operand" "0,0")
+ (match_operand 6 "const01_operand" "P0P1,P0P1")
+ (match_operand 7 "const_int_operand" "PB,PA")]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%0 %b6 %h1 * %h2 %M7%!",
+ "%0 %b6 %d1 * %h2 %M7%!",
+ "%0 %b6 %h1 * %d2 %M7%!",
+ "%0 %b6 %d1 * %d2 %M7%!"
+ };
+ int alt = INTVAL (operands[3]) + (INTVAL (operands[4]) << 1);
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_macinithi"
+ [(set (match_operand:HI 0 "register_operand" "=W,D,W")
+ (unspec:HI [(match_operand:HI 1 "register_operand" "d,d,d")
+ (match_operand:HI 2 "register_operand" "d,d,d")
+ (match_operand 3 "const_int_operand" "PB,PA,PA")]
+ UNSPEC_MAC_WITH_FLAG))
+ (set (match_operand:PDI 4 "register_operand" "=B,A,B")
+ (unspec:PDI [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+ "%h0 = (%4 = %h1 * %h2) %M3%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_macinit1hi"
+ [(set (match_operand:PDI 0 "register_operand" "=B,e")
+ (unspec:PDI [(match_operand:HI 1 "register_operand" "d,d")
+ (match_operand:HI 2 "register_operand" "d,d")
+ (match_operand 3 "const_int_operand" "PB,PA")]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+ "%0 = %h1 * %h2 %M3%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "mulv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (mult:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))]
+ ""
+ "%h0 = %h1 * %h2, %d0 = %d1 * %d2 (IS)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_mulv2hi"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand 3 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+ "%h0 = %h1 * %h2, %d0 = %d1 * %d2 %M3%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_mulv2hi_parts"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(vec_concat:V2HI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_dup 1)
+ (parallel [(match_operand 4 "const01_operand" "P0P1")])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")]))
+ (vec_select:HI (match_dup 2)
+ (parallel [(match_operand 6 "const01_operand" "P0P1")])))
+ (match_operand 7 "const_int_operand" "n")]
+ UNSPEC_MUL_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = %h1 * %h2, %d0 = %h1 * %h2 %M7%!",
+ "%h0 = %d1 * %h2, %d0 = %h1 * %h2 %M7%!",
+ "%h0 = %h1 * %h2, %d0 = %d1 * %h2 %M7%!",
+ "%h0 = %d1 * %h2, %d0 = %d1 * %h2 %M7%!",
+ "%h0 = %h1 * %d2, %d0 = %h1 * %h2 %M7%!",
+ "%h0 = %d1 * %d2, %d0 = %h1 * %h2 %M7%!",
+ "%h0 = %h1 * %d2, %d0 = %d1 * %h2 %M7%!",
+ "%h0 = %d1 * %d2, %d0 = %d1 * %h2 %M7%!",
+ "%h0 = %h1 * %h2, %d0 = %h1 * %d2 %M7%!",
+ "%h0 = %d1 * %h2, %d0 = %h1 * %d2 %M7%!",
+ "%h0 = %h1 * %h2, %d0 = %d1 * %d2 %M7%!",
+ "%h0 = %d1 * %h2, %d0 = %d1 * %d2 %M7%!",
+ "%h0 = %h1 * %d2, %d0 = %h1 * %d2 %M7%!",
+ "%h0 = %d1 * %d2, %d0 = %h1 * %d2 %M7%!",
+ "%h0 = %h1 * %d2, %d0 = %d1 * %d2 %M7%!",
+ "%h0 = %d1 * %d2, %d0 = %d1 * %d2 %M7%!" };
+ int alt = (INTVAL (operands[3]) + (INTVAL (operands[4]) << 1)
+ + (INTVAL (operands[5]) << 2) + (INTVAL (operands[6]) << 3));
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
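+
+;; The 16-entry template tables in these "_parts" patterns are indexed
+;; by packing the four half-word selectors into a 4-bit number.  Sketch
+;; of the encoding (illustrative C; sel == 0 picks the low half %h,
+;; sel == 1 the high half %d):
+;;
+;;   static int template_index (int s3, int s4, int s5, int s6)
+;;   {
+;;     return s3 | (s4 << 1) | (s5 << 2) | (s6 << 3);
+;;   }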
+
+;; A slightly complicated pattern.
+;; Operand 0 is the halfword output; operand 11 is the accumulator output.
+;; Halfword inputs are operands 1 and 2; operands 3, 4, 5 and 6 specify which
+;; parts of these 2x16 bit registers to use.
+;; Operand 7 is the accumulator input.
+;; Operands 8/9 specify whether low/high parts are mac (0) or msu (1)
+;; Operand 10 is the macflag to be used.
+(define_insn "flag_macv2hi_parts"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(vec_concat:V2HI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_dup 1)
+ (parallel [(match_operand 4 "const01_operand" "P0P1")])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")]))
+ (vec_select:HI (match_dup 2)
+ (parallel [(match_operand 6 "const01_operand" "P0P1")])))
+ (match_operand:V2PDI 7 "register_operand" "e")
+ (match_operand 8 "const01_operand" "P0P1")
+ (match_operand 9 "const01_operand" "P0P1")
+ (match_operand 10 "const_int_operand" "n")]
+ UNSPEC_MAC_WITH_FLAG))
+ (set (match_operand:V2PDI 11 "register_operand" "=e")
+ (unspec:V2PDI [(vec_concat:V2HI
+ (vec_select:HI (match_dup 1) (parallel [(match_dup 3)]))
+ (vec_select:HI (match_dup 1) (parallel [(match_dup 4)])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_dup 2) (parallel [(match_dup 5)]))
+ (vec_select:HI (match_dup 2) (parallel [(match_dup 5)])))
+ (match_dup 7) (match_dup 8) (match_dup 9) (match_dup 10)]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = (A0 %b8 %h1 * %h2), %d0 = (A1 %b9 %h1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %h2), %d0 = (A1 %b9 %h1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %h2), %d0 = (A1 %b9 %d1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %h2), %d0 = (A1 %b9 %d1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %d2), %d0 = (A1 %b9 %h1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %d2), %d0 = (A1 %b9 %h1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %d2), %d0 = (A1 %b9 %d1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %d2), %d0 = (A1 %b9 %d1 * %h2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %h2), %d0 = (A1 %b9 %h1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %h2), %d0 = (A1 %b9 %h1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %h2), %d0 = (A1 %b9 %d1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %h2), %d0 = (A1 %b9 %d1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %d2), %d0 = (A1 %b9 %h1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %d2), %d0 = (A1 %b9 %h1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %h1 * %d2), %d0 = (A1 %b9 %d1 * %d2) %M10%!",
+ "%h0 = (A0 %b8 %d1 * %d2), %d0 = (A1 %b9 %d1 * %d2) %M10%!" };
+ int alt = (INTVAL (operands[3]) + (INTVAL (operands[4]) << 1)
+ + (INTVAL (operands[5]) << 2) + (INTVAL (operands[6]) << 3));
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_macv2hi_parts_acconly"
+ [(set (match_operand:V2PDI 0 "register_operand" "=e")
+ (unspec:V2PDI [(vec_concat:V2HI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_dup 1)
+ (parallel [(match_operand 4 "const01_operand" "P0P1")])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")]))
+ (vec_select:HI (match_dup 2)
+ (parallel [(match_operand 6 "const01_operand" "P0P1")])))
+ (match_operand:V2PDI 7 "register_operand" "e")
+ (match_operand 8 "const01_operand" "P0P1")
+ (match_operand 9 "const01_operand" "P0P1")
+ (match_operand 10 "const_int_operand" "n")]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "A0 %b8 %h1 * %h2, A1 %b9 %h1 * %h2 %M10%!",
+ "A0 %b8 %d1 * %h2, A1 %b9 %h1 * %h2 %M10%!",
+ "A0 %b8 %h1 * %h2, A1 %b9 %d1 * %h2 %M10%!",
+ "A0 %b8 %d1 * %h2, A1 %b9 %d1 * %h2 %M10%!",
+ "A0 %b8 %h1 * %d2, A1 %b9 %h1 * %h2 %M10%!",
+ "A0 %b8 %d1 * %d2, A1 %b9 %h1 * %h2 %M10%!",
+ "A0 %b8 %h1 * %d2, A1 %b9 %d1 * %h2 %M10%!",
+ "A0 %b8 %d1 * %d2, A1 %b9 %d1 * %h2 %M10%!",
+ "A0 %b8 %h1 * %h2, A1 %b9 %h1 * %d2 %M10%!",
+ "A0 %b8 %d1 * %h2, A1 %b9 %h1 * %d2 %M10%!",
+ "A0 %b8 %h1 * %h2, A1 %b9 %d1 * %d2 %M10%!",
+ "A0 %b8 %d1 * %h2, A1 %b9 %d1 * %d2 %M10%!",
+ "A0 %b8 %h1 * %d2, A1 %b9 %h1 * %d2 %M10%!",
+ "A0 %b8 %d1 * %d2, A1 %b9 %h1 * %d2 %M10%!",
+ "A0 %b8 %h1 * %d2, A1 %b9 %d1 * %d2 %M10%!",
+ "A0 %b8 %d1 * %d2, A1 %b9 %d1 * %d2 %M10%!" };
+ int alt = (INTVAL (operands[3]) + (INTVAL (operands[4]) << 1)
+ + (INTVAL (operands[5]) << 2) + (INTVAL (operands[6]) << 3));
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+;; Same as above, but initializing the accumulators, and therefore needing a
+;; couple fewer operands.
+(define_insn "flag_macinitv2hi_parts"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(vec_concat:V2HI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_dup 1)
+ (parallel [(match_operand 4 "const01_operand" "P0P1")])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")]))
+ (vec_select:HI (match_dup 2)
+ (parallel [(match_operand 6 "const01_operand" "P0P1")])))
+ (match_operand 7 "const_int_operand" "n")]
+ UNSPEC_MAC_WITH_FLAG))
+ (set (match_operand:V2PDI 8 "register_operand" "=e")
+ (unspec:V2PDI [(vec_concat:V2HI
+ (vec_select:HI (match_dup 1) (parallel [(match_dup 3)]))
+ (vec_select:HI (match_dup 1) (parallel [(match_dup 4)])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_dup 2) (parallel [(match_dup 5)]))
+ (vec_select:HI (match_dup 2) (parallel [(match_dup 5)])))
+ (match_dup 7)]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "%h0 = (A0 = %h1 * %h2), %d0 = (A1 = %h1 * %h2) %M7%!",
+ "%h0 = (A0 = %d1 * %h2), %d0 = (A1 = %h1 * %h2) %M7%!",
+ "%h0 = (A0 = %h1 * %h2), %d0 = (A1 = %d1 * %h2) %M7%!",
+ "%h0 = (A0 = %d1 * %h2), %d0 = (A1 = %d1 * %h2) %M7%!",
+ "%h0 = (A0 = %h1 * %d2), %d0 = (A1 = %h1 * %h2) %M7%!",
+ "%h0 = (A0 = %d1 * %d2), %d0 = (A1 = %h1 * %h2) %M7%!",
+ "%h0 = (A0 = %h1 * %d2), %d0 = (A1 = %d1 * %h2) %M7%!",
+ "%h0 = (A0 = %d1 * %d2), %d0 = (A1 = %d1 * %h2) %M7%!",
+ "%h0 = (A0 = %h1 * %h2), %d0 = (A1 = %h1 * %d2) %M7%!",
+ "%h0 = (A0 = %d1 * %h2), %d0 = (A1 = %h1 * %d2) %M7%!",
+ "%h0 = (A0 = %h1 * %h2), %d0 = (A1 = %d1 * %d2) %M7%!",
+ "%h0 = (A0 = %d1 * %h2), %d0 = (A1 = %d1 * %d2) %M7%!",
+ "%h0 = (A0 = %h1 * %d2), %d0 = (A1 = %h1 * %d2) %M7%!",
+ "%h0 = (A0 = %d1 * %d2), %d0 = (A1 = %h1 * %d2) %M7%!",
+ "%h0 = (A0 = %h1 * %d2), %d0 = (A1 = %d1 * %d2) %M7%!",
+ "%h0 = (A0 = %d1 * %d2), %d0 = (A1 = %d1 * %d2) %M7%!" };
+ int alt = (INTVAL (operands[3]) + (INTVAL (operands[4]) << 1)
+ + (INTVAL (operands[5]) << 2) + (INTVAL (operands[6]) << 3));
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+(define_insn "flag_macinit1v2hi_parts"
+ [(set (match_operand:V2PDI 0 "register_operand" "=e")
+ (unspec:V2PDI [(vec_concat:V2HI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(match_operand 3 "const01_operand" "P0P1")]))
+ (vec_select:HI
+ (match_dup 1)
+ (parallel [(match_operand 4 "const01_operand" "P0P1")])))
+ (vec_concat:V2HI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(match_operand 5 "const01_operand" "P0P1")]))
+ (vec_select:HI (match_dup 2)
+ (parallel [(match_operand 6 "const01_operand" "P0P1")])))
+ (match_operand 7 "const_int_operand" "n")]
+ UNSPEC_MAC_WITH_FLAG))]
+ ""
+{
+ const char *templates[] = {
+ "A0 = %h1 * %h2, A1 = %h1 * %h2 %M7%!",
+ "A0 = %d1 * %h2, A1 = %h1 * %h2 %M7%!",
+ "A0 = %h1 * %h2, A1 = %d1 * %h2 %M7%!",
+ "A0 = %d1 * %h2, A1 = %d1 * %h2 %M7%!",
+ "A0 = %h1 * %d2, A1 = %h1 * %h2 %M7%!",
+ "A0 = %d1 * %d2, A1 = %h1 * %h2 %M7%!",
+ "A0 = %h1 * %d2, A1 = %d1 * %h2 %M7%!",
+ "A0 = %d1 * %d2, A1 = %d1 * %h2 %M7%!",
+ "A0 = %h1 * %h2, A1 = %h1 * %d2 %M7%!",
+ "A0 = %d1 * %h2, A1 = %h1 * %d2 %M7%!",
+ "A0 = %h1 * %h2, A1 = %d1 * %d2 %M7%!",
+ "A0 = %d1 * %h2, A1 = %d1 * %d2 %M7%!",
+ "A0 = %h1 * %d2, A1 = %h1 * %d2 %M7%!",
+ "A0 = %d1 * %d2, A1 = %h1 * %d2 %M7%!",
+ "A0 = %h1 * %d2, A1 = %d1 * %d2 %M7%!",
+ "A0 = %d1 * %d2, A1 = %d1 * %d2 %M7%!" };
+ int alt = (INTVAL (operands[3]) + (INTVAL (operands[4]) << 1)
+ + (INTVAL (operands[5]) << 2) + (INTVAL (operands[6]) << 3));
+ return templates[alt];
+}
+ [(set_attr "type" "dsp32")])
+
+;; A mixture of multiply and multiply-accumulate for when we only want to
+;; initialize one part.
+(define_insn "flag_mul_macv2hi_parts_acconly"
+ [(set (match_operand:PDI 0 "register_operand" "=B,e,e")
+ (unspec:PDI [(vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "d,d,d")
+ (parallel [(match_operand 4 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (vec_select:HI
+ (match_operand:V2HI 3 "register_operand" "d,d,d")
+ (parallel [(match_operand 6 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (match_operand 10 "const_int_operand" "PB,PA,PA")]
+ UNSPEC_MUL_WITH_FLAG))
+ (set (match_operand:PDI 1 "register_operand" "=B,e,e")
+ (unspec:PDI [(vec_select:HI
+ (match_dup 2)
+ (parallel [(match_operand 5 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (vec_select:HI
+ (match_dup 3)
+ (parallel [(match_operand 7 "const01_operand" "P0P1,P0P1,P0P1")]))
+ (match_operand:PDI 8 "register_operand" "1,1,1")
+ (match_operand 9 "const01_operand" "P0P1,P0P1,P0P1")
+ (match_operand 11 "const_int_operand" "PA,PB,PA")]
+ UNSPEC_MAC_WITH_FLAG))]
+ "MACFLAGS_MATCH_P (INTVAL (operands[10]), INTVAL (operands[11]))"
+{
+ rtx xops[6];
+ const char *templates[] = {
+ "%0 = %h2 * %h3, %1 %b4 %h2 * %h3 %M5%!",
+ "%0 = %d2 * %h3, %1 %b4 %h2 * %h3 %M5%!",
+ "%0 = %h2 * %h3, %1 %b4 %d2 * %h3 %M5%!",
+ "%0 = %d2 * %h3, %1 %b4 %d2 * %h3 %M5%!",
+ "%0 = %h2 * %d3, %1 %b4 %h2 * %h3 %M5%!",
+ "%0 = %d2 * %d3, %1 %b4 %h2 * %h3 %M5%!",
+ "%0 = %h2 * %d3, %1 %b4 %d2 * %h3 %M5%!",
+ "%0 = %d2 * %d3, %1 %b4 %d2 * %h3 %M5%!",
+ "%0 = %h2 * %h3, %1 %b4 %h2 * %d3 %M5%!",
+ "%0 = %d2 * %h3, %1 %b4 %h2 * %d3 %M5%!",
+ "%0 = %h2 * %h3, %1 %b4 %d2 * %d3 %M5%!",
+ "%0 = %d2 * %h3, %1 %b4 %d2 * %d3 %M5%!",
+ "%0 = %h2 * %d3, %1 %b4 %h2 * %d3 %M5%!",
+ "%0 = %d2 * %d3, %1 %b4 %h2 * %d3 %M5%!",
+ "%0 = %h2 * %d3, %1 %b4 %d2 * %d3 %M5%!",
+ "%0 = %d2 * %d3, %1 %b4 %d2 * %d3 %M5%!" };
+ int alt = (INTVAL (operands[4]) + (INTVAL (operands[5]) << 1)
+ + (INTVAL (operands[6]) << 2) + (INTVAL (operands[7]) << 3));
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = operands[3];
+ xops[4] = operands[9];
+ xops[5] = which_alternative == 0 ? operands[10] : operands[11];
+ output_asm_insn (templates[alt], xops);
+ return "";
+}
+ [(set_attr "type" "dsp32")])
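+
+;; Note on the remapping above (a sketch of the intent): the templates are
+;; printed against xops[], so %b4 and %M5 in the strings stand for
+;; operands[9] (the accumulate/subtract selector) and for whichever of
+;; operands[10]/operands[11] holds the macflag for the matched
+;; alternative, as chosen by which_alternative.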
+
+
+(define_code_iterator s_or_u [sign_extend zero_extend])
+(define_code_attr su_optab [(sign_extend "mul")
+ (zero_extend "umul")])
+(define_code_attr su_modifier [(sign_extend "IS")
+ (zero_extend "FU")])
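+
+;; For instance, the s_or_u iterator expands each pattern below twice: the
+;; sign_extend variant becomes "mulhisi_ll" printing "(IS)", and the
+;; zero_extend variant becomes "umulhisi_ll" printing "(FU)".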
+
+(define_insn "<su_optab>hisi_ll"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "%d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %h2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_lh"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_hl"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %d1 * %h2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_hh"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "%d")
+ (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Additional variants for signed * unsigned multiply.
+
+(define_insn "usmulhisi_ull"
+ [(set (match_operand:SI 0 "register_operand" "=W")
+ (mult:SI (zero_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "%d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h2 * %h1 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_ulh"
+ [(set (match_operand:SI 0 "register_operand" "=W")
+ (mult:SI (zero_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d2 * %h1 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_uhl"
+ [(set (match_operand:SI 0 "register_operand" "=W")
+ (mult:SI (zero_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h2 * %d1 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_uhh"
+ [(set (match_operand:SI 0 "register_operand" "=W")
+ (mult:SI (zero_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "%d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d")
+ (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d2 * %d1 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Parallel versions of these operations. First, normal signed or unsigned
+;; multiplies.
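+;; The constraint alternatives below tie the two results to an even/odd
+;; data-register pair -- (q0,q1), (q2,q3), (q4,q5) or (q6,q7), i.e. R0/R1
+;; through R6/R7 -- matching how the dual-result forms pair their
+;; destinations.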
+
+(define_insn "<su_optab>hisi_ll_lh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %h1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_ll_hl"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %d1 * %h2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_ll_hh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %d1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_lh_hl"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %d1 * %h2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_lh_hh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %d1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "<su_optab>hisi_hl_hh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (s_or_u:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (s_or_u:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %h2, %3 = %d1 * %d2 (<su_modifier>)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Special signed * unsigned variants.
+
+(define_insn "usmulhisi_ll_lul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %h1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_ll_luh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %h1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_ll_hul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %d1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_ll_huh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %h2, %3 = %d1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_lh_lul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %h1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_lh_luh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %h1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_lh_hul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %d1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_lh_huh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %h1 * %d2, %3 = %d1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hl_lul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %d1 * %h2, %3 = %h1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hl_luh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %h2, %3 = %h1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hl_hul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %d1 * %h2, %3 = %d1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hl_huh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 0)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %h2, %3 = %d1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hh_lul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %d1 * %d2, %3 = %h1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hh_luh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 0)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %d2, %3 = %h1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hh_hul"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 0)])))))]
+ ""
+ "%0 = %d1 * %d2, %3 = %d1 * %h2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "usmulhisi_hh_huh"
+ [(set (match_operand:SI 0 "register_operand" "=q0,q2,q4,q6")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 1 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_operand:V2HI 2 "register_operand" "d,d,d,d")
+ (parallel [(const_int 1)])))))
+ (set (match_operand:SI 3 "register_operand" "=q1,q3,q5,q7")
+ (mult:SI (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (zero_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))]
+ ""
+ "%0 = %d1 * %d2, %3 = %d1 * %d2 (IS,M)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Vector neg/abs.
+
+(define_insn "ssnegv2hi2"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (ss_neg:V2HI (match_operand:V2HI 1 "register_operand" "d")))]
+ ""
+ "%0 = - %1 (V)%!"
+ [(set_attr "type" "dsp32")])
+
+(define_insn "ssabsv2hi2"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (ss_abs:V2HI (match_operand:V2HI 1 "register_operand" "d")))]
+ ""
+ "%0 = ABS %1 (V)%!"
+ [(set_attr "type" "dsp32")])
+
+;; Shifts.
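+;; A negative shift count selects the right-shift arm of the if_then_else
+;; patterns below; e.g. (a sketch of one case) a constant count of -3
+;; matches a Ks4/Ks5 alternative and is printed via %N as
+;; "%0 = %1 >>> 3 (V,S)", %N emitting the negated count.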
+
+(define_insn "ssashiftv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d,d")
+ (if_then_else:V2HI
+ (lt (match_operand:HI 2 "vec_shift_operand" "d,Ku4,Ks4") (const_int 0))
+ (ashiftrt:V2HI (match_operand:V2HI 1 "register_operand" "d,d,d")
+ (match_dup 2))
+ (ss_ashift:V2HI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ %0 = ASHIFT %1 BY %h2 (V, S)%!
+ %0 = %1 << %2 (V,S)%!
+ %0 = %1 >>> %N2 (V,S)%!"
+ [(set_attr "type" "dsp32,dsp32shiftimm,dsp32shiftimm")])
+
+(define_insn "ssashifthi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,d,d")
+ (if_then_else:HI
+ (lt (match_operand:HI 2 "vec_shift_operand" "d,Ku4,Ks4") (const_int 0))
+ (ashiftrt:HI (match_operand:HI 1 "register_operand" "d,d,d")
+ (match_dup 2))
+ (ss_ashift:HI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ %0 = ASHIFT %1 BY %h2 (V, S)%!
+ %0 = %1 << %2 (V,S)%!
+ %0 = %1 >>> %N2 (V,S)%!"
+ [(set_attr "type" "dsp32,dsp32shiftimm,dsp32shiftimm")])
+
+(define_insn "ssashiftsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,d,d")
+ (if_then_else:SI
+ (lt (match_operand:HI 2 "reg_or_const_int_operand" "d,Ku5,Ks5") (const_int 0))
+	 (ashiftrt:SI (match_operand:SI 1 "register_operand" "d,d,d")
+ (match_dup 2))
+ (ss_ashift:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ %0 = ASHIFT %1 BY %h2 (S)%!
+ %0 = %1 << %2 (S)%!
+ %0 = %1 >>> %N2 (S)%!"
+ [(set_attr "type" "dsp32,dsp32shiftimm,dsp32shiftimm")])
+
+(define_insn "lshiftv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d,d")
+ (if_then_else:V2HI
+ (lt (match_operand:HI 2 "vec_shift_operand" "d,Ku4,Ks4") (const_int 0))
+ (lshiftrt:V2HI (match_operand:V2HI 1 "register_operand" "d,d,d")
+ (match_dup 2))
+ (ashift:V2HI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ %0 = LSHIFT %1 BY %h2 (V)%!
+ %0 = %1 << %2 (V)%!
+ %0 = %1 >> %N2 (V)%!"
+ [(set_attr "type" "dsp32,dsp32shiftimm,dsp32shiftimm")])
+
+(define_insn "lshifthi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,d,d")
+ (if_then_else:HI
+ (lt (match_operand:HI 2 "vec_shift_operand" "d,Ku4,Ks4") (const_int 0))
+ (lshiftrt:HI (match_operand:HI 1 "register_operand" "d,d,d")
+ (match_dup 2))
+ (ashift:HI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ %0 = LSHIFT %1 BY %h2 (V)%!
+ %0 = %1 << %2 (V)%!
+ %0 = %1 >> %N2 (V)%!"
+ [(set_attr "type" "dsp32,dsp32shiftimm,dsp32shiftimm")])
+
+;; Load without alignment exception (masking off low bits).
+
+(define_insn "loadbytes"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mem:SI (and:SI (match_operand:SI 1 "register_operand" "b")
+ (const_int -4))))]
+ ""
+ "DISALGNEXCPT || %0 = [%1];"
+ [(set_attr "type" "mcld")
+ (set_attr "length" "8")])
+
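+;; Roughly equivalent C for the load above (a sketch):
+;;   word = *(int *) ((intptr_t) p & -4);
+;; the DISALGNEXCPT issued in parallel keeps the access from raising an
+;; alignment exception, so the low address bits are effectively masked.
+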
+(include "sync.md")
diff --git a/gcc/config/bfin/bfin.opt b/gcc/config/bfin/bfin.opt
new file mode 100644
index 000000000..c7a905602
--- /dev/null
+++ b/gcc/config/bfin/bfin.opt
@@ -0,0 +1,101 @@
+; Options for the Blackfin port of the compiler
+;
+; Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+msim
+Target RejectNegative
+Use simulator runtime
+
+mcpu=
+Target RejectNegative Joined
+Specify the name of the target CPU
+
+momit-leaf-frame-pointer
+Target Report Mask(OMIT_LEAF_FRAME_POINTER)
+Omit frame pointer for leaf functions
+
+mlow64k
+Target Report Mask(LOW_64K)
+Program is entirely located in low 64k of memory
+
+mcsync-anomaly
+Target Report Var(bfin_csync_anomaly) Init(-1)
+Work around a hardware anomaly by adding a number of NOPs before a
+CSYNC or SSYNC instruction.
+
+mspecld-anomaly
+Target Report Var(bfin_specld_anomaly) Init(-1)
+Avoid speculative loads to work around a hardware anomaly.
+
+mid-shared-library
+Target Report Mask(ID_SHARED_LIBRARY)
+Enable ID-based shared library
+
+mleaf-id-shared-library
+Target Report Mask(LEAF_ID_SHARED_LIBRARY)
+Generate code that won't be linked against any other ID shared libraries,
+but may be used as a shared library.
+
+mshared-library-id=
+Target RejectNegative Joined UInteger Var(bfin_library_id)
+ID of shared library to build
+
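+; As an illustration (hypothetical command line): compiling with
+; -mid-shared-library -mshared-library-id=2 selects the ID-based shared
+; library scheme and records the ID 2 in bfin_library_id.
+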
+msep-data
+Target Report Mask(SEP_DATA)
+Enable separate data segment
+
+mlong-calls
+Target Report Mask(LONG_CALLS)
+Avoid generating PC-relative calls; use indirection
+
+mfast-fp
+Target Report Mask(FAST_FP)
+Link with the fast floating-point library
+
+mfdpic
+Target Report Mask(FDPIC)
+Enable Function Descriptor PIC mode
+
+minline-plt
+Target Report Mask(INLINE_PLT)
+Enable inlining of PLT in function calls
+
+mstack-check-l1
+Target Report Mask(STACK_CHECK_L1)
+Do stack checking using bounds in L1 scratch memory
+
+mmulticore
+Target Report Mask(MULTICORE)
+Enable multicore support
+
+mcorea
+Target Report Mask(COREA)
+Build for Core A
+
+mcoreb
+Target Report Mask(COREB)
+Build for Core B
+
+msdram
+Target Report Mask(SDRAM)
+Build for SDRAM
+
+micplb
+Target Report Mask(ICPLB)
+Assume ICPLBs are enabled at runtime.
diff --git a/gcc/config/bfin/constraints.md b/gcc/config/bfin/constraints.md
new file mode 100644
index 000000000..fa9dcf143
--- /dev/null
+++ b/gcc/config/bfin/constraints.md
@@ -0,0 +1,225 @@
+;; Constraint definitions for Blackfin
+;; Copyright (C) 2008 Free Software Foundation, Inc.
+;; Contributed by Analog Devices
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "a" "PREGS"
+ "A Pn register.")
+
+(define_register_constraint "d" "DREGS"
+ "A Rn register.")
+
+(define_register_constraint "z" "PREGS_CLOBBERED"
+ "A call clobbered Pn register.")
+
+(define_register_constraint "D" "EVEN_DREGS"
+ "An even-numbered Rn register.")
+
+(define_register_constraint "W" "ODD_DREGS"
+ "An odd-numbered Rn register.")
+
+(define_register_constraint "e" "AREGS"
+ "An accumulator register.")
+
+(define_register_constraint "A" "EVEN_AREGS"
+ "An even-numbered accumulator; A0.")
+
+(define_register_constraint "B" "ODD_AREGS"
+ "An odd-numbered accumulator; A1.")
+
+(define_register_constraint "b" "IREGS"
+ "An I register.")
+
+(define_register_constraint "v" "BREGS"
+ "A B register.")
+
+(define_register_constraint "f" "MREGS"
+ "An M register.")
+
+(define_register_constraint "c" "CIRCREGS"
+ "A register used for circular buffering, i.e. I, B, or L registers.")
+
+(define_register_constraint "C" "CCREGS"
+ "The CC register.")
+
+(define_register_constraint "t" "LT_REGS"
+ "LT0 or LT1.")
+
+(define_register_constraint "u" "LB_REGS"
+ "LB0 or LB1.")
+
+(define_register_constraint "k" "LC_REGS"
+ "LC0 or LC1.")
+
+(define_register_constraint "x" "MOST_REGS"
+ "Any R, P, B, M, I or L register.")
+
+(define_register_constraint "y" "PROLOGUE_REGS"
+ "Additional registers typically used only in prologues and epilogues:
+ RETS, RETN, RETI, RETX, RETE, ASTAT, SEQSTAT and USP.")
+
+(define_register_constraint "w" "NON_A_CC_REGS"
+ "Any register except accumulators or CC.")
+
+(define_register_constraint "Z" "FDPIC_REGS"
+ "@internal The FD-PIC GOT pointer; P3.")
+
+(define_register_constraint "Y" "FDPIC_FPTR_REGS"
+ "@internal The FD-PIC function pointer register; P1.")
+
+(define_register_constraint "q0" "D0REGS"
+ "The register R0.")
+
+(define_register_constraint "q1" "D1REGS"
+ "The register R1.")
+
+(define_register_constraint "q2" "D2REGS"
+ "The register R2.")
+
+(define_register_constraint "q3" "D3REGS"
+ "The register R3.")
+
+(define_register_constraint "q4" "D4REGS"
+ "The register R4.")
+
+(define_register_constraint "q5" "D5REGS"
+ "The register R5.")
+
+(define_register_constraint "q6" "D6REGS"
+ "The register R6.")
+
+(define_register_constraint "q7" "D7REGS"
+ "The register R7.")
+
+(define_register_constraint "qA" "P0REGS"
+ "The register P0.")
+
+;; Constant constraints.
+
+(define_constraint "J"
+  "A constant value of the form 2**N, where N is 5 bits wide."
+ (and (match_code "const_int")
+ (match_test "log2constp (ival)")))
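+;; For example, 64 (2**6) satisfies "J", and its complement ~64 satisfies
+;; "L" below.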
+
+(define_constraint "Ks3"
+ "A signed 3 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -4 && ival <= 3")))
+
+(define_constraint "Ku3"
+ "An unsigned 3 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 7")))
+
+(define_constraint "Ks4"
+ "A signed 4 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -8 && ival <= 7")))
+
+(define_constraint "Ku4"
+ "An unsigned 4 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 15")))
+
+(define_constraint "Ks5"
+ "A signed 5 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -16 && ival <= 15")))
+
+(define_constraint "Ku5"
+ "An unsigned 5 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 31")))
+
+(define_constraint "Ks7"
+ "A signed 7 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -64 && ival <= 63")))
+
+(define_constraint "KN7"
+ "A constant that when negated is a signed 7 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -63 && ival <= 64")))
+
+(define_constraint "Ksh"
+ "A signed 16 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -32768 && ival <= 32767")))
+
+(define_constraint "Kuh"
+ "An unsigned 16 bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 65535")))
+
+(define_constraint "L"
+ "A constant value of the form ~(2**N)."
+ (and (match_code "const_int")
+ (match_test "log2constp (~ival)")))
+
+(define_constraint "M1"
+ "An integer with the value 255."
+ (and (match_code "const_int")
+ (match_test "ival == 255")))
+
+(define_constraint "M2"
+ "An integer with the value 65535."
+ (and (match_code "const_int")
+ (match_test "ival == 65535")))
+
+(define_constraint "P0"
+ "An integer with the value 0."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "P1"
+ "An integer with the value 1."
+ (and (match_code "const_int")
+ (match_test "ival == 1")))
+
+(define_constraint "P2"
+ "An integer with the value 2."
+ (and (match_code "const_int")
+ (match_test "ival == 2")))
+
+(define_constraint "P3"
+ "An integer with the value 3."
+ (and (match_code "const_int")
+ (match_test "ival == 3")))
+
+(define_constraint "P4"
+ "An integer with the value 4."
+ (and (match_code "const_int")
+ (match_test "ival == 4")))
+
+(define_constraint "PA"
+ "An integer constant describing any macflag except variants involving M."
+ (and (match_code "const_int")
+ (match_test "ival != MACFLAG_M && ival != MACFLAG_IS_M")))
+
+(define_constraint "PB"
+ "An integer constant describing any macflag involving M."
+ (and (match_code "const_int")
+ (match_test "ival == MACFLAG_M || ival == MACFLAG_IS_M")))
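+
+;; Together PA and PB partition the macflag constants, letting an insn
+;; alternative demand either an M-variant flag (PB) or any non-M flag
+;; (PA); see e.g. operands 10 and 11 of flag_mul_macv2hi_parts_acconly
+;; in bfin.md.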
+
+
+;; Extra constraints
+
+(define_constraint "Q"
+ "A SYMBOL_REF."
+ (match_code "symbol_ref"))
+
diff --git a/gcc/config/bfin/crti.s b/gcc/config/bfin/crti.s
new file mode 100644
index 000000000..b6f20fc9e
--- /dev/null
+++ b/gcc/config/bfin/crti.s
@@ -0,0 +1,59 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
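+
+/* A sketch of what .init assembles to once crtn.s is linked in after the
+   crtbegin.o/crtend.o bodies (FD-PIC case):
+
+	[--SP] = P3;  LINK 12;		// prologue, from this file
+	...				// ctor bodies
+	unlink;  P3 = [SP++];  rts;	// epilogue, from crtn.s
+ */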
+
+ .ident "GNU C crti.o"
+
+ .section .init
+ .globl __init
+ .type __init,@function
+__init:
+#if defined __ID_SHARED_LIB__
+ [--SP] = P5;
+#elif defined __BFIN_FDPIC__
+ [--SP] = P3;
+#endif
+ LINK 12;
+#if defined __ID_SHARED_LIB__
+ P5 = [P5 + _current_shared_library_p5_offset_]
+#endif
+ .section .fini
+ .globl __fini
+ .type __fini,@function
+__fini:
+#if defined __ID_SHARED_LIB__
+ [--SP] = P5;
+#elif defined __BFIN_FDPIC__
+ [--SP] = P3;
+#endif
+ LINK 12;
+#if defined __ID_SHARED_LIB__
+ P5 = [P5 + _current_shared_library_p5_offset_]
+#endif
diff --git a/gcc/config/bfin/crtlibid.s b/gcc/config/bfin/crtlibid.s
new file mode 100644
index 000000000..beab80938
--- /dev/null
+++ b/gcc/config/bfin/crtlibid.s
@@ -0,0 +1,29 @@
+/* Provide a weak definition of the library ID, for the benefit of certain
+ configure scripts.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .ident "GNU C crtlibid.o"
+
+.weak _current_shared_library_p5_offset_
+.set _current_shared_library_p5_offset_, 0
diff --git a/gcc/config/bfin/crtn.s b/gcc/config/bfin/crtn.s
new file mode 100644
index 000000000..7fcd27bfa
--- /dev/null
+++ b/gcc/config/bfin/crtn.s
@@ -0,0 +1,50 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .ident "GNU C crtn.o"
+
+ .section .init
+ unlink;
+#if defined __ID_SHARED_LIB__
+ P5 = [SP++];
+#elif defined __BFIN_FDPIC__
+ P3 = [SP++];
+#endif
+ rts;
+
+ .section .fini
+ unlink;
+#if defined __ID_SHARED_LIB__
+ P5 = [SP++];
+#elif defined __BFIN_FDPIC__
+ P3 = [SP++];
+#endif
+ rts;
diff --git a/gcc/config/bfin/elf.h b/gcc/config/bfin/elf.h
new file mode 100644
index 000000000..975212faa
--- /dev/null
+++ b/gcc/config/bfin/elf.h
@@ -0,0 +1,73 @@
+/* Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+%{msim:%{!shared:crt0%O%s}} \
+%{!msim:%{!mcpu=bf561*:%{!msdram:basiccrt%O%s} %{msdram:basiccrts%O%s};: \
+ %{!msdram:basiccrt561%O%s} %{msdram:basiccrt561s%O%s}} \
+ %{mcpu=bf561*:%{mmulticore:%{!mcorea:%{!mcoreb:basiccrt561b%O%s}}}}} \
+crti%O%s crtbegin%O%s crtlibid%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+#undef LIB_SPEC
+#define LIB_SPEC "--start-group -lc %{msim:-lsim}%{!msim:-lnosys} --end-group \
+%{!T*:%{!msim:%{!msdram: \
+ %{mcpu=bf512*:-T bf512.ld%s}%{mcpu=bf514*:-T bf514.ld%s} \
+ %{mcpu=bf516*:-T bf516.ld%s}%{mcpu=bf518*:-T bf518.ld%s} \
+ %{mcpu=bf522*:-T bf522.ld%s}%{mcpu=bf523*:-T bf523.ld%s} \
+ %{mcpu=bf524*:-T bf524.ld%s}%{mcpu=bf525*:-T bf525.ld%s} \
+ %{mcpu=bf526*:-T bf526.ld%s}%{mcpu=bf527*:-T bf527.ld%s} \
+ %{mcpu=bf531*:-T bf531.ld%s}%{mcpu=bf532*:-T bf532.ld%s} \
+ %{mcpu=bf533*:-T bf533.ld%s}%{mcpu=bf534*:-T bf534.ld%s} \
+ %{mcpu=bf536*:-T bf536.ld%s}%{mcpu=bf537*:-T bf537.ld%s} \
+ %{mcpu=bf538*:-T bf538.ld%s}%{mcpu=bf539*:-T bf539.ld%s} \
+ %{mcpu=bf542*:-T bf542.ld%s}%{mcpu=bf544*:-T bf544.ld%s} \
+ %{mcpu=bf547*:-T bf547.ld%s}%{mcpu=bf548*:-T bf548.ld%s} \
+ %{mcpu=bf549*:-T bf549.ld%s} \
+ %{mcpu=bf561*:%{!mmulticore:-T bf561.ld%s} \
+ %{mmulticore:%{mcorea:-T bf561a.ld%s}} \
+ %{mmulticore:%{mcoreb:-T bf561b.ld%s}} \
+ %{mmulticore:%{!mcorea:%{!mcoreb:-T bf561m.ld%s}}}} \
+ %{!mcpu=*:%eno processor type specified for linking} \
+ %{!mcpu=bf561*:-T bfin-common-sc.ld%s} \
+ %{mcpu=bf561*:%{!mmulticore:-T bfin-common-sc.ld%s} \
+ %{mmulticore:-T bfin-common-mc.ld%s}}}}}"
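+
+/* How the specs above combine, as a sketch: "bfin-elf-gcc -mcpu=bf533
+   foo.o" (no -T, -msim or -msdram) links against the -lc/-lnosys group
+   plus "-T bf533.ld -T bfin-common-sc.ld".  */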
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#ifdef __BFIN_FDPIC__
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+asm (SECTION_OP); \
+asm ("P3 = [SP + 20];\n\tcall " USER_LABEL_PREFIX #FUNC ";"); \
+asm (TEXT_SECTION_ASM_OP);
+#endif
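+
+/* For instance (FUNC name hypothetical), CRT_CALL_STATIC_FUNCTION
+   (INIT_SECTION_ASM_OP, frame_dummy) expands to roughly
+	asm (".section .init");
+	asm ("P3 = [SP + 20];\n\tcall _frame_dummy;");
+	asm (TEXT_SECTION_ASM_OP);
+   reloading the FD-PIC GOT pointer (P3) saved by the crti.s prologue:
+   after "[--SP] = P3" and "LINK 12" it sits at SP + 20.  */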
+
+#undef SUBTARGET_DRIVER_SELF_SPECS
+#define SUBTARGET_DRIVER_SELF_SPECS \
+ "%{mfdpic:-msim} %{mid-shared-library:-msim}"
+
+#define NO_IMPLICIT_EXTERN_C
diff --git a/gcc/config/bfin/lib1funcs.asm b/gcc/config/bfin/lib1funcs.asm
new file mode 100644
index 000000000..4e15ad230
--- /dev/null
+++ b/gcc/config/bfin/lib1funcs.asm
@@ -0,0 +1,146 @@
+/* libgcc functions for Blackfin.
+ Copyright (C) 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef L_divsi3
+.text
+.align 2
+.global ___divsi3;
+.type ___divsi3, STT_FUNC;
+
+___divsi3:
+ [--SP]= RETS;
+ [--SP] = R7;
+
+ R2 = -R0;
+ CC = R0 < 0;
+ IF CC R0 = R2;
+ R7 = CC;
+
+ R2 = -R1;
+ CC = R1 < 0;
+ IF CC R1 = R2;
+ R2 = CC;
+ R7 = R7 ^ R2;
+
+ CALL ___udivsi3;
+
+ CC = R7;
+ R1 = -R0;
+ IF CC R0 = R1;
+
+ R7 = [SP++];
+ RETS = [SP++];
+ RTS;
+#endif
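+
+/* The sign handling above, as a C sketch (not the actual routine): make
+   both operands non-negative, XOR the sign bits into R7, divide
+   unsigned, then negate the quotient if exactly one input was negative:
+
+	int div_model (int a, int b)
+	{
+	  int neg = (a < 0) ^ (b < 0);
+	  unsigned int q = udiv (a < 0 ? -a : a, b < 0 ? -b : b);
+	  return neg ? -(int) q : (int) q;
+	}
+
+   where udiv stands for the unsigned routine in the L_udivsi3 section
+   below.  */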
+
+#ifdef L_modsi3
+.align 2
+.global ___modsi3;
+.type ___modsi3, STT_FUNC;
+
+___modsi3:
+ [--SP] = RETS;
+ [--SP] = R0;
+ [--SP] = R1;
+ CALL ___divsi3;
+ R2 = [SP++];
+ R1 = [SP++];
+ R2 *= R0;
+ R0 = R1 - R2;
+ RETS = [SP++];
+ RTS;
+#endif
+
+#ifdef L_udivsi3
+.align 2
+.global ___udivsi3;
+.type ___udivsi3, STT_FUNC;
+
+___udivsi3:
+ P0 = 32;
+ LSETUP (0f, 1f) LC0 = P0;
+ /* upper half of dividend */
+ R3 = 0;
+0:
+ /* The first time round in the loop we shift in garbage, but since we
+ perform 33 shifts, it doesn't matter. */
+ R0 = ROT R0 BY 1;
+ R3 = ROT R3 BY 1;
+ R2 = R3 - R1;
+ CC = R3 < R1 (IU);
+1:
+ /* Last instruction of the loop. */
+ IF ! CC R3 = R2;
+
+ /* Shift in the last bit. */
+ R0 = ROT R0 BY 1;
+ /* R0 is the result, R3 contains the remainder. */
+ R0 = ~ R0;
+ RTS;
+#endif
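+
+/* A rough C model of the division loop above -- an equivalent restoring-
+   division sketch, not the actual routine; the assembly instead rotates
+   the compare flag CC through R0 and complements R0 once at the end:
+
+	unsigned int udiv_model (unsigned int n, unsigned int d)
+	{
+	  unsigned int rem = 0;
+	  int i;
+	  for (i = 0; i < 32; i++)
+	    {
+	      rem = (rem << 1) | (n >> 31);
+	      n <<= 1;
+	      if (rem >= d)
+		{
+		  rem -= d;
+		  n |= 1;
+		}
+	    }
+	  return n;
+	}
+
+   On exit n is the quotient and rem the remainder, matching R0 and R3.  */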
+
+#ifdef L_umodsi3
+.align 2
+.global ___umodsi3;
+.type ___umodsi3, STT_FUNC;
+
+___umodsi3:
+ [--SP] = RETS;
+ CALL ___udivsi3;
+ R0 = R3;
+ RETS = [SP++];
+ RTS;
+#endif
+
+#ifdef L_umulsi3_highpart
+.align 2
+.global ___umulsi3_highpart;
+.type ___umulsi3_highpart, STT_FUNC;
+
+___umulsi3_highpart:
+ A1 = R1.L * R0.L (FU);
+ A1 = A1 >> 16;
+ A0 = R1.H * R0.H, A1 += R1.L * R0.H (FU);
+ A1 += R0.L * R1.H (FU);
+ A1 = A1 >> 16;
+ A0 += A1;
+ R0 = A0 (FU);
+ RTS;
+#endif
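+
+/* The sequence above sums 16x16 partial products into the upper word of
+   the 64-bit product; a C model of the result (a sketch):
+
+	unsigned int umulhi_model (unsigned int a, unsigned int b)
+	{
+	  unsigned long long p = (unsigned long long) a * b;
+	  return (unsigned int) (p >> 32);
+	}
+*/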
+
+#ifdef L_smulsi3_highpart
+.align 2
+.global ___smulsi3_highpart;
+.type ___smulsi3_highpart, STT_FUNC;
+
+___smulsi3_highpart:
+ A1 = R1.L * R0.L (FU);
+ A1 = A1 >> 16;
+ A0 = R0.H * R1.H, A1 += R0.H * R1.L (IS,M);
+ A1 += R1.H * R0.L (IS,M);
+ A1 = A1 >>> 16;
+ R0 = (A0 += A1);
+ RTS;
+#endif
diff --git a/gcc/config/bfin/libgcc-bfin.ver b/gcc/config/bfin/libgcc-bfin.ver
new file mode 100644
index 000000000..516d91f65
--- /dev/null
+++ b/gcc/config/bfin/libgcc-bfin.ver
@@ -0,0 +1,1914 @@
+# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+# 2008, 2009, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_3.0 {
+ # libgcc1 integer symbols
+ ___absvsi2
+ ___addvsi3
+ ___ashlsi3
+ ___ashrsi3
+ ___divsi3
+ ___lshrsi3
+ ___modsi3
+ ___mulsi3
+ ___mulvsi3
+ ___negvsi2
+ ___subvsi3
+ ___udivsi3
+ ___umodsi3
+
+ # libgcc1 floating point symbols
+ ___addsf3
+ ___adddf3
+ ___addxf3
+ ___addtf3
+ ___divsf3
+ ___divdf3
+ ___divxf3
+ ___divtf3
+ ___eqsf2
+ ___eqdf2
+ ___eqxf2
+ ___eqtf2
+ ___extenddfxf2
+ ___extenddftf2
+ ___extendsfdf2
+ ___extendsfxf2
+ ___extendsftf2
+ ___fixsfsi
+ ___fixdfsi
+ ___fixxfsi
+ ___fixtfsi
+ ___floatsisf
+ ___floatsidf
+ ___floatsixf
+ ___floatsitf
+ ___gesf2
+ ___gedf2
+ ___gexf2
+ ___getf2
+ ___gtsf2
+ ___gtdf2
+ ___gtxf2
+ ___gttf2
+ ___lesf2
+ ___ledf2
+ ___lexf2
+ ___letf2
+ ___ltsf2
+ ___ltdf2
+ ___ltxf2
+ ___lttf2
+ ___mulsf3
+ ___muldf3
+ ___mulxf3
+ ___multf3
+ ___negsf2
+ ___negdf2
+ ___negxf2
+ ___negtf2
+ ___nesf2
+ ___nedf2
+ ___nexf2
+ ___netf2
+ ___subsf3
+ ___subdf3
+ ___subxf3
+ ___subtf3
+ ___truncdfsf2
+ ___truncxfsf2
+ ___trunctfsf2
+ ___truncxfdf2
+ ___trunctfdf2
+
+ # libgcc2 DImode arithmetic (for 32-bit targets).
+ ___absvdi2
+ ___addvdi3
+ ___ashldi3
+ ___ashrdi3
+ ___cmpdi2
+ ___divdi3
+ ___ffsdi2
+ ___fixdfdi
+ ___fixsfdi
+ ___fixtfdi
+ ___fixxfdi
+ ___fixunsdfdi
+ ___fixunsdfsi
+ ___fixunssfsi
+ ___fixunssfdi
+ ___fixunstfdi
+ ___fixunstfsi
+ ___fixunsxfdi
+ ___fixunsxfsi
+ ___floatdidf
+ ___floatdisf
+ ___floatdixf
+ ___floatditf
+ ___lshrdi3
+ ___moddi3
+ ___muldi3
+ ___mulvdi3
+ ___negdi2
+ ___negvdi2
+ ___subvdi3
+ ___ucmpdi2
+ ___udivdi3
+ ___udivmoddi4
+ ___umoddi3
+
+ # libgcc2 TImode arithmetic (for 64-bit targets).
+ ___ashlti3
+ ___ashrti3
+ ___cmpti2
+ ___divti3
+ ___ffsti2
+ ___fixdfti
+ ___fixsfti
+ ___fixtfti
+ ___fixxfti
+ ___lshrti3
+ ___modti3
+ ___multi3
+ ___negti2
+ ___ucmpti2
+ ___udivmodti4
+ ___udivti3
+ ___umodti3
+ ___fixunsdfti
+ ___fixunssfti
+ ___fixunstfti
+ ___fixunsxfti
+ ___floattidf
+ ___floattisf
+ ___floattixf
+ ___floattitf
+
+ # Used to deal with trampoline initialization on some platforms
+ ___clear_cache
+
+ # EH symbols
+ __Unwind_DeleteException
+ __Unwind_Find_FDE
+ __Unwind_ForcedUnwind
+ __Unwind_GetGR
+ __Unwind_GetIP
+ __Unwind_GetLanguageSpecificData
+ __Unwind_GetRegionStart
+ __Unwind_GetTextRelBase
+ __Unwind_GetDataRelBase
+ __Unwind_RaiseException
+ __Unwind_Resume
+ __Unwind_SetGR
+ __Unwind_SetIP
+ ___deregister_frame
+ ___deregister_frame_info
+ ___deregister_frame_info_bases
+ ___register_frame
+ ___register_frame_info
+ ___register_frame_info_bases
+ ___register_frame_info_table
+ ___register_frame_info_table_bases
+ ___register_frame_table
+
+ # SjLj EH symbols
+ __Unwind_SjLj_Register
+ __Unwind_SjLj_Unregister
+ __Unwind_SjLj_RaiseException
+ __Unwind_SjLj_ForcedUnwind
+ __Unwind_SjLj_Resume
+}
+
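+# Each "%inherit CHILD PARENT" directive below makes CHILD inherit all of
+# PARENT's symbols in the generated version script, so each block lists
+# only the symbols added at that release.
+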
+%inherit GCC_3.3 GCC_3.0
+GCC_3.3 {
+ __Unwind_FindEnclosingFunction
+ __Unwind_GetCFA
+ __Unwind_Backtrace
+ __Unwind_Resume_or_Rethrow
+ __Unwind_SjLj_Resume_or_Rethrow
+}
+
+%inherit GCC_3.3.1 GCC_3.3
+GCC_3.3.1 {
+ ___gcc_personality_sj0
+ ___gcc_personality_v0
+}
+
+%inherit GCC_3.3.2 GCC_3.3.1
+GCC_3.3.2 {
+}
+%inherit GCC_3.3.4 GCC_3.3.2
+GCC_3.3.4 {
+ ___unorddf2
+ ___unordsf2
+}
+
+%inherit GCC_3.4 GCC_3.3.4
+GCC_3.4 {
+ # bit scanning and counting built-ins
+ ___clzsi2
+ ___clzdi2
+ ___clzti2
+ ___ctzsi2
+ ___ctzdi2
+ ___ctzti2
+ ___popcountsi2
+ ___popcountdi2
+ ___popcountti2
+ ___paritysi2
+ ___paritydi2
+ ___parityti2
+}
+
+%inherit GCC_3.4.2 GCC_3.4
+GCC_3.4.2 {
+ # Used to deal with trampoline initialization on some platforms
+ ___enable_execute_stack
+ ___trampoline_setup
+}
+
+%inherit GCC_3.4.4 GCC_3.4.2
+GCC_3.4.4 {
+ # libgcc2 TImode arithmetic (for 64-bit targets).
+ ___absvti2
+ ___addvti3
+ ___mulvti3
+ ___negvti2
+ ___subvti3
+}
+
+%inherit GCC_4.0.0 GCC_3.4.4
+GCC_4.0.0 {
+ # libgcc2 __builtin_powi helpers.
+ ___powisf2
+ ___powidf2
+ ___powixf2
+ ___powitf2
+
+ # c99 compliant complex arithmetic
+ ___divsc3
+ ___divdc3
+ ___divxc3
+ ___divtc3
+ ___mulsc3
+ ___muldc3
+ ___mulxc3
+ ___multc3
+}
+
+%inherit GCC_4.1.0 GCC_4.0.0
+GCC_4.1.0 {
+ ___smulsi3_highpart
+ ___umulsi3_highpart
+}
+
+%inherit GCC_4.2.0 GCC_4.1.0
+GCC_4.2.0 {
+ # unsigned-to-floating conversions
+ ___floatunsisf
+ ___floatunsidf
+ ___floatunsixf
+ ___floatunsitf
+ ___floatundidf
+ ___floatundisf
+ ___floatundixf
+ ___floatunditf
+ ___floatuntidf
+ ___floatuntisf
+ ___floatuntixf
+ ___floatuntitf
+ __Unwind_GetIPInfo
+}
+
+%inherit GCC_4.3.0 GCC_4.2.0
+GCC_4.3.0 {
+ # byte swapping routines
+ ___bswapsi2
+ ___bswapdi2
+ ___emutls_get_address
+ ___emutls_register_common
+ ___ffssi2
+ ___extendxftf2
+ ___trunctfxf2
+
+ # fixed-point routines
+ ___addqq3
+ ___addhq3
+ ___addsq3
+ ___adddq3
+ ___addtq3
+ ___adduqq3
+ ___adduhq3
+ ___addusq3
+ ___addudq3
+ ___addutq3
+ ___addha3
+ ___addsa3
+ ___addda3
+ ___addta3
+ ___adduha3
+ ___addusa3
+ ___adduda3
+ ___adduta3
+ ___ssaddqq3
+ ___ssaddhq3
+ ___ssaddsq3
+ ___ssadddq3
+ ___ssaddtq3
+ ___ssaddha3
+ ___ssaddsa3
+ ___ssaddda3
+ ___ssaddta3
+ ___usadduqq3
+ ___usadduhq3
+ ___usaddusq3
+ ___usaddudq3
+ ___usaddutq3
+ ___usadduha3
+ ___usaddusa3
+ ___usadduda3
+ ___usadduta3
+ ___subqq3
+ ___subhq3
+ ___subsq3
+ ___subdq3
+ ___subtq3
+ ___subuqq3
+ ___subuhq3
+ ___subusq3
+ ___subudq3
+ ___subutq3
+ ___subha3
+ ___subsa3
+ ___subda3
+ ___subta3
+ ___subuha3
+ ___subusa3
+ ___subuda3
+ ___subuta3
+ ___sssubqq3
+ ___sssubhq3
+ ___sssubsq3
+ ___sssubdq3
+ ___sssubtq3
+ ___sssubha3
+ ___sssubsa3
+ ___sssubda3
+ ___sssubta3
+ ___ussubuqq3
+ ___ussubuhq3
+ ___ussubusq3
+ ___ussubudq3
+ ___ussubutq3
+ ___ussubuha3
+ ___ussubusa3
+ ___ussubuda3
+ ___ussubuta3
+ ___mulqq3
+ ___mulhq3
+ ___mulsq3
+ ___muldq3
+ ___multq3
+ ___muluqq3
+ ___muluhq3
+ ___mulusq3
+ ___muludq3
+ ___mulutq3
+ ___mulha3
+ ___mulsa3
+ ___mulda3
+ ___multa3
+ ___muluha3
+ ___mulusa3
+ ___muluda3
+ ___muluta3
+ ___ssmulqq3
+ ___ssmulhq3
+ ___ssmulsq3
+ ___ssmuldq3
+ ___ssmultq3
+ ___ssmulha3
+ ___ssmulsa3
+ ___ssmulda3
+ ___ssmulta3
+ ___usmuluqq3
+ ___usmuluhq3
+ ___usmulusq3
+ ___usmuludq3
+ ___usmulutq3
+ ___usmuluha3
+ ___usmulusa3
+ ___usmuluda3
+ ___usmuluta3
+ ___divqq3
+ ___divhq3
+ ___divsq3
+ ___divdq3
+ ___divtq3
+ ___divha3
+ ___divsa3
+ ___divda3
+ ___divta3
+ ___udivuqq3
+ ___udivuhq3
+ ___udivusq3
+ ___udivudq3
+ ___udivutq3
+ ___udivuha3
+ ___udivusa3
+ ___udivuda3
+ ___udivuta3
+ ___ssdivqq3
+ ___ssdivhq3
+ ___ssdivsq3
+ ___ssdivdq3
+ ___ssdivtq3
+ ___ssdivha3
+ ___ssdivsa3
+ ___ssdivda3
+ ___ssdivta3
+ ___usdivuqq3
+ ___usdivuhq3
+ ___usdivusq3
+ ___usdivudq3
+ ___usdivutq3
+ ___usdivuha3
+ ___usdivusa3
+ ___usdivuda3
+ ___usdivuta3
+ ___negqq2
+ ___neghq2
+ ___negsq2
+ ___negdq2
+ ___negtq2
+ ___neguqq2
+ ___neguhq2
+ ___negusq2
+ ___negudq2
+ ___negutq2
+ ___negha2
+ ___negsa2
+ ___negda2
+ ___negta2
+ ___neguha2
+ ___negusa2
+ ___neguda2
+ ___neguta2
+ ___ssnegqq2
+ ___ssneghq2
+ ___ssnegsq2
+ ___ssnegdq2
+ ___ssnegtq2
+ ___ssnegha2
+ ___ssnegsa2
+ ___ssnegda2
+ ___ssnegta2
+ ___usneguqq2
+ ___usneguhq2
+ ___usnegusq2
+ ___usnegudq2
+ ___usnegutq2
+ ___usneguha2
+ ___usnegusa2
+ ___usneguda2
+ ___usneguta2
+ ___ashlqq3
+ ___ashlhq3
+ ___ashlsq3
+ ___ashldq3
+ ___ashltq3
+ ___ashluqq3
+ ___ashluhq3
+ ___ashlusq3
+ ___ashludq3
+ ___ashlutq3
+ ___ashlha3
+ ___ashlsa3
+ ___ashlda3
+ ___ashlta3
+ ___ashluha3
+ ___ashlusa3
+ ___ashluda3
+ ___ashluta3
+ ___ashrqq3
+ ___ashrhq3
+ ___ashrsq3
+ ___ashrdq3
+ ___ashrtq3
+ ___ashrha3
+ ___ashrsa3
+ ___ashrda3
+ ___ashrta3
+ ___lshruqq3
+ ___lshruhq3
+ ___lshrusq3
+ ___lshrudq3
+ ___lshrutq3
+ ___lshruha3
+ ___lshrusa3
+ ___lshruda3
+ ___lshruta3
+ ___ssashlqq3
+ ___ssashlhq3
+ ___ssashlsq3
+ ___ssashldq3
+ ___ssashltq3
+ ___ssashlha3
+ ___ssashlsa3
+ ___ssashlda3
+ ___ssashlta3
+ ___usashluqq3
+ ___usashluhq3
+ ___usashlusq3
+ ___usashludq3
+ ___usashlutq3
+ ___usashluha3
+ ___usashlusa3
+ ___usashluda3
+ ___usashluta3
+ ___cmpqq2
+ ___cmphq2
+ ___cmpsq2
+ ___cmpdq2
+ ___cmptq2
+ ___cmpuqq2
+ ___cmpuhq2
+ ___cmpusq2
+ ___cmpudq2
+ ___cmputq2
+ ___cmpha2
+ ___cmpsa2
+ ___cmpda2
+ ___cmpta2
+ ___cmpuha2
+ ___cmpusa2
+ ___cmpuda2
+ ___cmputa2
+ ___fractqqhq2
+ ___fractqqsq2
+ ___fractqqdq2
+ ___fractqqtq2
+ ___fractqqha
+ ___fractqqsa
+ ___fractqqda
+ ___fractqqta
+ ___fractqquqq
+ ___fractqquhq
+ ___fractqqusq
+ ___fractqqudq
+ ___fractqqutq
+ ___fractqquha
+ ___fractqqusa
+ ___fractqquda
+ ___fractqquta
+ ___fractqqqi
+ ___fractqqhi
+ ___fractqqsi
+ ___fractqqdi
+ ___fractqqti
+ ___fractqqsf
+ ___fractqqdf
+ ___fracthqqq2
+ ___fracthqsq2
+ ___fracthqdq2
+ ___fracthqtq2
+ ___fracthqha
+ ___fracthqsa
+ ___fracthqda
+ ___fracthqta
+ ___fracthquqq
+ ___fracthquhq
+ ___fracthqusq
+ ___fracthqudq
+ ___fracthqutq
+ ___fracthquha
+ ___fracthqusa
+ ___fracthquda
+ ___fracthquta
+ ___fracthqqi
+ ___fracthqhi
+ ___fracthqsi
+ ___fracthqdi
+ ___fracthqti
+ ___fracthqsf
+ ___fracthqdf
+ ___fractsqqq2
+ ___fractsqhq2
+ ___fractsqdq2
+ ___fractsqtq2
+ ___fractsqha
+ ___fractsqsa
+ ___fractsqda
+ ___fractsqta
+ ___fractsquqq
+ ___fractsquhq
+ ___fractsqusq
+ ___fractsqudq
+ ___fractsqutq
+ ___fractsquha
+ ___fractsqusa
+ ___fractsquda
+ ___fractsquta
+ ___fractsqqi
+ ___fractsqhi
+ ___fractsqsi
+ ___fractsqdi
+ ___fractsqti
+ ___fractsqsf
+ ___fractsqdf
+ ___fractdqqq2
+ ___fractdqhq2
+ ___fractdqsq2
+ ___fractdqtq2
+ ___fractdqha
+ ___fractdqsa
+ ___fractdqda
+ ___fractdqta
+ ___fractdquqq
+ ___fractdquhq
+ ___fractdqusq
+ ___fractdqudq
+ ___fractdqutq
+ ___fractdquha
+ ___fractdqusa
+ ___fractdquda
+ ___fractdquta
+ ___fractdqqi
+ ___fractdqhi
+ ___fractdqsi
+ ___fractdqdi
+ ___fractdqti
+ ___fractdqsf
+ ___fractdqdf
+ ___fracttqqq2
+ ___fracttqhq2
+ ___fracttqsq2
+ ___fracttqdq2
+ ___fracttqha
+ ___fracttqsa
+ ___fracttqda
+ ___fracttqta
+ ___fracttquqq
+ ___fracttquhq
+ ___fracttqusq
+ ___fracttqudq
+ ___fracttqutq
+ ___fracttquha
+ ___fracttqusa
+ ___fracttquda
+ ___fracttquta
+ ___fracttqqi
+ ___fracttqhi
+ ___fracttqsi
+ ___fracttqdi
+ ___fracttqti
+ ___fracttqsf
+ ___fracttqdf
+ ___fracthaqq
+ ___fracthahq
+ ___fracthasq
+ ___fracthadq
+ ___fracthatq
+ ___fracthasa2
+ ___fracthada2
+ ___fracthata2
+ ___fracthauqq
+ ___fracthauhq
+ ___fracthausq
+ ___fracthaudq
+ ___fracthautq
+ ___fracthauha
+ ___fracthausa
+ ___fracthauda
+ ___fracthauta
+ ___fracthaqi
+ ___fracthahi
+ ___fracthasi
+ ___fracthadi
+ ___fracthati
+ ___fracthasf
+ ___fracthadf
+ ___fractsaqq
+ ___fractsahq
+ ___fractsasq
+ ___fractsadq
+ ___fractsatq
+ ___fractsaha2
+ ___fractsada2
+ ___fractsata2
+ ___fractsauqq
+ ___fractsauhq
+ ___fractsausq
+ ___fractsaudq
+ ___fractsautq
+ ___fractsauha
+ ___fractsausa
+ ___fractsauda
+ ___fractsauta
+ ___fractsaqi
+ ___fractsahi
+ ___fractsasi
+ ___fractsadi
+ ___fractsati
+ ___fractsasf
+ ___fractsadf
+ ___fractdaqq
+ ___fractdahq
+ ___fractdasq
+ ___fractdadq
+ ___fractdatq
+ ___fractdaha2
+ ___fractdasa2
+ ___fractdata2
+ ___fractdauqq
+ ___fractdauhq
+ ___fractdausq
+ ___fractdaudq
+ ___fractdautq
+ ___fractdauha
+ ___fractdausa
+ ___fractdauda
+ ___fractdauta
+ ___fractdaqi
+ ___fractdahi
+ ___fractdasi
+ ___fractdadi
+ ___fractdati
+ ___fractdasf
+ ___fractdadf
+ ___fracttaqq
+ ___fracttahq
+ ___fracttasq
+ ___fracttadq
+ ___fracttatq
+ ___fracttaha2
+ ___fracttasa2
+ ___fracttada2
+ ___fracttauqq
+ ___fracttauhq
+ ___fracttausq
+ ___fracttaudq
+ ___fracttautq
+ ___fracttauha
+ ___fracttausa
+ ___fracttauda
+ ___fracttauta
+ ___fracttaqi
+ ___fracttahi
+ ___fracttasi
+ ___fracttadi
+ ___fracttati
+ ___fracttasf
+ ___fracttadf
+ ___fractuqqqq
+ ___fractuqqhq
+ ___fractuqqsq
+ ___fractuqqdq
+ ___fractuqqtq
+ ___fractuqqha
+ ___fractuqqsa
+ ___fractuqqda
+ ___fractuqqta
+ ___fractuqquhq2
+ ___fractuqqusq2
+ ___fractuqqudq2
+ ___fractuqqutq2
+ ___fractuqquha
+ ___fractuqqusa
+ ___fractuqquda
+ ___fractuqquta
+ ___fractuqqqi
+ ___fractuqqhi
+ ___fractuqqsi
+ ___fractuqqdi
+ ___fractuqqti
+ ___fractuqqsf
+ ___fractuqqdf
+ ___fractuhqqq
+ ___fractuhqhq
+ ___fractuhqsq
+ ___fractuhqdq
+ ___fractuhqtq
+ ___fractuhqha
+ ___fractuhqsa
+ ___fractuhqda
+ ___fractuhqta
+ ___fractuhquqq2
+ ___fractuhqusq2
+ ___fractuhqudq2
+ ___fractuhqutq2
+ ___fractuhquha
+ ___fractuhqusa
+ ___fractuhquda
+ ___fractuhquta
+ ___fractuhqqi
+ ___fractuhqhi
+ ___fractuhqsi
+ ___fractuhqdi
+ ___fractuhqti
+ ___fractuhqsf
+ ___fractuhqdf
+ ___fractusqqq
+ ___fractusqhq
+ ___fractusqsq
+ ___fractusqdq
+ ___fractusqtq
+ ___fractusqha
+ ___fractusqsa
+ ___fractusqda
+ ___fractusqta
+ ___fractusquqq2
+ ___fractusquhq2
+ ___fractusqudq2
+ ___fractusqutq2
+ ___fractusquha
+ ___fractusqusa
+ ___fractusquda
+ ___fractusquta
+ ___fractusqqi
+ ___fractusqhi
+ ___fractusqsi
+ ___fractusqdi
+ ___fractusqti
+ ___fractusqsf
+ ___fractusqdf
+ ___fractudqqq
+ ___fractudqhq
+ ___fractudqsq
+ ___fractudqdq
+ ___fractudqtq
+ ___fractudqha
+ ___fractudqsa
+ ___fractudqda
+ ___fractudqta
+ ___fractudquqq2
+ ___fractudquhq2
+ ___fractudqusq2
+ ___fractudqutq2
+ ___fractudquha
+ ___fractudqusa
+ ___fractudquda
+ ___fractudquta
+ ___fractudqqi
+ ___fractudqhi
+ ___fractudqsi
+ ___fractudqdi
+ ___fractudqti
+ ___fractudqsf
+ ___fractudqdf
+ ___fractutqqq
+ ___fractutqhq
+ ___fractutqsq
+ ___fractutqdq
+ ___fractutqtq
+ ___fractutqha
+ ___fractutqsa
+ ___fractutqda
+ ___fractutqta
+ ___fractutquqq2
+ ___fractutquhq2
+ ___fractutqusq2
+ ___fractutqudq2
+ ___fractutquha
+ ___fractutqusa
+ ___fractutquda
+ ___fractutquta
+ ___fractutqqi
+ ___fractutqhi
+ ___fractutqsi
+ ___fractutqdi
+ ___fractutqti
+ ___fractutqsf
+ ___fractutqdf
+ ___fractuhaqq
+ ___fractuhahq
+ ___fractuhasq
+ ___fractuhadq
+ ___fractuhatq
+ ___fractuhaha
+ ___fractuhasa
+ ___fractuhada
+ ___fractuhata
+ ___fractuhauqq
+ ___fractuhauhq
+ ___fractuhausq
+ ___fractuhaudq
+ ___fractuhautq
+ ___fractuhausa2
+ ___fractuhauda2
+ ___fractuhauta2
+ ___fractuhaqi
+ ___fractuhahi
+ ___fractuhasi
+ ___fractuhadi
+ ___fractuhati
+ ___fractuhasf
+ ___fractuhadf
+ ___fractusaqq
+ ___fractusahq
+ ___fractusasq
+ ___fractusadq
+ ___fractusatq
+ ___fractusaha
+ ___fractusasa
+ ___fractusada
+ ___fractusata
+ ___fractusauqq
+ ___fractusauhq
+ ___fractusausq
+ ___fractusaudq
+ ___fractusautq
+ ___fractusauha2
+ ___fractusauda2
+ ___fractusauta2
+ ___fractusaqi
+ ___fractusahi
+ ___fractusasi
+ ___fractusadi
+ ___fractusati
+ ___fractusasf
+ ___fractusadf
+ ___fractudaqq
+ ___fractudahq
+ ___fractudasq
+ ___fractudadq
+ ___fractudatq
+ ___fractudaha
+ ___fractudasa
+ ___fractudada
+ ___fractudata
+ ___fractudauqq
+ ___fractudauhq
+ ___fractudausq
+ ___fractudaudq
+ ___fractudautq
+ ___fractudauha2
+ ___fractudausa2
+ ___fractudauta2
+ ___fractudaqi
+ ___fractudahi
+ ___fractudasi
+ ___fractudadi
+ ___fractudati
+ ___fractudasf
+ ___fractudadf
+ ___fractutaqq
+ ___fractutahq
+ ___fractutasq
+ ___fractutadq
+ ___fractutatq
+ ___fractutaha
+ ___fractutasa
+ ___fractutada
+ ___fractutata
+ ___fractutauqq
+ ___fractutauhq
+ ___fractutausq
+ ___fractutaudq
+ ___fractutautq
+ ___fractutauha2
+ ___fractutausa2
+ ___fractutauda2
+ ___fractutaqi
+ ___fractutahi
+ ___fractutasi
+ ___fractutadi
+ ___fractutati
+ ___fractutasf
+ ___fractutadf
+ ___fractqiqq
+ ___fractqihq
+ ___fractqisq
+ ___fractqidq
+ ___fractqitq
+ ___fractqiha
+ ___fractqisa
+ ___fractqida
+ ___fractqita
+ ___fractqiuqq
+ ___fractqiuhq
+ ___fractqiusq
+ ___fractqiudq
+ ___fractqiutq
+ ___fractqiuha
+ ___fractqiusa
+ ___fractqiuda
+ ___fractqiuta
+ ___fracthiqq
+ ___fracthihq
+ ___fracthisq
+ ___fracthidq
+ ___fracthitq
+ ___fracthiha
+ ___fracthisa
+ ___fracthida
+ ___fracthita
+ ___fracthiuqq
+ ___fracthiuhq
+ ___fracthiusq
+ ___fracthiudq
+ ___fracthiutq
+ ___fracthiuha
+ ___fracthiusa
+ ___fracthiuda
+ ___fracthiuta
+ ___fractsiqq
+ ___fractsihq
+ ___fractsisq
+ ___fractsidq
+ ___fractsitq
+ ___fractsiha
+ ___fractsisa
+ ___fractsida
+ ___fractsita
+ ___fractsiuqq
+ ___fractsiuhq
+ ___fractsiusq
+ ___fractsiudq
+ ___fractsiutq
+ ___fractsiuha
+ ___fractsiusa
+ ___fractsiuda
+ ___fractsiuta
+ ___fractdiqq
+ ___fractdihq
+ ___fractdisq
+ ___fractdidq
+ ___fractditq
+ ___fractdiha
+ ___fractdisa
+ ___fractdida
+ ___fractdita
+ ___fractdiuqq
+ ___fractdiuhq
+ ___fractdiusq
+ ___fractdiudq
+ ___fractdiutq
+ ___fractdiuha
+ ___fractdiusa
+ ___fractdiuda
+ ___fractdiuta
+ ___fracttiqq
+ ___fracttihq
+ ___fracttisq
+ ___fracttidq
+ ___fracttitq
+ ___fracttiha
+ ___fracttisa
+ ___fracttida
+ ___fracttita
+ ___fracttiuqq
+ ___fracttiuhq
+ ___fracttiusq
+ ___fracttiudq
+ ___fracttiutq
+ ___fracttiuha
+ ___fracttiusa
+ ___fracttiuda
+ ___fracttiuta
+ ___fractsfqq
+ ___fractsfhq
+ ___fractsfsq
+ ___fractsfdq
+ ___fractsftq
+ ___fractsfha
+ ___fractsfsa
+ ___fractsfda
+ ___fractsfta
+ ___fractsfuqq
+ ___fractsfuhq
+ ___fractsfusq
+ ___fractsfudq
+ ___fractsfutq
+ ___fractsfuha
+ ___fractsfusa
+ ___fractsfuda
+ ___fractsfuta
+ ___fractdfqq
+ ___fractdfhq
+ ___fractdfsq
+ ___fractdfdq
+ ___fractdftq
+ ___fractdfha
+ ___fractdfsa
+ ___fractdfda
+ ___fractdfta
+ ___fractdfuqq
+ ___fractdfuhq
+ ___fractdfusq
+ ___fractdfudq
+ ___fractdfutq
+ ___fractdfuha
+ ___fractdfusa
+ ___fractdfuda
+ ___fractdfuta
+ ___satfractqqhq2
+ ___satfractqqsq2
+ ___satfractqqdq2
+ ___satfractqqtq2
+ ___satfractqqha
+ ___satfractqqsa
+ ___satfractqqda
+ ___satfractqqta
+ ___satfractqquqq
+ ___satfractqquhq
+ ___satfractqqusq
+ ___satfractqqudq
+ ___satfractqqutq
+ ___satfractqquha
+ ___satfractqqusa
+ ___satfractqquda
+ ___satfractqquta
+ ___satfracthqqq2
+ ___satfracthqsq2
+ ___satfracthqdq2
+ ___satfracthqtq2
+ ___satfracthqha
+ ___satfracthqsa
+ ___satfracthqda
+ ___satfracthqta
+ ___satfracthquqq
+ ___satfracthquhq
+ ___satfracthqusq
+ ___satfracthqudq
+ ___satfracthqutq
+ ___satfracthquha
+ ___satfracthqusa
+ ___satfracthquda
+ ___satfracthquta
+ ___satfractsqqq2
+ ___satfractsqhq2
+ ___satfractsqdq2
+ ___satfractsqtq2
+ ___satfractsqha
+ ___satfractsqsa
+ ___satfractsqda
+ ___satfractsqta
+ ___satfractsquqq
+ ___satfractsquhq
+ ___satfractsqusq
+ ___satfractsqudq
+ ___satfractsqutq
+ ___satfractsquha
+ ___satfractsqusa
+ ___satfractsquda
+ ___satfractsquta
+ ___satfractdqqq2
+ ___satfractdqhq2
+ ___satfractdqsq2
+ ___satfractdqtq2
+ ___satfractdqha
+ ___satfractdqsa
+ ___satfractdqda
+ ___satfractdqta
+ ___satfractdquqq
+ ___satfractdquhq
+ ___satfractdqusq
+ ___satfractdqudq
+ ___satfractdqutq
+ ___satfractdquha
+ ___satfractdqusa
+ ___satfractdquda
+ ___satfractdquta
+ ___satfracttqqq2
+ ___satfracttqhq2
+ ___satfracttqsq2
+ ___satfracttqdq2
+ ___satfracttqha
+ ___satfracttqsa
+ ___satfracttqda
+ ___satfracttqta
+ ___satfracttquqq
+ ___satfracttquhq
+ ___satfracttqusq
+ ___satfracttqudq
+ ___satfracttqutq
+ ___satfracttquha
+ ___satfracttqusa
+ ___satfracttquda
+ ___satfracttquta
+ ___satfracthaqq
+ ___satfracthahq
+ ___satfracthasq
+ ___satfracthadq
+ ___satfracthatq
+ ___satfracthasa2
+ ___satfracthada2
+ ___satfracthata2
+ ___satfracthauqq
+ ___satfracthauhq
+ ___satfracthausq
+ ___satfracthaudq
+ ___satfracthautq
+ ___satfracthauha
+ ___satfracthausa
+ ___satfracthauda
+ ___satfracthauta
+ ___satfractsaqq
+ ___satfractsahq
+ ___satfractsasq
+ ___satfractsadq
+ ___satfractsatq
+ ___satfractsaha2
+ ___satfractsada2
+ ___satfractsata2
+ ___satfractsauqq
+ ___satfractsauhq
+ ___satfractsausq
+ ___satfractsaudq
+ ___satfractsautq
+ ___satfractsauha
+ ___satfractsausa
+ ___satfractsauda
+ ___satfractsauta
+ ___satfractdaqq
+ ___satfractdahq
+ ___satfractdasq
+ ___satfractdadq
+ ___satfractdatq
+ ___satfractdaha2
+ ___satfractdasa2
+ ___satfractdata2
+ ___satfractdauqq
+ ___satfractdauhq
+ ___satfractdausq
+ ___satfractdaudq
+ ___satfractdautq
+ ___satfractdauha
+ ___satfractdausa
+ ___satfractdauda
+ ___satfractdauta
+ ___satfracttaqq
+ ___satfracttahq
+ ___satfracttasq
+ ___satfracttadq
+ ___satfracttatq
+ ___satfracttaha2
+ ___satfracttasa2
+ ___satfracttada2
+ ___satfracttauqq
+ ___satfracttauhq
+ ___satfracttausq
+ ___satfracttaudq
+ ___satfracttautq
+ ___satfracttauha
+ ___satfracttausa
+ ___satfracttauda
+ ___satfracttauta
+ ___satfractuqqqq
+ ___satfractuqqhq
+ ___satfractuqqsq
+ ___satfractuqqdq
+ ___satfractuqqtq
+ ___satfractuqqha
+ ___satfractuqqsa
+ ___satfractuqqda
+ ___satfractuqqta
+ ___satfractuqquhq2
+ ___satfractuqqusq2
+ ___satfractuqqudq2
+ ___satfractuqqutq2
+ ___satfractuqquha
+ ___satfractuqqusa
+ ___satfractuqquda
+ ___satfractuqquta
+ ___satfractuhqqq
+ ___satfractuhqhq
+ ___satfractuhqsq
+ ___satfractuhqdq
+ ___satfractuhqtq
+ ___satfractuhqha
+ ___satfractuhqsa
+ ___satfractuhqda
+ ___satfractuhqta
+ ___satfractuhquqq2
+ ___satfractuhqusq2
+ ___satfractuhqudq2
+ ___satfractuhqutq2
+ ___satfractuhquha
+ ___satfractuhqusa
+ ___satfractuhquda
+ ___satfractuhquta
+ ___satfractusqqq
+ ___satfractusqhq
+ ___satfractusqsq
+ ___satfractusqdq
+ ___satfractusqtq
+ ___satfractusqha
+ ___satfractusqsa
+ ___satfractusqda
+ ___satfractusqta
+ ___satfractusquqq2
+ ___satfractusquhq2
+ ___satfractusqudq2
+ ___satfractusqutq2
+ ___satfractusquha
+ ___satfractusqusa
+ ___satfractusquda
+ ___satfractusquta
+ ___satfractudqqq
+ ___satfractudqhq
+ ___satfractudqsq
+ ___satfractudqdq
+ ___satfractudqtq
+ ___satfractudqha
+ ___satfractudqsa
+ ___satfractudqda
+ ___satfractudqta
+ ___satfractudquqq2
+ ___satfractudquhq2
+ ___satfractudqusq2
+ ___satfractudqutq2
+ ___satfractudquha
+ ___satfractudqusa
+ ___satfractudquda
+ ___satfractudquta
+ ___satfractutqqq
+ ___satfractutqhq
+ ___satfractutqsq
+ ___satfractutqdq
+ ___satfractutqtq
+ ___satfractutqha
+ ___satfractutqsa
+ ___satfractutqda
+ ___satfractutqta
+ ___satfractutquqq2
+ ___satfractutquhq2
+ ___satfractutqusq2
+ ___satfractutqudq2
+ ___satfractutquha
+ ___satfractutqusa
+ ___satfractutquda
+ ___satfractutquta
+ ___satfractuhaqq
+ ___satfractuhahq
+ ___satfractuhasq
+ ___satfractuhadq
+ ___satfractuhatq
+ ___satfractuhaha
+ ___satfractuhasa
+ ___satfractuhada
+ ___satfractuhata
+ ___satfractuhauqq
+ ___satfractuhauhq
+ ___satfractuhausq
+ ___satfractuhaudq
+ ___satfractuhautq
+ ___satfractuhausa2
+ ___satfractuhauda2
+ ___satfractuhauta2
+ ___satfractusaqq
+ ___satfractusahq
+ ___satfractusasq
+ ___satfractusadq
+ ___satfractusatq
+ ___satfractusaha
+ ___satfractusasa
+ ___satfractusada
+ ___satfractusata
+ ___satfractusauqq
+ ___satfractusauhq
+ ___satfractusausq
+ ___satfractusaudq
+ ___satfractusautq
+ ___satfractusauha2
+ ___satfractusauda2
+ ___satfractusauta2
+ ___satfractudaqq
+ ___satfractudahq
+ ___satfractudasq
+ ___satfractudadq
+ ___satfractudatq
+ ___satfractudaha
+ ___satfractudasa
+ ___satfractudada
+ ___satfractudata
+ ___satfractudauqq
+ ___satfractudauhq
+ ___satfractudausq
+ ___satfractudaudq
+ ___satfractudautq
+ ___satfractudauha2
+ ___satfractudausa2
+ ___satfractudauta2
+ ___satfractutaqq
+ ___satfractutahq
+ ___satfractutasq
+ ___satfractutadq
+ ___satfractutatq
+ ___satfractutaha
+ ___satfractutasa
+ ___satfractutada
+ ___satfractutata
+ ___satfractutauqq
+ ___satfractutauhq
+ ___satfractutausq
+ ___satfractutaudq
+ ___satfractutautq
+ ___satfractutauha2
+ ___satfractutausa2
+ ___satfractutauda2
+ ___satfractqiqq
+ ___satfractqihq
+ ___satfractqisq
+ ___satfractqidq
+ ___satfractqitq
+ ___satfractqiha
+ ___satfractqisa
+ ___satfractqida
+ ___satfractqita
+ ___satfractqiuqq
+ ___satfractqiuhq
+ ___satfractqiusq
+ ___satfractqiudq
+ ___satfractqiutq
+ ___satfractqiuha
+ ___satfractqiusa
+ ___satfractqiuda
+ ___satfractqiuta
+ ___satfracthiqq
+ ___satfracthihq
+ ___satfracthisq
+ ___satfracthidq
+ ___satfracthitq
+ ___satfracthiha
+ ___satfracthisa
+ ___satfracthida
+ ___satfracthita
+ ___satfracthiuqq
+ ___satfracthiuhq
+ ___satfracthiusq
+ ___satfracthiudq
+ ___satfracthiutq
+ ___satfracthiuha
+ ___satfracthiusa
+ ___satfracthiuda
+ ___satfracthiuta
+ ___satfractsiqq
+ ___satfractsihq
+ ___satfractsisq
+ ___satfractsidq
+ ___satfractsitq
+ ___satfractsiha
+ ___satfractsisa
+ ___satfractsida
+ ___satfractsita
+ ___satfractsiuqq
+ ___satfractsiuhq
+ ___satfractsiusq
+ ___satfractsiudq
+ ___satfractsiutq
+ ___satfractsiuha
+ ___satfractsiusa
+ ___satfractsiuda
+ ___satfractsiuta
+ ___satfractdiqq
+ ___satfractdihq
+ ___satfractdisq
+ ___satfractdidq
+ ___satfractditq
+ ___satfractdiha
+ ___satfractdisa
+ ___satfractdida
+ ___satfractdita
+ ___satfractdiuqq
+ ___satfractdiuhq
+ ___satfractdiusq
+ ___satfractdiudq
+ ___satfractdiutq
+ ___satfractdiuha
+ ___satfractdiusa
+ ___satfractdiuda
+ ___satfractdiuta
+ ___satfracttiqq
+ ___satfracttihq
+ ___satfracttisq
+ ___satfracttidq
+ ___satfracttitq
+ ___satfracttiha
+ ___satfracttisa
+ ___satfracttida
+ ___satfracttita
+ ___satfracttiuqq
+ ___satfracttiuhq
+ ___satfracttiusq
+ ___satfracttiudq
+ ___satfracttiutq
+ ___satfracttiuha
+ ___satfracttiusa
+ ___satfracttiuda
+ ___satfracttiuta
+ ___satfractsfqq
+ ___satfractsfhq
+ ___satfractsfsq
+ ___satfractsfdq
+ ___satfractsftq
+ ___satfractsfha
+ ___satfractsfsa
+ ___satfractsfda
+ ___satfractsfta
+ ___satfractsfuqq
+ ___satfractsfuhq
+ ___satfractsfusq
+ ___satfractsfudq
+ ___satfractsfutq
+ ___satfractsfuha
+ ___satfractsfusa
+ ___satfractsfuda
+ ___satfractsfuta
+ ___satfractdfqq
+ ___satfractdfhq
+ ___satfractdfsq
+ ___satfractdfdq
+ ___satfractdftq
+ ___satfractdfha
+ ___satfractdfsa
+ ___satfractdfda
+ ___satfractdfta
+ ___satfractdfuqq
+ ___satfractdfuhq
+ ___satfractdfusq
+ ___satfractdfudq
+ ___satfractdfutq
+ ___satfractdfuha
+ ___satfractdfusa
+ ___satfractdfuda
+ ___satfractdfuta
+ ___fractunsqqqi
+ ___fractunsqqhi
+ ___fractunsqqsi
+ ___fractunsqqdi
+ ___fractunsqqti
+ ___fractunshqqi
+ ___fractunshqhi
+ ___fractunshqsi
+ ___fractunshqdi
+ ___fractunshqti
+ ___fractunssqqi
+ ___fractunssqhi
+ ___fractunssqsi
+ ___fractunssqdi
+ ___fractunssqti
+ ___fractunsdqqi
+ ___fractunsdqhi
+ ___fractunsdqsi
+ ___fractunsdqdi
+ ___fractunsdqti
+ ___fractunstqqi
+ ___fractunstqhi
+ ___fractunstqsi
+ ___fractunstqdi
+ ___fractunstqti
+ ___fractunshaqi
+ ___fractunshahi
+ ___fractunshasi
+ ___fractunshadi
+ ___fractunshati
+ ___fractunssaqi
+ ___fractunssahi
+ ___fractunssasi
+ ___fractunssadi
+ ___fractunssati
+ ___fractunsdaqi
+ ___fractunsdahi
+ ___fractunsdasi
+ ___fractunsdadi
+ ___fractunsdati
+ ___fractunstaqi
+ ___fractunstahi
+ ___fractunstasi
+ ___fractunstadi
+ ___fractunstati
+ ___fractunsuqqqi
+ ___fractunsuqqhi
+ ___fractunsuqqsi
+ ___fractunsuqqdi
+ ___fractunsuqqti
+ ___fractunsuhqqi
+ ___fractunsuhqhi
+ ___fractunsuhqsi
+ ___fractunsuhqdi
+ ___fractunsuhqti
+ ___fractunsusqqi
+ ___fractunsusqhi
+ ___fractunsusqsi
+ ___fractunsusqdi
+ ___fractunsusqti
+ ___fractunsudqqi
+ ___fractunsudqhi
+ ___fractunsudqsi
+ ___fractunsudqdi
+ ___fractunsudqti
+ ___fractunsutqqi
+ ___fractunsutqhi
+ ___fractunsutqsi
+ ___fractunsutqdi
+ ___fractunsutqti
+ ___fractunsuhaqi
+ ___fractunsuhahi
+ ___fractunsuhasi
+ ___fractunsuhadi
+ ___fractunsuhati
+ ___fractunsusaqi
+ ___fractunsusahi
+ ___fractunsusasi
+ ___fractunsusadi
+ ___fractunsusati
+ ___fractunsudaqi
+ ___fractunsudahi
+ ___fractunsudasi
+ ___fractunsudadi
+ ___fractunsudati
+ ___fractunsutaqi
+ ___fractunsutahi
+ ___fractunsutasi
+ ___fractunsutadi
+ ___fractunsutati
+ ___fractunsqiqq
+ ___fractunsqihq
+ ___fractunsqisq
+ ___fractunsqidq
+ ___fractunsqitq
+ ___fractunsqiha
+ ___fractunsqisa
+ ___fractunsqida
+ ___fractunsqita
+ ___fractunsqiuqq
+ ___fractunsqiuhq
+ ___fractunsqiusq
+ ___fractunsqiudq
+ ___fractunsqiutq
+ ___fractunsqiuha
+ ___fractunsqiusa
+ ___fractunsqiuda
+ ___fractunsqiuta
+ ___fractunshiqq
+ ___fractunshihq
+ ___fractunshisq
+ ___fractunshidq
+ ___fractunshitq
+ ___fractunshiha
+ ___fractunshisa
+ ___fractunshida
+ ___fractunshita
+ ___fractunshiuqq
+ ___fractunshiuhq
+ ___fractunshiusq
+ ___fractunshiudq
+ ___fractunshiutq
+ ___fractunshiuha
+ ___fractunshiusa
+ ___fractunshiuda
+ ___fractunshiuta
+ ___fractunssiqq
+ ___fractunssihq
+ ___fractunssisq
+ ___fractunssidq
+ ___fractunssitq
+ ___fractunssiha
+ ___fractunssisa
+ ___fractunssida
+ ___fractunssita
+ ___fractunssiuqq
+ ___fractunssiuhq
+ ___fractunssiusq
+ ___fractunssiudq
+ ___fractunssiutq
+ ___fractunssiuha
+ ___fractunssiusa
+ ___fractunssiuda
+ ___fractunssiuta
+ ___fractunsdiqq
+ ___fractunsdihq
+ ___fractunsdisq
+ ___fractunsdidq
+ ___fractunsditq
+ ___fractunsdiha
+ ___fractunsdisa
+ ___fractunsdida
+ ___fractunsdita
+ ___fractunsdiuqq
+ ___fractunsdiuhq
+ ___fractunsdiusq
+ ___fractunsdiudq
+ ___fractunsdiutq
+ ___fractunsdiuha
+ ___fractunsdiusa
+ ___fractunsdiuda
+ ___fractunsdiuta
+ ___fractunstiqq
+ ___fractunstihq
+ ___fractunstisq
+ ___fractunstidq
+ ___fractunstitq
+ ___fractunstiha
+ ___fractunstisa
+ ___fractunstida
+ ___fractunstita
+ ___fractunstiuqq
+ ___fractunstiuhq
+ ___fractunstiusq
+ ___fractunstiudq
+ ___fractunstiutq
+ ___fractunstiuha
+ ___fractunstiusa
+ ___fractunstiuda
+ ___fractunstiuta
+ ___satfractunsqiqq
+ ___satfractunsqihq
+ ___satfractunsqisq
+ ___satfractunsqidq
+ ___satfractunsqitq
+ ___satfractunsqiha
+ ___satfractunsqisa
+ ___satfractunsqida
+ ___satfractunsqita
+ ___satfractunsqiuqq
+ ___satfractunsqiuhq
+ ___satfractunsqiusq
+ ___satfractunsqiudq
+ ___satfractunsqiutq
+ ___satfractunsqiuha
+ ___satfractunsqiusa
+ ___satfractunsqiuda
+ ___satfractunsqiuta
+ ___satfractunshiqq
+ ___satfractunshihq
+ ___satfractunshisq
+ ___satfractunshidq
+ ___satfractunshitq
+ ___satfractunshiha
+ ___satfractunshisa
+ ___satfractunshida
+ ___satfractunshita
+ ___satfractunshiuqq
+ ___satfractunshiuhq
+ ___satfractunshiusq
+ ___satfractunshiudq
+ ___satfractunshiutq
+ ___satfractunshiuha
+ ___satfractunshiusa
+ ___satfractunshiuda
+ ___satfractunshiuta
+ ___satfractunssiqq
+ ___satfractunssihq
+ ___satfractunssisq
+ ___satfractunssidq
+ ___satfractunssitq
+ ___satfractunssiha
+ ___satfractunssisa
+ ___satfractunssida
+ ___satfractunssita
+ ___satfractunssiuqq
+ ___satfractunssiuhq
+ ___satfractunssiusq
+ ___satfractunssiudq
+ ___satfractunssiutq
+ ___satfractunssiuha
+ ___satfractunssiusa
+ ___satfractunssiuda
+ ___satfractunssiuta
+ ___satfractunsdiqq
+ ___satfractunsdihq
+ ___satfractunsdisq
+ ___satfractunsdidq
+ ___satfractunsditq
+ ___satfractunsdiha
+ ___satfractunsdisa
+ ___satfractunsdida
+ ___satfractunsdita
+ ___satfractunsdiuqq
+ ___satfractunsdiuhq
+ ___satfractunsdiusq
+ ___satfractunsdiudq
+ ___satfractunsdiutq
+ ___satfractunsdiuha
+ ___satfractunsdiusa
+ ___satfractunsdiuda
+ ___satfractunsdiuta
+ ___satfractunstiqq
+ ___satfractunstihq
+ ___satfractunstisq
+ ___satfractunstidq
+ ___satfractunstitq
+ ___satfractunstiha
+ ___satfractunstisa
+ ___satfractunstida
+ ___satfractunstita
+ ___satfractunstiuqq
+ ___satfractunstiuhq
+ ___satfractunstiusq
+ ___satfractunstiudq
+ ___satfractunstiutq
+ ___satfractunstiuha
+ ___satfractunstiusa
+ ___satfractunstiuda
+ ___satfractunstiuta
+}
+
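+# Blackfin prefixes user symbols with an underscore, which is why the
+# libgcc entry points appear throughout this file with three leading
+# underscores instead of the usual two.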
+%inherit GCC_4.4.0 GCC_4.3.0
+GCC_4.4.0 {
+ ___sync_fetch_and_add_1
+ ___sync_fetch_and_sub_1
+ ___sync_fetch_and_or_1
+ ___sync_fetch_and_and_1
+ ___sync_fetch_and_xor_1
+ ___sync_fetch_and_nand_1
+ ___sync_add_and_fetch_1
+ ___sync_sub_and_fetch_1
+ ___sync_or_and_fetch_1
+ ___sync_and_and_fetch_1
+ ___sync_xor_and_fetch_1
+ ___sync_nand_and_fetch_1
+ ___sync_bool_compare_and_swap_1
+ ___sync_val_compare_and_swap_1
+ ___sync_lock_test_and_set_1
+
+ ___sync_fetch_and_add_2
+ ___sync_fetch_and_sub_2
+ ___sync_fetch_and_or_2
+ ___sync_fetch_and_and_2
+ ___sync_fetch_and_xor_2
+ ___sync_fetch_and_nand_2
+ ___sync_add_and_fetch_2
+ ___sync_sub_and_fetch_2
+ ___sync_or_and_fetch_2
+ ___sync_and_and_fetch_2
+ ___sync_xor_and_fetch_2
+ ___sync_nand_and_fetch_2
+ ___sync_bool_compare_and_swap_2
+ ___sync_val_compare_and_swap_2
+ ___sync_lock_test_and_set_2
+
+ ___sync_fetch_and_add_4
+ ___sync_fetch_and_sub_4
+ ___sync_fetch_and_or_4
+ ___sync_fetch_and_and_4
+ ___sync_fetch_and_xor_4
+ ___sync_fetch_and_nand_4
+ ___sync_add_and_fetch_4
+ ___sync_sub_and_fetch_4
+ ___sync_or_and_fetch_4
+ ___sync_and_and_fetch_4
+ ___sync_xor_and_fetch_4
+ ___sync_nand_and_fetch_4
+ ___sync_bool_compare_and_swap_4
+ ___sync_val_compare_and_swap_4
+ ___sync_lock_test_and_set_4
+
+ ___sync_fetch_and_add_8
+ ___sync_fetch_and_sub_8
+ ___sync_fetch_and_or_8
+ ___sync_fetch_and_and_8
+ ___sync_fetch_and_xor_8
+ ___sync_fetch_and_nand_8
+ ___sync_add_and_fetch_8
+ ___sync_sub_and_fetch_8
+ ___sync_or_and_fetch_8
+ ___sync_and_and_fetch_8
+ ___sync_xor_and_fetch_8
+ ___sync_nand_and_fetch_8
+ ___sync_bool_compare_and_swap_8
+ ___sync_val_compare_and_swap_8
+ ___sync_lock_test_and_set_8
+
+ ___sync_fetch_and_add_16
+ ___sync_fetch_and_sub_16
+ ___sync_fetch_and_or_16
+ ___sync_fetch_and_and_16
+ ___sync_fetch_and_xor_16
+ ___sync_fetch_and_nand_16
+ ___sync_add_and_fetch_16
+ ___sync_sub_and_fetch_16
+ ___sync_or_and_fetch_16
+ ___sync_and_and_fetch_16
+ ___sync_xor_and_fetch_16
+ ___sync_nand_and_fetch_16
+ ___sync_bool_compare_and_swap_16
+ ___sync_val_compare_and_swap_16
+ ___sync_lock_test_and_set_16
+
+ ___sync_synchronize
+}
+
+%inherit GCC_4.5.0 GCC_4.4.0
+GCC_4.5.0 {
+ ___unordxf2
+ ___unordtf2
+}
diff --git a/gcc/config/bfin/linux-unwind.h b/gcc/config/bfin/linux-unwind.h
new file mode 100644
index 000000000..15bb2f12b
--- /dev/null
+++ b/gcc/config/bfin/linux-unwind.h
@@ -0,0 +1,164 @@
+/* DWARF2 EH unwinding support for Blackfin.
+ Copyright (C) 2007, 2009, 2012 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs.
+ Don't use this at all if inhibit_libc is used. */
+
+#ifndef inhibit_libc
+
+#include <signal.h>
+#include <sys/ucontext.h>
+
+#define MD_FALLBACK_FRAME_STATE_FOR bfin_fallback_frame_state
+
+static _Unwind_Reason_Code
+bfin_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ unsigned char *pc = context->ra;
+ struct sigcontext *sc;
+ long new_cfa;
+
+ /* P0=__NR_rt_sigreturn (X); EXCPT 0x0; */
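+  /* These three halfwords are presumably the kernel's rt-sigreturn
+     trampoline: 0xe128 0x00ad loads 0xad (173, taken here to be
+     __NR_rt_sigreturn on Linux/Blackfin) into P0, and 0x00a0 encodes
+     EXCPT 0x0, which raises the system-call exception.  */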
+ if (*(unsigned short *)pc == 0xe128
+ && *(unsigned short *)(pc + 2) == 0x00ad
+ && *(unsigned short *)(pc + 4) == 0x00a0)
+ {
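+      /* Assumed to mirror the layout of the kernel's struct rt_sigframe;
+         the CFA points at this structure inside the trampoline.  */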
+ struct rt_sigframe {
+ int sig;
+ siginfo_t *pinfo;
+ void *puc;
+ char retcode[8];
+ siginfo_t info;
+ struct ucontext uc;
+ } *rt_ = context->cfa;
+
+      /* The void * cast is necessary to avoid an aliasing warning.
+         The warning is technically justified, but harmless here, since
+         the memory is never accessed through an incompatible type.  */
+ sc = (struct sigcontext *)(void *)&rt_->uc.uc_mcontext.gregs;
+ }
+ else
+ return _URC_END_OF_STACK;
+
+ new_cfa = sc->sc_usp;
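+  /* In the DWARF column numbering used below, R0-R7 occupy columns 0-7,
+     P0-P5 columns 8-13, SP column 14 and FP column 15, so the CFA is
+     expressed as an offset from the saved user stack pointer.  */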
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = 14;
+ fs->regs.cfa_offset = new_cfa - (long) context->cfa;
+
+ fs->regs.reg[0].how = REG_SAVED_OFFSET;
+ fs->regs.reg[0].loc.offset = (long)&sc->sc_r0 - new_cfa;
+ fs->regs.reg[1].how = REG_SAVED_OFFSET;
+ fs->regs.reg[1].loc.offset = (long)&sc->sc_r1 - new_cfa;
+ fs->regs.reg[2].how = REG_SAVED_OFFSET;
+ fs->regs.reg[2].loc.offset = (long)&sc->sc_r2 - new_cfa;
+ fs->regs.reg[3].how = REG_SAVED_OFFSET;
+ fs->regs.reg[3].loc.offset = (long)&sc->sc_r3 - new_cfa;
+ fs->regs.reg[4].how = REG_SAVED_OFFSET;
+ fs->regs.reg[4].loc.offset = (long)&sc->sc_r4 - new_cfa;
+ fs->regs.reg[5].how = REG_SAVED_OFFSET;
+ fs->regs.reg[5].loc.offset = (long)&sc->sc_r5 - new_cfa;
+ fs->regs.reg[6].how = REG_SAVED_OFFSET;
+ fs->regs.reg[6].loc.offset = (long)&sc->sc_r6 - new_cfa;
+ fs->regs.reg[7].how = REG_SAVED_OFFSET;
+ fs->regs.reg[7].loc.offset = (long)&sc->sc_r7 - new_cfa;
+ fs->regs.reg[8].how = REG_SAVED_OFFSET;
+ fs->regs.reg[8].loc.offset = (long)&sc->sc_p0 - new_cfa;
+ fs->regs.reg[9].how = REG_SAVED_OFFSET;
+ fs->regs.reg[9].loc.offset = (long)&sc->sc_p1 - new_cfa;
+ fs->regs.reg[10].how = REG_SAVED_OFFSET;
+ fs->regs.reg[10].loc.offset = (long)&sc->sc_p2 - new_cfa;
+ fs->regs.reg[11].how = REG_SAVED_OFFSET;
+ fs->regs.reg[11].loc.offset = (long)&sc->sc_p3 - new_cfa;
+ fs->regs.reg[12].how = REG_SAVED_OFFSET;
+ fs->regs.reg[12].loc.offset = (long)&sc->sc_p4 - new_cfa;
+ fs->regs.reg[13].how = REG_SAVED_OFFSET;
+ fs->regs.reg[13].loc.offset = (long)&sc->sc_p5 - new_cfa;
+
+ fs->regs.reg[15].how = REG_SAVED_OFFSET;
+ fs->regs.reg[15].loc.offset = (long)&sc->sc_fp - new_cfa;
+ fs->regs.reg[16].how = REG_SAVED_OFFSET;
+ fs->regs.reg[16].loc.offset = (long)&sc->sc_i0 - new_cfa;
+ fs->regs.reg[17].how = REG_SAVED_OFFSET;
+ fs->regs.reg[17].loc.offset = (long)&sc->sc_i1 - new_cfa;
+ fs->regs.reg[18].how = REG_SAVED_OFFSET;
+ fs->regs.reg[18].loc.offset = (long)&sc->sc_i2 - new_cfa;
+ fs->regs.reg[19].how = REG_SAVED_OFFSET;
+ fs->regs.reg[19].loc.offset = (long)&sc->sc_i3 - new_cfa;
+ fs->regs.reg[20].how = REG_SAVED_OFFSET;
+ fs->regs.reg[20].loc.offset = (long)&sc->sc_b0 - new_cfa;
+ fs->regs.reg[21].how = REG_SAVED_OFFSET;
+ fs->regs.reg[21].loc.offset = (long)&sc->sc_b1 - new_cfa;
+ fs->regs.reg[22].how = REG_SAVED_OFFSET;
+ fs->regs.reg[22].loc.offset = (long)&sc->sc_b2 - new_cfa;
+ fs->regs.reg[23].how = REG_SAVED_OFFSET;
+ fs->regs.reg[23].loc.offset = (long)&sc->sc_b3 - new_cfa;
+ fs->regs.reg[24].how = REG_SAVED_OFFSET;
+ fs->regs.reg[24].loc.offset = (long)&sc->sc_l0 - new_cfa;
+ fs->regs.reg[25].how = REG_SAVED_OFFSET;
+ fs->regs.reg[25].loc.offset = (long)&sc->sc_l1 - new_cfa;
+ fs->regs.reg[26].how = REG_SAVED_OFFSET;
+ fs->regs.reg[26].loc.offset = (long)&sc->sc_l2 - new_cfa;
+ fs->regs.reg[27].how = REG_SAVED_OFFSET;
+ fs->regs.reg[27].loc.offset = (long)&sc->sc_l3 - new_cfa;
+ fs->regs.reg[28].how = REG_SAVED_OFFSET;
+ fs->regs.reg[28].loc.offset = (long)&sc->sc_m0 - new_cfa;
+ fs->regs.reg[29].how = REG_SAVED_OFFSET;
+ fs->regs.reg[29].loc.offset = (long)&sc->sc_m1 - new_cfa;
+ fs->regs.reg[30].how = REG_SAVED_OFFSET;
+ fs->regs.reg[30].loc.offset = (long)&sc->sc_m2 - new_cfa;
+ fs->regs.reg[31].how = REG_SAVED_OFFSET;
+ fs->regs.reg[31].loc.offset = (long)&sc->sc_m3 - new_cfa;
+ /* FIXME: Handle A0, A1, CC. */
+ fs->regs.reg[35].how = REG_SAVED_OFFSET;
+ fs->regs.reg[35].loc.offset = (long)&sc->sc_rets - new_cfa;
+ fs->regs.reg[36].how = REG_SAVED_OFFSET;
+ fs->regs.reg[36].loc.offset = (long)&sc->sc_pc - new_cfa;
+ fs->regs.reg[37].how = REG_SAVED_OFFSET;
+ fs->regs.reg[37].loc.offset = (long)&sc->sc_retx - new_cfa;
+
+ fs->regs.reg[40].how = REG_SAVED_OFFSET;
+ fs->regs.reg[40].loc.offset = (long)&sc->sc_astat - new_cfa;
+ fs->regs.reg[41].how = REG_SAVED_OFFSET;
+ fs->regs.reg[41].loc.offset = (long)&sc->sc_seqstat - new_cfa;
+
+ fs->regs.reg[44].how = REG_SAVED_OFFSET;
+ fs->regs.reg[44].loc.offset = (long)&sc->sc_lt0 - new_cfa;
+ fs->regs.reg[45].how = REG_SAVED_OFFSET;
+ fs->regs.reg[45].loc.offset = (long)&sc->sc_lt1 - new_cfa;
+ fs->regs.reg[46].how = REG_SAVED_OFFSET;
+ fs->regs.reg[46].loc.offset = (long)&sc->sc_lc0 - new_cfa;
+ fs->regs.reg[47].how = REG_SAVED_OFFSET;
+ fs->regs.reg[47].loc.offset = (long)&sc->sc_lc1 - new_cfa;
+ fs->regs.reg[48].how = REG_SAVED_OFFSET;
+ fs->regs.reg[48].loc.offset = (long)&sc->sc_lb0 - new_cfa;
+ fs->regs.reg[49].how = REG_SAVED_OFFSET;
+ fs->regs.reg[49].loc.offset = (long)&sc->sc_lb1 - new_cfa;
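+  /* RETS (column 35, saved above) holds the return address.  */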
+ fs->retaddr_column = 35;
+
+ return _URC_NO_REASON;
+}
+
+#endif /* ifndef inhibit_libc */
diff --git a/gcc/config/bfin/linux.h b/gcc/config/bfin/linux.h
new file mode 100644
index 000000000..a75074592
--- /dev/null
+++ b/gcc/config/bfin/linux.h
@@ -0,0 +1,54 @@
+/* Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
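+/* Make -mfdpic the default unless -mno-fdpic is given explicitly, and
+   always pass -micplb, since the kernel presumably runs user code with
+   ICPLBs enabled.  */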
+#undef SUBTARGET_DRIVER_SELF_SPECS
+#define SUBTARGET_DRIVER_SELF_SPECS \
+ "%{!mno-fdpic:-mfdpic} -micplb",
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} crtreloc.o%s \
+ crti.o%s %{shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+
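+/* With -mfast-fp, link -lbffastfp ahead of the usual libgcc/libc
+   sequence so that its routines (which presumably trade strict IEEE
+   conformance for speed) take precedence.  */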
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %{mfast-fp:-lbffastfp} %G %L %{static:--end-group} \
+ %{!static:%{mfast-fp:-lbffastfp} %G}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+ %{mfdpic: -m elf32bfinfd -z text} %{shared} %{pie} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -Bdynamic} \
+ %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker /lib/ld-uClibc.so.0} \
+ %{static}} -init __init -fini __fini"
+
+#define MD_UNWIND_SUPPORT "config/bfin/linux-unwind.h"
+
+#undef TARGET_SUPPORTS_SYNC_CALLS
+#define TARGET_SUPPORTS_SYNC_CALLS 1
diff --git a/gcc/config/bfin/predicates.md b/gcc/config/bfin/predicates.md
new file mode 100644
index 000000000..84bf59195
--- /dev/null
+++ b/gcc/config/bfin/predicates.md
@@ -0,0 +1,241 @@
+;; Predicate definitions for the Blackfin.
+;; Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+;; Contributed by Analog Devices.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Return nonzero iff OP is one of the integer constants 1 or 2.
+(define_predicate "pos_scale_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 1 || INTVAL (op) == 2")))
+
+;; Return nonzero iff OP is one of the integer constants 2 or 4.
+(define_predicate "scale_by_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 2 || INTVAL (op) == 4")))
+
+;; Return nonzero if OP is a constant that consists of two parts: lower
+;; bits all zero and upper bits all ones.  In that case, we can implement
+;; an AND operation as a sequence of two shifts.  Don't return nonzero
+;; if the constant would be cheap to load anyway.
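+;; For example, x & 0xFFFFF000 can be computed as (x >> 12) << 12 using
+;; logical shifts.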
+(define_predicate "highbits_operand"
+ (and (match_code "const_int")
+ (match_test "log2constp (-INTVAL (op)) && !satisfies_constraint_Ks7 (op)")))
+
+;; Return nonzero if OP is suitable as a right-hand side operand for an
+;; andsi3 operation.
+(define_predicate "rhs_andsi3_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "log2constp (~INTVAL (op)) || INTVAL (op) == 255 || INTVAL (op) == 65535"))))
+
+;; Return nonzero if OP is a register or a constant with exactly one bit
+;; set.
+(define_predicate "regorlog2_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "log2constp (INTVAL (op))"))))
+
+;; Return nonzero if OP is a register or an integer constant.
+(define_predicate "reg_or_const_int_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_code "const_int")))
+
+(define_predicate "const01_operand"
+ (and (match_code "const_int")
+ (match_test "op == const0_rtx || op == const1_rtx")))
+
+(define_predicate "const1_operand"
+ (and (match_code "const_int")
+ (match_test "op == const1_rtx")))
+
+(define_predicate "const3_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 3")))
+
+(define_predicate "vec_shift_operand"
+ (ior (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -16 && INTVAL (op) < 15"))
+ (match_operand 0 "register_operand")))
+
+;; Like register_operand, but make sure that hard regs have a valid mode.
+(define_predicate "valid_reg_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (REGNO (op) < FIRST_PSEUDO_REGISTER)
+ return HARD_REGNO_MODE_OK (REGNO (op), mode);
+ return 1;
+})
+
+;; Return nonzero if OP is a D register.
+(define_predicate "d_register_operand"
+ (and (match_code "reg")
+ (match_test "D_REGNO_P (REGNO (op))")))
+
+(define_predicate "p_register_operand"
+ (and (match_code "reg")
+ (match_test "P_REGNO_P (REGNO (op))")))
+
+(define_predicate "dp_register_operand"
+ (and (match_code "reg")
+ (match_test "D_REGNO_P (REGNO (op)) || P_REGNO_P (REGNO (op))")))
+
+;; Return nonzero if OP is a LC register.
+(define_predicate "lc_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == REG_LC0 || REGNO (op) == REG_LC1")))
+
+;; Return nonzero if OP is a LT register.
+(define_predicate "lt_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == REG_LT0 || REGNO (op) == REG_LT1")))
+
+;; Return nonzero if OP is a LB register.
+(define_predicate "lb_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == REG_LB0 || REGNO (op) == REG_LB1")))
+
+;; Return nonzero if OP is a register or a 7-bit signed constant.
+(define_predicate "reg_or_7bit_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_Ks7 (op)"))))
+
+;; Return nonzero if OP is a register other than a D or P register.
+(define_predicate "nondp_register_operand"
+ (match_operand 0 "register_operand")
+{
+ unsigned int regno;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ regno = REGNO (op);
+ return (regno >= FIRST_PSEUDO_REGISTER || !DP_REGNO_P (regno));
+})
+
+;; Return nonzero if OP is a register other than a D or P register, or a MEM.
+(define_predicate "nondp_reg_or_memory_operand"
+ (ior (match_operand 0 "nondp_register_operand")
+ (match_operand 0 "memory_operand")))
+
+;; Return nonzero if OP is a register or, when negated, a 7-bit signed
+;; constant.
+(define_predicate "reg_or_neg7bit_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_KN7 (op)"))))
+
+;; Used for secondary reloads, this predicate returns 1 if OP is of the
+;; form (plus (fp|sp) (const_int)).
+(define_predicate "fp_plus_const_operand"
+ (match_code "plus")
+{
+ rtx op1, op2;
+
+ op1 = XEXP (op, 0);
+ op2 = XEXP (op, 1);
+ return (REG_P (op1)
+ && (REGNO (op1) == FRAME_POINTER_REGNUM
+ || REGNO (op1) == STACK_POINTER_REGNUM)
+ && GET_CODE (op2) == CONST_INT);
+})
+
+;; Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
+;; possibly with an offset.
+(define_predicate "symbolic_operand"
+ (ior (match_code "symbol_ref,label_ref")
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (op,0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF)
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT"))))
+
+;; Returns 1 if OP is a plain constant or matched by symbolic_operand.
+(define_predicate "symbolic_or_const_operand"
+ (ior (match_code "const_int,const_double")
+ (match_operand 0 "symbolic_operand")))
+
+;; Returns 1 if OP is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (match_code "symbol_ref"))
+
+;; True for any register that is neither virtual nor eliminable.  Used in
+;; places where instantiating such a register may cause the pattern not to
+;; be recognized.
+(define_predicate "register_no_elim_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return !(op == arg_pointer_rtx
+ || op == frame_pointer_rtx
+ || (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ && REGNO (op) <= LAST_VIRTUAL_REGISTER));
+})
+
+;; Test for an operator valid in a BImode conditional branch
+(define_predicate "bfin_bimode_comparison_operator"
+ (match_code "eq,ne"))
+
+;; Test for an operator whose result is accessible with movbisi.
+(define_predicate "bfin_direct_comparison_operator"
+ (match_code "eq,lt,le,leu,ltu"))
+
+;; The following three predicates are used to compute the addrtype
+;; attribute.  They return true if passed a memory address usable for a
+;; 16-bit load or store through a P register or an I register,
+;; respectively.  If none of them matches, we know we have a 32-bit
+;; instruction.
+;; We subdivide the P case into normal P registers and SP/FP.  Speculative
+;; loads through SP and FP can be assumed to be harmless, which matters
+;; for the anomaly workaround code.
+
+(define_predicate "mem_p_address_operand"
+ (match_code "mem")
+{
+ if (effective_address_32bit_p (op, mode))
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == PLUS || GET_RTX_CLASS (GET_CODE (op)) == RTX_AUTOINC)
+ op = XEXP (op, 0);
+ gcc_assert (REG_P (op));
+ return PREG_P (op) && op != stack_pointer_rtx && op != frame_pointer_rtx;
+})
+
+(define_predicate "mem_spfp_address_operand"
+ (match_code "mem")
+{
+ if (effective_address_32bit_p (op, mode))
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == PLUS || GET_RTX_CLASS (GET_CODE (op)) == RTX_AUTOINC)
+ op = XEXP (op, 0);
+ gcc_assert (REG_P (op));
+ return op == stack_pointer_rtx || op == frame_pointer_rtx;
+})
+
+(define_predicate "mem_i_address_operand"
+ (match_code "mem")
+{
+ if (effective_address_32bit_p (op, mode))
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == PLUS || GET_RTX_CLASS (GET_CODE (op)) == RTX_AUTOINC)
+ op = XEXP (op, 0);
+ gcc_assert (REG_P (op));
+ return IREG_P (op);
+})
diff --git a/gcc/config/bfin/print-sysroot-suffix.sh b/gcc/config/bfin/print-sysroot-suffix.sh
new file mode 100644
index 000000000..c33ff47c3
--- /dev/null
+++ b/gcc/config/bfin/print-sysroot-suffix.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# Copyright (C) 2007 Free Software Foundation, Inc.
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# This script takes the following arguments:
+#
+# - the target sysroot
+# - the value of $(MULTILIB_MATCHES)
+# - the value of $(MULTILIB_OPTIONS)
+#
+# It uses these arguments to construct a definition of SYSROOT_SUFFIX_SPEC,
+# which it prints to the standard output.  For each multilib directory FOO,
+# the script checks whether $sysroot has a subdirectory FOO; if so, it maps
+# all compatible command-line options to the suffix /FOO, and otherwise it
+# adds no suffix for those options.  The suffixes are concatenated, with
+# one subspec for each space-separated entry in $(MULTILIB_OPTIONS).
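+#
+# As a simplified, hypothetical example: with MULTILIB_OPTIONS set to
+# "mcpu=bf532-none", an empty MULTILIB_MATCHES, and a sysroot containing
+# a bf532-none subdirectory, the script would print:
+#
+#   #undef SYSROOT_SUFFIX_SPEC
+#   #define SYSROOT_SUFFIX_SPEC "%{mcpu=bf532-none:/bf532-none}"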
+set -e
+sysroot=$1
+matches=$2
+options=$3
+
+# For each multilib option OPT, add to $substs a sed command of the
+# form "-e 's/OPT/OPT/'".
+substs=""
+for option in `echo "$options" | tr '/' ' '`
+do
+ substs="$substs -e 's/$option/$option/g'"
+done
+
+# For each ALIAS=CANONICAL entry in $MULTILIB_MATCHES, look for sed
+# arguments in $substs of the form "-e 's/CANONICAL/.../'". Replace
+# such entries with "-e 's/CANONICAL/ALIAS|.../'". Both the ALIAS and
+# CANONICAL parts of $MULTILIB_MATCHES use '?' to stand for '='.
+#
+# After this loop, a command of the form "echo FOO | eval sed $substs"
+# will replace a canonical option FOO with a %{...}-style spec pattern.
+for match in $matches
+do
+ canonical=`echo "$match" | sed -e 's/=.*//' -e 's/?/=/g'`
+ alias=`echo "$match" | sed -e 's/.*=//' -e 's/?/=/g'`
+ substs=`echo "$substs" | sed -e "s,s/$canonical/,&$alias|,"`
+done
+
+# Build up the final SYSROOT_SUFFIX_SPEC in $spec.
+spec=
+for combo in $options
+do
+ # See which option alternatives in $combo have their own sysroot
+ # directory. Create a subspec of the form "%{PAT1:/DIR1;...;PATn:DIRn}"
+ # from each such option OPTi, where DIRi is the directory associated
+ # with OPTi and PATi is the result of passing OPTi through $substs.
+ subspec=
+ for option in `echo "$combo" | tr '/' ' '`
+ do
+ dir=`echo "$option" | sed 's/mcpu=//'`
+ if test -d "$sysroot/$dir"; then
+ test -z "$subspec" || subspec="$subspec;"
+ subspec="$subspec"`echo "$option" | eval sed $substs`":/$dir"
+ fi
+ done
+ # Concatenate all the subspecs.
+ test -z "$subspec" || spec="$spec%{$subspec}"
+done
+if test -n "$spec"; then
+ echo "#undef SYSROOT_SUFFIX_SPEC"
+ echo "#define SYSROOT_SUFFIX_SPEC \"$spec\""
+fi
diff --git a/gcc/config/bfin/rtems.h b/gcc/config/bfin/rtems.h
new file mode 100644
index 000000000..6fa6ef10e
--- /dev/null
+++ b/gcc/config/bfin/rtems.h
@@ -0,0 +1,28 @@
+/* Definitions for RTEMS targeting a Blackfin.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Contributed by Ralf Corsépius (ralf.corsepius@rtems.org).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Target OS preprocessor built-ins. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__rtems__"); \
+ builtin_assert ("system=rtems"); \
+ } \
+ while (0)
diff --git a/gcc/config/bfin/sync.md b/gcc/config/bfin/sync.md
new file mode 100644
index 000000000..7025af497
--- /dev/null
+++ b/gcc/config/bfin/sync.md
@@ -0,0 +1,178 @@
+;; GCC machine description for Blackfin synchronization instructions.
+;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+;; Contributed by Analog Devices.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_code_iterator FETCHOP [plus minus ior and xor])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (and "and") (xor "xor")])
+(define_code_attr fetchop_addr
+ [(plus "1072") (minus "1088") (ior "1104") (and "1120") (xor "1136")])
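+;; The fetchop_addr constants are the fixed userspace addresses of the
+;; atomic helper routines exported by the kernel's fixed code area
+;; (presumably 0x430 add, 0x440 sub, 0x450 ior, 0x460 and, 0x470 xor,
+;; with compare-and-swap at 0x420); the insns below simply call them.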
+
+(define_insn "sync_<fetchop_name>si_internal"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "qA"))
+ (unspec:SI
+ [(FETCHOP:SI (mem:SI (match_dup 0))
+ (match_operand:SI 1 "register_operand" "q0"))
+ (match_operand:SI 2 "register_no_elim_operand" "a")]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=q0"))
+ (clobber (match_scratch:SI 4 "=q1"))
+ (clobber (reg:SI REG_RETS))]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+ "call (%2);"
+ [(set_attr "type" "call")])
+
+(define_expand "sync_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "memory_operand" "+m")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "q0"))
+ (match_dup 2)]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (reg:SI REG_RETS))])]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+{
+ if (!REG_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = shallow_copy_rtx (operands[0]);
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+ }
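+  /* The helper's fixed address must end up in a P register to satisfy
+     the "a" constraint of the matching internal insn.  */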
+ operands[2] = force_reg (Pmode, GEN_INT (<fetchop_addr>));
+})
+
+(define_insn "sync_old_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "register_operand" "=q1")
+ (mem:SI (match_operand:SI 1 "register_operand" "qA")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(FETCHOP:SI (mem:SI (match_dup 1))
+ (match_operand:SI 2 "register_operand" "q0"))
+ (match_operand:SI 3 "register_no_elim_operand" "a")]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 4 "=q0"))
+ (clobber (reg:SI REG_RETS))]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+ "call (%3);"
+ [(set_attr "type" "call")])
+
+(define_expand "sync_old_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "register_operand" ""))
+ (match_dup 3)]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (reg:SI REG_RETS))])]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+{
+ if (!REG_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = shallow_copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }
+ operands[3] = force_reg (Pmode, GEN_INT (<fetchop_addr>));
+})
+
+(define_insn "sync_new_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "register_operand" "=q0")
+ (unspec:SI
+ [(FETCHOP:SI
+ (mem:SI (match_operand:SI 1 "register_operand" "qA"))
+ (match_operand:SI 2 "register_operand" "q0"))
+ (match_operand:SI 3 "register_no_elim_operand" "a")]
+ UNSPEC_ATOMIC))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(FETCHOP:SI (mem:SI (match_dup 1)) (match_dup 2))
+ (match_dup 3)]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 4 "=q1"))
+ (clobber (reg:SI REG_RETS))]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+ "call (%3);"
+ [(set_attr "type" "call")])
+
+(define_expand "sync_new_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI
+ [(FETCHOP:SI (match_operand:SI 1 "memory_operand" "")
+ (match_operand:SI 2 "register_operand" ""))
+ (match_dup 3)]
+ UNSPEC_ATOMIC))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (reg:SI REG_RETS))])]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+{
+ if (!REG_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = shallow_copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }
+ operands[3] = force_reg (Pmode, GEN_INT (<fetchop_addr>));
+})
+
+(define_insn "sync_compare_and_swapsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=q0")
+ (mem:SI (match_operand:SI 1 "register_operand" "qA")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(mem:SI (match_dup 1))
+ (match_operand:SI 2 "register_operand" "q1")
+ (match_operand:SI 3 "register_operand" "q2")
+ (match_operand:SI 4 "register_no_elim_operand" "a")]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI REG_RETS))]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+ "call (%4);"
+ [(set_attr "type" "call")])
+
+(define_expand "sync_compare_and_swapsi"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:SI
+ [(match_dup 1)
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")
+ (match_dup 4)]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI REG_RETS))])]
+ "TARGET_SUPPORTS_SYNC_CALLS"
+{
+ if (!REG_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = shallow_copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }
+ operands[4] = force_reg (Pmode, GEN_INT (0x420));
+})
diff --git a/gcc/config/bfin/t-bfin b/gcc/config/bfin/t-bfin
new file mode 100644
index 000000000..37b6871c1
--- /dev/null
+++ b/gcc/config/bfin/t-bfin
@@ -0,0 +1,43 @@
+# Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+## Target part of the Makefile
+
+LIB1ASMSRC = bfin/lib1funcs.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3
+
+EXTRA_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
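+# Build the soft-float routines from the generic fp-bit.c sources:
+# dp-bit.c provides the double-precision routines, and fp-bit.c
+# (compiled with FLOAT defined) the single-precision ones.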
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/bfin/crti.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/bfin/crti.s
+
+$(T)crtn.o: $(srcdir)/config/bfin/crtn.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/bfin/crtn.s
diff --git a/gcc/config/bfin/t-bfin-elf b/gcc/config/bfin/t-bfin-elf
new file mode 100644
index 000000000..39209f628
--- /dev/null
+++ b/gcc/config/bfin/t-bfin-elf
@@ -0,0 +1,81 @@
+# Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+## Target part of the Makefile
+
+LIB1ASMSRC = bfin/lib1funcs.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _umulsi3_highpart
+LIB1ASMFUNCS += _smulsi3_highpart
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+CRTSTUFF_T_CFLAGS = -fpic
+TARGET_LIBGCC2_CFLAGS = -fpic
+
+MULTILIB_OPTIONS=mcpu=bf532-none
+MULTILIB_OPTIONS+=mid-shared-library/msep-data/mfdpic mleaf-id-shared-library
+MULTILIB_DIRNAMES=bf532-none mid-shared-library msep-data mfdpic mleaf-id-shared-library
+
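+# In MULTILIB_MATCHES, '?' stands for '='; each entry below makes another
+# -mcpu= variant reuse the bf532-none multilib.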
+MULTILIB_MATCHES=mcpu?bf532-none=mcpu?bf512-none mcpu?bf532-none=mcpu?bf514-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf516-none mcpu?bf532-none=mcpu?bf518-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf522-none mcpu?bf532-none=mcpu?bf523-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf524-none mcpu?bf532-none=mcpu?bf525-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf526-none mcpu?bf532-none=mcpu?bf527-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf531-none mcpu?bf532-none=mcpu?bf533-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf534-none mcpu?bf532-none=mcpu?bf536-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf537-none mcpu?bf532-none=mcpu?bf538-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf539-none mcpu?bf532-none=mcpu?bf542-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf542m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf561-none
+
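+# -mleaf-id-shared-library is only meaningful together with
+# -mid-shared-library, so all other combinations are excluded.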
+MULTILIB_EXCEPTIONS=mleaf-id-shared-library*
+MULTILIB_EXCEPTIONS+=mcpu=bf532-none/mleaf-id-shared-library*
+MULTILIB_EXCEPTIONS+=*mfdpic/mleaf-id-shared-library*
+MULTILIB_EXCEPTIONS+=*msep-data/mleaf-id-shared-library*
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/bfin/crti.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/bfin/crti.s
+
+$(T)crtn.o: $(srcdir)/config/bfin/crtn.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/bfin/crtn.s
+
+$(T)crtlibid.o: $(srcdir)/config/bfin/crtlibid.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtlibid.o -x assembler-with-cpp \
+ $(srcdir)/config/bfin/crtlibid.s
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crti.o crtn.o crtlibid.o
diff --git a/gcc/config/bfin/t-bfin-linux b/gcc/config/bfin/t-bfin-linux
new file mode 100644
index 000000000..f7ba95501
--- /dev/null
+++ b/gcc/config/bfin/t-bfin-linux
@@ -0,0 +1,72 @@
+# Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+## Target part of the Makefile
+
+LIB1ASMSRC = bfin/lib1funcs.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _umulsi3_highpart
+LIB1ASMFUNCS += _smulsi3_highpart
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+CRTSTUFF_T_CFLAGS = -fpic
+TARGET_LIBGCC2_CFLAGS = -fpic
+
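+# Only the CPU multilib is needed here: the Linux target builds FDPIC
+# code by default, so no shared-library code-model options are
+# multilibbed.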
+MULTILIB_OPTIONS=mcpu=bf532-none
+MULTILIB_DIRNAMES=bf532-none
+
+MULTILIB_MATCHES=mcpu?bf532-none=mcpu?bf512-none mcpu?bf532-none=mcpu?bf514-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf516-none mcpu?bf532-none=mcpu?bf518-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf522-none mcpu?bf532-none=mcpu?bf523-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf524-none mcpu?bf532-none=mcpu?bf525-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf526-none mcpu?bf532-none=mcpu?bf527-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf531-none mcpu?bf532-none=mcpu?bf533-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf534-none mcpu?bf532-none=mcpu?bf536-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf537-none mcpu?bf532-none=mcpu?bf538-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf539-none mcpu?bf532-none=mcpu?bf542-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf542m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf561-none
+
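+# Version script used when linking the shared libgcc; it controls
+# which symbols are exported and what version tags they receive.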
+SHLIB_MAPFILES=$(srcdir)/config/bfin/libgcc-bfin.ver
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o
+
+# This rule uses MULTILIB_OPTIONS and MULTILIB_MATCHES to generate a
+# definition of SYSROOT_SUFFIX_SPEC, so that each -mcpu multilib can
+# be given its own sysroot suffix.
+linux-sysroot-suffix.h: $(srcdir)/config/bfin/print-sysroot-suffix.sh
+ $(SHELL) $(srcdir)/config/bfin/print-sysroot-suffix.sh \
+ "$(SYSTEM_HEADER_DIR)/../.." "$(MULTILIB_MATCHES)" \
+ "$(MULTILIB_OPTIONS)" > $@
+
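+# Listing the header in generated_files ensures it is generated before
+# the host objects that include it are compiled.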
+generated_files += linux-sysroot-suffix.h
diff --git a/gcc/config/bfin/t-bfin-uclinux b/gcc/config/bfin/t-bfin-uclinux
new file mode 100644
index 000000000..eb6d2253e
--- /dev/null
+++ b/gcc/config/bfin/t-bfin-uclinux
@@ -0,0 +1,72 @@
+# Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+## Target part of the Makefile
+
+LIB1ASMSRC = bfin/lib1funcs.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _umulsi3_highpart
+LIB1ASMFUNCS += _smulsi3_highpart
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+CRTSTUFF_T_CFLAGS = -fpic
+TARGET_LIBGCC2_CFLAGS = -fpic
+
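+# Same CPU multilib set as the ELF target, but without the mfdpic
+# variant; the FLAT-format uClinux target does not use FDPIC.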
+MULTILIB_OPTIONS=mcpu=bf532-none
+MULTILIB_OPTIONS+=mid-shared-library/msep-data mleaf-id-shared-library
+MULTILIB_DIRNAMES=bf532-none mid-shared-library msep-data mleaf-id-shared-library
+
+MULTILIB_MATCHES=mcpu?bf532-none=mcpu?bf512-none mcpu?bf532-none=mcpu?bf514-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf516-none mcpu?bf532-none=mcpu?bf518-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf522-none mcpu?bf532-none=mcpu?bf523-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf524-none mcpu?bf532-none=mcpu?bf525-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf526-none mcpu?bf532-none=mcpu?bf527-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf531-none mcpu?bf532-none=mcpu?bf533-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf534-none mcpu?bf532-none=mcpu?bf536-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf537-none mcpu?bf532-none=mcpu?bf538-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf539-none mcpu?bf532-none=mcpu?bf542-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf542m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf544m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf547m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf548m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf549m-none
+MULTILIB_MATCHES+=mcpu?bf532-none=mcpu?bf561-none
+
+MULTILIB_EXCEPTIONS=mleaf-id-shared-library*
+MULTILIB_EXCEPTIONS+=mcpu=bf532-none/mleaf-id-shared-library*
+MULTILIB_EXCEPTIONS+=*msep-data/mleaf-id-shared-library*
+
+# Assemble startup files.
+$(T)crtlibid.o: $(srcdir)/config/bfin/crtlibid.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtlibid.o -x assembler-with-cpp \
+ $(srcdir)/config/bfin/crtlibid.s
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtlibid.o
diff --git a/gcc/config/bfin/t-rtems b/gcc/config/bfin/t-rtems
new file mode 100644
index 000000000..728ab1c4f
--- /dev/null
+++ b/gcc/config/bfin/t-rtems
@@ -0,0 +1,6 @@
+# Multilibs for bfin RTEMS targets.
+
+MULTILIB_OPTIONS =
+MULTILIB_DIRNAMES =
+MULTILIB_EXTRA_OPTS =
+MULTILIB_EXCEPTIONS =
diff --git a/gcc/config/bfin/uclinux.h b/gcc/config/bfin/uclinux.h
new file mode 100644
index 000000000..6001b2364
--- /dev/null
+++ b/gcc/config/bfin/uclinux.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
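+/* Include crt1.o only when linking an executable (%{!shared:...});
+   crti.o, crtbegin.o and crtlibid.o are always pulled in.  In specs,
+   %O expands to the object-file suffix and %s makes the driver search
+   its standard startfile paths for the named file.  */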
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: crt1%O%s} crti%O%s crtbegin%O%s crtlibid%O%s"
+
+#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
+
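+/* linux-unwind.h supplies MD_FALLBACK_FRAME_STATE_FOR so the DWARF
+   unwinder can step through Linux signal frames.  */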
+#define MD_UNWIND_SUPPORT "config/bfin/linux-unwind.h"
+
+/* Like the definition in gcc.c, but on uClinux every link is
+   static.  */
+#define MFWRAP_SPEC " %{fmudflap|fmudflapth: \
+ --wrap=malloc --wrap=free --wrap=calloc --wrap=realloc\
+ --wrap=mmap --wrap=munmap --wrap=alloca\
+ %{fmudflapth: --wrap=pthread_create\
+}} %{fmudflap|fmudflapth: --wrap=main}"
+
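+/* Expand the __sync_* builtins as library calls; Blackfin Linux
+   kernels provide the atomic-assist code that the libgcc versions of
+   these routines rely on.  */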
+#undef TARGET_SUPPORTS_SYNC_CALLS
+#define TARGET_SUPPORTS_SYNC_CALLS 1