path: root/gcc/config/lm32
author     upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
committer  upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
commit     554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree       976dc5ab7fddf506dadce60ae936f43f58787092 /gcc/config/lm32
download   cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2
           cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz
obtained gcc-4.6.4.tar.bz2 from upstream website; (tag: upstream)
verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. Downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. If you have obtained the source via the command 'git clone', however, do note that the line endings of files in your working directory might differ from the line endings of the respective files in the upstream repository.
Diffstat (limited to 'gcc/config/lm32')
-rw-r--r--   gcc/config/lm32/constraints.md        57
-rw-r--r--   gcc/config/lm32/lm32-protos.h         39
-rw-r--r--   gcc/config/lm32/lm32.c              1248
-rw-r--r--   gcc/config/lm32/lm32.h               556
-rw-r--r--   gcc/config/lm32/lm32.md              996
-rw-r--r--   gcc/config/lm32/lm32.opt              40
-rw-r--r--   gcc/config/lm32/predicates.md         77
-rw-r--r--   gcc/config/lm32/rtems.h               32
-rw-r--r--   gcc/config/lm32/sfp-machine.h         51
-rw-r--r--   gcc/config/lm32/t-fprules-softfp       5
-rw-r--r--   gcc/config/lm32/t-lm32                 2
-rw-r--r--   gcc/config/lm32/uclinux-elf.h         84
12 files changed, 3187 insertions, 0 deletions
diff --git a/gcc/config/lm32/constraints.md b/gcc/config/lm32/constraints.md
new file mode 100644
index 000000000..a8c7f97e2
--- /dev/null
+++ b/gcc/config/lm32/constraints.md
@@ -0,0 +1,57 @@
+;; Constraint definitions for Lattice Mico32 architecture.
+;; Contributed by Jon Beniston <jon@beniston.com>
+;;
+;; Copyright (C) 2009 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constraint "J"
+ "The value 0."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "K"
+ "A signed 16-bit immediate in the range -32768 to 32767."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -32768, 32767)")))
+
+(define_constraint "L"
+ "An unsigned 16-bit immediate in the range 0 to 65535."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 65535)")))
+
+(define_constraint "M"
+ "The value 1."
+ (and (match_code "const_int")
+ (match_test "ival == 1")))
+
+(define_constraint "U"
+ "A shifted signed 16-bit constant appropriate for orhi."
+ (and (match_code "const_int")
+ (match_test "(ival & 0xffff) == 0
+ && (ival >> 31 == -1 || ival >> 31 == 0)")))
+
+(define_constraint "S"
+ "A symbol in the small data section."
+ (match_operand 0 "no_pic_small_symbol"))
+
+(define_constraint "Y"
+ "A high part of a symbol."
+ (and (match_code "high")
+ (ior (ior (match_code "symbol_ref" "0")
+ (match_code "label_ref" "0"))
+ (match_code "const" "0"))))
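
The immediate constraints above boil down to simple range tests on the constant's value. As a rough illustration (a standalone C sketch, not part of the port; the fits_* helper names are invented and a plain long long stands in for HOST_WIDE_INT):

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: range tests mirroring the J, K, L, M and U constraints
   above.  'ival' stands in for the INTVAL of a CONST_INT.  */

static bool fits_J (long long ival) { return ival == 0; }
static bool fits_M (long long ival) { return ival == 1; }

/* K: signed 16-bit immediate, -32768 .. 32767.  */
static bool fits_K (long long ival) { return ival >= -32768 && ival <= 32767; }

/* L: unsigned 16-bit immediate, 0 .. 65535.  */
static bool fits_L (long long ival) { return ival >= 0 && ival <= 65535; }

/* U: low 16 bits clear and value representable in 32 bits, i.e.
   something a single orhi can materialise.  */
static bool fits_U (long long ival)
{
  return (ival & 0xffff) == 0 && (ival >> 31 == -1 || ival >> 31 == 0);
}

int main (void)
{
  printf ("%d %d %d %d %d\n",
          fits_J (0), fits_M (1), fits_K (-32768),
          fits_L (65535), fits_U (0x7fff0000));
  return 0;
}
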
diff --git a/gcc/config/lm32/lm32-protos.h b/gcc/config/lm32/lm32-protos.h
new file mode 100644
index 000000000..bc086d2ee
--- /dev/null
+++ b/gcc/config/lm32/lm32-protos.h
@@ -0,0 +1,39 @@
+/* Prototypes of target machine functions, Lattice Mico32 architecture.
+ Contributed by Jon Beniston <jon@beniston.com>
+
+ Copyright (C) 2009, 2010 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+extern int lm32_return_in_memory (tree type);
+extern void lm32_declare_object (FILE *stream, char *name, char *init_string,
+ char *final_string, int size);
+extern void lm32_expand_prologue (void);
+extern void lm32_expand_epilogue (void);
+extern void lm32_print_operand (FILE *file, rtx op, int letter);
+extern void lm32_print_operand_address (FILE *file, rtx addr);
+extern HOST_WIDE_INT lm32_compute_initial_elimination_offset (int from,
+ int to);
+extern int lm32_can_use_return (void);
+extern rtx lm32_return_addr_rtx (int count, rtx frame);
+extern int lm32_expand_block_move (rtx *);
+extern int nonpic_symbol_mentioned_p (rtx);
+extern rtx lm32_legitimize_pic_address (rtx, enum machine_mode, rtx);
+extern void lm32_expand_scc (rtx operands[]);
+extern void lm32_expand_conditional_branch (rtx operands[]);
+extern bool lm32_move_ok (enum machine_mode, rtx operands[2]);
+extern bool lm32_legitimate_constant_p (rtx);
diff --git a/gcc/config/lm32/lm32.c b/gcc/config/lm32/lm32.c
new file mode 100644
index 000000000..2c7131a5d
--- /dev/null
+++ b/gcc/config/lm32/lm32.c
@@ -0,0 +1,1248 @@
+/* Subroutines used for code generation on the Lattice Mico32 architecture.
+ Contributed by Jon Beniston <jon@beniston.com>
+
+ Copyright (C) 2009, 2010 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "output.h"
+#include "tree.h"
+#include "expr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "ggc.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "tm-constrs.h"
+#include "df.h"
+
+struct lm32_frame_info
+{
+ HOST_WIDE_INT total_size; /* number of bytes of entire frame. */
+ HOST_WIDE_INT callee_size; /* number of bytes to save callee saves. */
+ HOST_WIDE_INT pretend_size; /* number of bytes we pretend caller did. */
+ HOST_WIDE_INT args_size; /* number of bytes for outgoing arguments. */
+ HOST_WIDE_INT locals_size; /* number of bytes for local variables. */
+ unsigned int reg_save_mask; /* mask of saved registers. */
+};
+
+/* Prototypes for static functions. */
+static rtx emit_add (rtx dest, rtx src0, rtx src1);
+static void expand_save_restore (struct lm32_frame_info *info, int op);
+static void stack_adjust (HOST_WIDE_INT amount);
+static bool lm32_in_small_data_p (const_tree);
+static void lm32_setup_incoming_varargs (CUMULATIVE_ARGS * cum,
+ enum machine_mode mode, tree type,
+ int *pretend_size, int no_rtl);
+static bool lm32_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed);
+static bool lm32_can_eliminate (const int, const int);
+static bool
+lm32_legitimate_address_p (enum machine_mode mode, rtx x, bool strict);
+static HOST_WIDE_INT lm32_compute_frame_size (int size);
+static void lm32_option_override (void);
+static rtx lm32_function_arg (CUMULATIVE_ARGS * cum,
+ enum machine_mode mode, const_tree type,
+ bool named);
+static void lm32_function_arg_advance (CUMULATIVE_ARGS * cum,
+ enum machine_mode mode,
+ const_tree type, bool named);
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
+static const struct default_options lm32_option_optimization_table[] =
+ {
+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE lm32_option_override
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE lm32_option_optimization_table
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS lm32_rtx_costs
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P lm32_in_small_data_p
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS lm32_setup_incoming_varargs
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG lm32_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE lm32_function_arg_advance
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -0x8000
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE lm32_can_eliminate
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P lm32_legitimate_address_p
+#undef TARGET_EXCEPT_UNWIND_INFO
+#define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Current frame information calculated by lm32_compute_frame_size. */
+static struct lm32_frame_info current_frame_info;
+
+/* Return non-zero if the given return type should be returned in memory. */
+
+int
+lm32_return_in_memory (tree type)
+{
+ HOST_WIDE_INT size;
+
+ if (!AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+ return 0;
+ }
+
+ size = int_size_in_bytes (type);
+ if (size >= 0 && size <= UNITS_PER_WORD)
+ {
+ /* If it can fit in one register. */
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Generate and emit a word-sized add instruction. */
+
+static rtx
+emit_add (rtx dest, rtx src0, rtx src1)
+{
+ rtx insn;
+ insn = emit_insn (gen_addsi3 (dest, src0, src1));
+ return insn;
+}
+
+/* Generate the code to compare (and possibly branch) two integer values.
+ TEST_CODE is the comparison code we are trying to emulate
+ (or implement directly).
+ RESULT is where to store the result of the comparison,
+ or null to emit a branch.
+ CMP0 and CMP1 are the two comparison operands.
+ DESTINATION is the destination of the branch, or null to only compare.
+ */
+
+static void
+gen_int_relational (enum rtx_code code,
+ rtx result,
+ rtx cmp0,
+ rtx cmp1,
+ rtx destination)
+{
+ enum machine_mode mode;
+ int branch_p;
+ rtx temp;
+ rtx cond;
+ rtx label;
+
+ mode = GET_MODE (cmp0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (cmp1);
+
+ /* Is this a branch or a compare? */
+ branch_p = (destination != 0);
+
+ /* Instruction set doesn't support LE or LT, so swap operands and use
+ GE, GT. */
+ switch (code)
+ {
+ case LE:
+ case LT:
+ case LEU:
+ case LTU:
+ {
+ rtx temp;
+
+ code = swap_condition (code);
+ temp = cmp0;
+ cmp0 = cmp1;
+ cmp1 = temp;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (branch_p)
+ {
+ rtx insn, cond, label;
+
+ /* Operands must be in registers. */
+ if (!register_operand (cmp0, mode))
+ cmp0 = force_reg (mode, cmp0);
+ if (!register_operand (cmp1, mode))
+ cmp1 = force_reg (mode, cmp1);
+
+ /* Generate conditional branch instruction. */
+ cond = gen_rtx_fmt_ee (code, mode, cmp0, cmp1);
+ label = gen_rtx_LABEL_REF (VOIDmode, destination);
+ insn = gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ cond, label, pc_rtx));
+ emit_jump_insn (insn);
+ }
+ else
+ {
+ /* We can't have const_ints in cmp0, other than 0. */
+ if ((GET_CODE (cmp0) == CONST_INT) && (INTVAL (cmp0) != 0))
+ cmp0 = force_reg (mode, cmp0);
+
+ /* If the comparison is against an int that is not in the legal range,
+ move it into a register. */
+ if (GET_CODE (cmp1) == CONST_INT)
+ {
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case LE:
+ case LT:
+ case GE:
+ case GT:
+ if (!satisfies_constraint_K (cmp1))
+ cmp1 = force_reg (mode, cmp1);
+ break;
+ case LEU:
+ case LTU:
+ case GEU:
+ case GTU:
+ if (!satisfies_constraint_L (cmp1))
+ cmp1 = force_reg (mode, cmp1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* Generate compare instruction. */
+ emit_move_insn (result, gen_rtx_fmt_ee (code, mode, cmp0, cmp1));
+ }
+}
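
The operand swap at the top of gen_int_relational works because a less-than comparison can always be rewritten as a greater-than comparison with the operands exchanged, for signed and unsigned values alike, so only the "greater" forms need hardware support. A tiny standalone C check of that identity (illustration only; the function names are made up):

#include <assert.h>

/* Illustrative only: the swap in gen_int_relational is valid because
   x < y  <=>  y > x   and   x <= y  <=>  y >= x,
   for signed and unsigned operands alike.  */
static int lt_via_gt (int x, int y)
{
  return y > x;          /* same truth value as x < y */
}

static unsigned leu_via_geu (unsigned x, unsigned y)
{
  return y >= x;         /* same truth value as x <= y */
}

int main (void)
{
  assert (lt_via_gt (-5, 3) == (-5 < 3));
  assert (leu_via_geu (7u, 7u) == (7u <= 7u));
  return 0;
}
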
+
+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
+ and OPERANDS[3]. Store the result in OPERANDS[0]. */
+
+void
+lm32_expand_scc (rtx operands[])
+{
+ rtx target = operands[0];
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2];
+ rtx op1 = operands[3];
+
+ gen_int_relational (code, target, op0, op1, NULL_RTX);
+}
+
+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
+ CODE and jump to OPERANDS[3] if the condition holds. */
+
+void
+lm32_expand_conditional_branch (rtx operands[])
+{
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1];
+ rtx op1 = operands[2];
+ rtx destination = operands[3];
+
+ gen_int_relational (code, NULL_RTX, op0, op1, destination);
+}
+
+/* Generate and emit RTL to save or restore callee save registers. */
+static void
+expand_save_restore (struct lm32_frame_info *info, int op)
+{
+ unsigned int reg_save_mask = info->reg_save_mask;
+ int regno;
+ HOST_WIDE_INT offset;
+ rtx insn;
+
+ /* Callee saves are below locals and above outgoing arguments. */
+ offset = info->args_size + info->callee_size;
+ for (regno = 0; regno <= 31; regno++)
+ {
+ if ((reg_save_mask & (1 << regno)) != 0)
+ {
+ rtx offset_rtx;
+ rtx mem;
+
+ offset_rtx = GEN_INT (offset);
+ if (satisfies_constraint_K (offset_rtx))
+ {
+ mem = gen_rtx_MEM (word_mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ offset_rtx));
+ }
+ else
+ {
+ /* r10 is caller saved so it can be used as a temp reg. */
+ rtx r10;
+
+ r10 = gen_rtx_REG (word_mode, 10);
+ insn = emit_move_insn (r10, offset_rtx);
+ if (op == 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_add (r10, r10, stack_pointer_rtx);
+ if (op == 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ mem = gen_rtx_MEM (word_mode, r10);
+ }
+
+ if (op == 0)
+ insn = emit_move_insn (mem, gen_rtx_REG (word_mode, regno));
+ else
+ insn = emit_move_insn (gen_rtx_REG (word_mode, regno), mem);
+
+ /* Only prologue instructions which set the sp or fp, or save a
+ register, should be marked as frame related. */
+ if (op == 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ offset -= UNITS_PER_WORD;
+ }
+ }
+}
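
To make the save layout concrete: the loop above walks the register mask from r0 upwards and hands out stack slots starting at args_size + callee_size, moving down one word per saved register. A standalone C sketch of that walk with a made-up mask and frame (not GCC code; register numbers follow the definitions later in this patch, where fp is r27 and ra is r29):

#include <stdio.h>

#define UNITS_PER_WORD 4

/* Sketch only: print the slot each saved register would occupy,
   mirroring the offset bookkeeping in expand_save_restore.
   Made-up frame: 8 bytes of outgoing args, three callee saves.  */
int main (void)
{
  unsigned int reg_save_mask = (1u << 11) | (1u << 27) | (1u << 29);
  long args_size = 8;
  long callee_size = 3 * UNITS_PER_WORD;
  long offset = args_size + callee_size;   /* callee saves sit above args */
  int regno;

  for (regno = 0; regno <= 31; regno++)
    if (reg_save_mask & (1u << regno))
      {
        printf ("r%d saved at sp+%ld\n", regno, offset);
        offset -= UNITS_PER_WORD;
      }
  return 0;
}
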
+
+static void
+stack_adjust (HOST_WIDE_INT amount)
+{
+ rtx insn;
+
+ if (!IN_RANGE (amount, -32776, 32768))
+ {
+ /* r10 is caller saved so it can be used as a temp reg. */
+ rtx r10;
+ r10 = gen_rtx_REG (word_mode, 10);
+ insn = emit_move_insn (r10, GEN_INT (amount));
+ if (amount < 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_add (stack_pointer_rtx, stack_pointer_rtx, r10);
+ if (amount < 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ {
+ insn = emit_add (stack_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (amount));
+ if (amount < 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
+
+
+/* Create and emit instructions for a function's prologue. */
+void
+lm32_expand_prologue (void)
+{
+ rtx insn;
+
+ lm32_compute_frame_size (get_frame_size ());
+
+ if (current_frame_info.total_size > 0)
+ {
+ /* Allocate stack space for the new frame. */
+ stack_adjust (-current_frame_info.total_size);
+
+ /* Save callee save registers. */
+ if (current_frame_info.reg_save_mask != 0)
+ expand_save_restore (&current_frame_info, 0);
+
+ /* Set up the frame pointer if it's needed. */
+ if (frame_pointer_needed == 1)
+ {
+ /* Move sp to fp. */
+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Add offset. Don't use total_size, as that includes pretend_size,
+ which isn't part of this frame. */
+ insn = emit_add (frame_pointer_rtx,
+ frame_pointer_rtx,
+ GEN_INT (current_frame_info.args_size +
+ current_frame_info.callee_size +
+ current_frame_info.locals_size));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* Prevent prologue from being scheduled into function body. */
+ emit_insn (gen_blockage ());
+ }
+}
+
+/* Create and emit instructions for a function's epilogue. */
+void
+lm32_expand_epilogue (void)
+{
+ rtx ra_rtx = gen_rtx_REG (Pmode, RA_REGNUM);
+
+ lm32_compute_frame_size (get_frame_size ());
+
+ if (current_frame_info.total_size > 0)
+ {
+ /* Prevent stack code from being reordered. */
+ emit_insn (gen_blockage ());
+
+ /* Restore callee save registers. */
+ if (current_frame_info.reg_save_mask != 0)
+ expand_save_restore (&current_frame_info, 1);
+
+ /* Deallocate stack. */
+ stack_adjust (current_frame_info.total_size);
+
+ /* Return to calling function. */
+ emit_jump_insn (gen_return_internal (ra_rtx));
+ }
+ else
+ {
+ /* Return to calling function. */
+ emit_jump_insn (gen_return_internal (ra_rtx));
+ }
+}
+
+/* Compute the size of the current function's frame, record the breakdown
+ in current_frame_info, and return the total size in bytes. */
+static HOST_WIDE_INT
+lm32_compute_frame_size (int size)
+{
+ int regno;
+ HOST_WIDE_INT total_size, locals_size, args_size, pretend_size, callee_size;
+ unsigned int reg_save_mask;
+
+ locals_size = size;
+ args_size = crtl->outgoing_args_size;
+ pretend_size = crtl->args.pretend_args_size;
+ callee_size = 0;
+ reg_save_mask = 0;
+
+ /* Build the mask that actually determines which registers we save
+ and calculate the size required to store them on the stack. */
+ for (regno = 1; regno < SP_REGNUM; regno++)
+ {
+ if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
+ {
+ reg_save_mask |= 1 << regno;
+ callee_size += UNITS_PER_WORD;
+ }
+ }
+ if (df_regs_ever_live_p (RA_REGNUM) || !current_function_is_leaf
+ || !optimize)
+ {
+ reg_save_mask |= 1 << RA_REGNUM;
+ callee_size += UNITS_PER_WORD;
+ }
+ if (!(reg_save_mask & (1 << FP_REGNUM)) && frame_pointer_needed)
+ {
+ reg_save_mask |= 1 << FP_REGNUM;
+ callee_size += UNITS_PER_WORD;
+ }
+
+ /* Compute total frame size. */
+ total_size = pretend_size + args_size + locals_size + callee_size;
+
+ /* Align frame to appropriate boundary. */
+ total_size = (total_size + 3) & ~3;
+
+ /* Save computed information. */
+ current_frame_info.total_size = total_size;
+ current_frame_info.callee_size = callee_size;
+ current_frame_info.pretend_size = pretend_size;
+ current_frame_info.locals_size = locals_size;
+ current_frame_info.args_size = args_size;
+ current_frame_info.reg_save_mask = reg_save_mask;
+
+ return total_size;
+}
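
The arithmetic here is just a sum of the four frame parts, rounded up to the 4-byte stack alignment with (total_size + 3) & ~3. A minimal standalone sketch with made-up sizes (illustration only, not GCC code):

#include <stdio.h>

/* Illustrative sketch: mirror of the frame-size arithmetic above,
   using made-up sizes.  */
int main (void)
{
  long pretend_size = 0;     /* bytes the caller "pretended" to push */
  long args_size    = 8;     /* outgoing argument area               */
  long locals_size  = 13;    /* local variables                      */
  long callee_size  = 12;    /* e.g. ra, fp and one callee-save reg  */

  long total = pretend_size + args_size + locals_size + callee_size;

  /* Round up to the 4-byte stack alignment, as (total_size + 3) & ~3
     does in lm32_compute_frame_size.  */
  total = (total + 3) & ~3L;

  printf ("frame size = %ld\n", total);   /* 33 rounds up to 36 */
  return 0;
}
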
+
+void
+lm32_print_operand (FILE * file, rtx op, int letter)
+{
+ enum rtx_code code;
+
+ code = GET_CODE (op);
+
+ if (code == SIGN_EXTEND)
+ op = XEXP (op, 0), code = GET_CODE (op);
+ else if (code == REG || code == SUBREG)
+ {
+ int regnum;
+
+ if (code == REG)
+ regnum = REGNO (op);
+ else
+ regnum = true_regnum (op);
+
+ fprintf (file, "%s", reg_names[regnum]);
+ }
+ else if (code == HIGH)
+ output_addr_const (file, XEXP (op, 0));
+ else if (code == MEM)
+ output_address (XEXP (op, 0));
+ else if (letter == 'z' && GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
+ fprintf (file, "%s", reg_names[0]);
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ {
+ if ((CONST_DOUBLE_LOW (op) != 0) || (CONST_DOUBLE_HIGH (op) != 0))
+ output_operand_lossage ("only 0.0 can be loaded as an immediate");
+ else
+ fprintf (file, "0");
+ }
+ else if (code == EQ)
+ fprintf (file, "e ");
+ else if (code == NE)
+ fprintf (file, "ne ");
+ else if (code == GT)
+ fprintf (file, "g ");
+ else if (code == GTU)
+ fprintf (file, "gu ");
+ else if (code == LT)
+ fprintf (file, "l ");
+ else if (code == LTU)
+ fprintf (file, "lu ");
+ else if (code == GE)
+ fprintf (file, "ge ");
+ else if (code == GEU)
+ fprintf (file, "geu");
+ else if (code == LE)
+ fprintf (file, "le ");
+ else if (code == LEU)
+ fprintf (file, "leu");
+ else
+ output_addr_const (file, op);
+}
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. ADDR is an RTL expression.
+
+ On some machines, the syntax for a symbolic address depends on
+ the section that the address refers to. On these machines,
+ define the macro `ENCODE_SECTION_INFO' to store the information
+ into the `symbol_ref', and then check for it here. */
+
+void
+lm32_print_operand_address (FILE * file, rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ fprintf (file, "(%s+0)", reg_names[REGNO (addr)]);
+ break;
+
+ case MEM:
+ output_address (XEXP (addr, 0));
+ break;
+
+ case PLUS:
+ {
+ rtx arg0 = XEXP (addr, 0);
+ rtx arg1 = XEXP (addr, 1);
+
+ if (GET_CODE (arg0) == REG && CONSTANT_P (arg1))
+ {
+ if (GET_CODE (arg1) == CONST_INT)
+ fprintf (file, "(%s+%ld)", reg_names[REGNO (arg0)],
+ INTVAL (arg1));
+ else
+ {
+ fprintf (file, "(%s+", reg_names[REGNO (arg0)]);
+ output_addr_const (file, arg1);
+ fprintf (file, ")");
+ }
+ }
+ else if (CONSTANT_P (arg0) && CONSTANT_P (arg1))
+ output_addr_const (file, addr);
+ else
+ fatal_insn ("bad operand", addr);
+ }
+ break;
+
+ case SYMBOL_REF:
+ if (SYMBOL_REF_SMALL_P (addr))
+ {
+ fprintf (file, "gp(");
+ output_addr_const (file, addr);
+ fprintf (file, ")");
+ }
+ else
+ fatal_insn ("can't use non gp relative absolute address", addr);
+ break;
+
+ default:
+ fatal_insn ("invalid addressing mode", addr);
+ break;
+ }
+}
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+static rtx
+lm32_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ if (mode == VOIDmode)
+ /* Compute operand 2 of the call insn. */
+ return GEN_INT (0);
+
+ if (targetm.calls.must_pass_in_stack (mode, type))
+ return NULL_RTX;
+
+ if (!named || (*cum + LM32_NUM_REGS2 (mode, type) > LM32_NUM_ARG_REGS))
+ return NULL_RTX;
+
+ return gen_rtx_REG (mode, *cum + LM32_FIRST_ARG_REG);
+}
+
+static void
+lm32_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ *cum += LM32_NUM_REGS2 (mode, type);
+}
+
+HOST_WIDE_INT
+lm32_compute_initial_elimination_offset (int from, int to)
+{
+ HOST_WIDE_INT offset = 0;
+
+ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ switch (to)
+ {
+ case FRAME_POINTER_REGNUM:
+ offset = 0;
+ break;
+ case STACK_POINTER_REGNUM:
+ offset =
+ lm32_compute_frame_size (get_frame_size ()) -
+ current_frame_info.pretend_size;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return offset;
+}
+
+static void
+lm32_setup_incoming_varargs (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+ tree type, int *pretend_size, int no_rtl)
+{
+ int first_anon_arg;
+ tree fntype;
+
+ fntype = TREE_TYPE (current_function_decl);
+
+ if (stdarg_p (fntype))
+ first_anon_arg = *cum + LM32_FIRST_ARG_REG;
+ else
+ {
+ /* This is the common case: we have been passed details set up
+ for the last named argument, and we want to skip over the
+ registers, if any, used in passing this named parameter in
+ order to determine which is the first register used to pass
+ anonymous arguments. */
+ int size;
+
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ first_anon_arg =
+ *cum + LM32_FIRST_ARG_REG +
+ ((size + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
+ }
+
+ if ((first_anon_arg < (LM32_FIRST_ARG_REG + LM32_NUM_ARG_REGS)) && !no_rtl)
+ {
+ int first_reg_offset = first_anon_arg;
+ int size = LM32_FIRST_ARG_REG + LM32_NUM_ARG_REGS - first_anon_arg;
+ rtx regblock;
+
+ regblock = gen_rtx_MEM (BLKmode,
+ plus_constant (arg_pointer_rtx,
+ FIRST_PARM_OFFSET (0)));
+ move_block_from_reg (first_reg_offset, regblock, size);
+
+ *pretend_size = size * UNITS_PER_WORD;
+ }
+}
+
+/* Override command line options. */
+static void
+lm32_option_override (void)
+{
+ /* We must have sign-extend enabled if barrel-shift isn't. */
+ if (!TARGET_BARREL_SHIFT_ENABLED && !TARGET_SIGN_EXTEND_ENABLED)
+ target_flags |= MASK_SIGN_EXTEND_ENABLED;
+}
+
+/* Return nonzero if this function is known to have a null epilogue.
+ This allows the optimizer to omit jumps to jumps if no stack
+ was created. */
+int
+lm32_can_use_return (void)
+{
+ if (!reload_completed)
+ return 0;
+
+ if (df_regs_ever_live_p (RA_REGNUM) || crtl->profile)
+ return 0;
+
+ if (lm32_compute_frame_size (get_frame_size ()) != 0)
+ return 0;
+
+ return 1;
+}
+
+/* Support function to determine the return address of the function
+ 'count' frames back up the stack. */
+rtx
+lm32_return_addr_rtx (int count, rtx frame)
+{
+ rtx r;
+ if (count == 0)
+ {
+ if (!df_regs_ever_live_p (RA_REGNUM))
+ r = gen_rtx_REG (Pmode, RA_REGNUM);
+ else
+ {
+ r = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode, frame,
+ GEN_INT (-2 * UNITS_PER_WORD)));
+ set_mem_alias_set (r, get_frame_alias_set ());
+ }
+ }
+ else if (flag_omit_frame_pointer)
+ r = NULL_RTX;
+ else
+ {
+ r = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode, frame,
+ GEN_INT (-2 * UNITS_PER_WORD)));
+ set_mem_alias_set (r, get_frame_alias_set ());
+ }
+ return r;
+}
+
+/* Return true if EXP should be placed in the small data section. */
+
+static bool
+lm32_in_small_data_p (const_tree exp)
+{
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (exp) == STRING_CST)
+ return false;
+
+ /* Functions are never in the small data area. Duh. */
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ return false;
+
+ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+ if (strcmp (section, ".sdata") == 0 || strcmp (section, ".sbss") == 0)
+ return true;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+
+ /* If this is an incomplete type with size 0, then we can't put it
+ in sdata because it might be too big when completed. */
+ if (size > 0 && size <= g_switch_value)
+ return true;
+ }
+
+ return false;
+}
+
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+ Assume that the areas do not overlap. */
+
+static void
+lm32_block_move_inline (rtx dest, rtx src, HOST_WIDE_INT length,
+ HOST_WIDE_INT alignment)
+{
+ HOST_WIDE_INT offset, delta;
+ unsigned HOST_WIDE_INT bits;
+ int i;
+ enum machine_mode mode;
+ rtx *regs;
+
+ /* Work out how many bits to move at a time. */
+ switch (alignment)
+ {
+ case 1:
+ bits = 8;
+ break;
+ case 2:
+ bits = 16;
+ break;
+ default:
+ bits = 32;
+ break;
+ }
+
+ mode = mode_for_size (bits, MODE_INT, 0);
+ delta = bits / BITS_PER_UNIT;
+
+ /* Allocate a buffer for the temporary registers. */
+ regs = XALLOCAVEC (rtx, length / delta);
+
+ /* Load as many BITS-sized chunks as possible. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ {
+ regs[i] = gen_reg_rtx (mode);
+ emit_move_insn (regs[i], adjust_address (src, mode, offset));
+ }
+
+ /* Copy the chunks to the destination. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
+
+ /* Mop up any left-over bytes. */
+ if (offset < length)
+ {
+ src = adjust_address (src, BLKmode, offset);
+ dest = adjust_address (dest, BLKmode, offset);
+ move_by_pieces (dest, src, length - offset,
+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
+ }
+}
+
+/* Expand string/block move operations.
+
+ operands[0] is the pointer to the destination.
+ operands[1] is the pointer to the source.
+ operands[2] is the number of bytes to move.
+ operands[3] is the alignment. */
+
+int
+lm32_expand_block_move (rtx * operands)
+{
+ if ((GET_CODE (operands[2]) == CONST_INT) && (INTVAL (operands[2]) <= 32))
+ {
+ lm32_block_move_inline (operands[0], operands[1], INTVAL (operands[2]),
+ INTVAL (operands[3]));
+ return 1;
+ }
+ return 0;
+}
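
So the inline path picks a chunk width from the alignment (byte, halfword or word), copies whole chunks, and leaves the tail to move_by_pieces; moves longer than 32 bytes, or of unknown length, are left to the generic expander. A plain-C sketch of the same chunking strategy (illustrative only; memcpy stands in for the RTL moves, and copy_in_chunks is an invented name):

#include <stddef.h>
#include <string.h>

/* Sketch only: the chunking strategy used by lm32_block_move_inline,
   expressed as an ordinary C copy loop.  Alignment 1 copies bytes,
   2 copies halfwords, anything larger copies words; the remainder is
   handled byte by byte (standing in for move_by_pieces).  */
static void copy_in_chunks (void *dest, const void *src,
                            size_t length, size_t alignment)
{
  size_t delta = (alignment == 1) ? 1 : (alignment == 2) ? 2 : 4;
  size_t offset;

  for (offset = 0; offset + delta <= length; offset += delta)
    memcpy ((char *) dest + offset, (const char *) src + offset, delta);

  /* Mop up any left-over bytes.  */
  if (offset < length)
    memcpy ((char *) dest + offset, (const char *) src + offset,
            length - offset);
}

int main (void)
{
  char src[12] = "hello world";
  char dst[12];

  copy_in_chunks (dst, src, sizeof src, 4);   /* three word-sized chunks  */
  copy_in_chunks (dst, src, 11, 2);           /* five halfwords + 1 byte  */
  return 0;
}
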
+
+/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
+ isn't protected by a PIC unspec. */
+int
+nonpic_symbol_mentioned_p (rtx x)
+{
+ const char *fmt;
+ int i;
+
+ if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
+ || GET_CODE (x) == PC)
+ return 1;
+
+ /* We don't want to look into the possible MEM location of a
+ CONST_DOUBLE, since we're not going to use it, in general. */
+ if (GET_CODE (x) == CONST_DOUBLE)
+ return 0;
+
+ if (GET_CODE (x) == UNSPEC)
+ return 0;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+lm32_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed)
+{
+ enum machine_mode mode = GET_MODE (x);
+ bool small_mode;
+
+ const int arithmetic_latency = 1;
+ const int shift_latency = 1;
+ const int compare_latency = 2;
+ const int multiply_latency = 3;
+ const int load_latency = 3;
+ const int libcall_size_cost = 5;
+
+ /* Determine if we can handle the given mode size in a single instruction. */
+ small_mode = (mode == QImode) || (mode == HImode) || (mode == SImode);
+
+ switch (code)
+ {
+
+ case PLUS:
+ case MINUS:
+ case AND:
+ case IOR:
+ case XOR:
+ case NOT:
+ case NEG:
+ if (!speed)
+ *total = COSTS_N_INSNS (LM32_NUM_REGS (mode));
+ else
+ *total =
+ COSTS_N_INSNS (arithmetic_latency + (LM32_NUM_REGS (mode) - 1));
+ break;
+
+ case COMPARE:
+ if (small_mode)
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (compare_latency);
+ }
+ else
+ {
+ /* FIXME. Guessing here. */
+ *total = COSTS_N_INSNS (LM32_NUM_REGS (mode) * (2 + 3) / 2);
+ }
+ break;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ if (TARGET_BARREL_SHIFT_ENABLED && small_mode)
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (shift_latency);
+ }
+ else if (TARGET_BARREL_SHIFT_ENABLED)
+ {
+ /* FIXME: Guessing here. */
+ *total = COSTS_N_INSNS (LM32_NUM_REGS (mode) * 4);
+ }
+ else if (small_mode && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ *total = COSTS_N_INSNS (INTVAL (XEXP (x, 1)));
+ }
+ else
+ {
+ /* Libcall. */
+ if (!speed)
+ *total = COSTS_N_INSNS (libcall_size_cost);
+ else
+ *total = COSTS_N_INSNS (100);
+ }
+ break;
+
+ case MULT:
+ if (TARGET_MULTIPLY_ENABLED && small_mode)
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (multiply_latency);
+ }
+ else
+ {
+ /* Libcall. */
+ if (!speed)
+ *total = COSTS_N_INSNS (libcall_size_cost);
+ else
+ *total = COSTS_N_INSNS (100);
+ }
+ break;
+
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ if (TARGET_DIVIDE_ENABLED && small_mode)
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int cycles = 0;
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
+
+ while (i)
+ {
+ i >>= 2;
+ cycles++;
+ }
+ if (IN_RANGE (i, 0, 65536))
+ *total = COSTS_N_INSNS (1 + 1 + cycles);
+ else
+ *total = COSTS_N_INSNS (2 + 1 + cycles);
+ return true;
+ }
+ else if (GET_CODE (XEXP (x, 1)) == REG)
+ {
+ *total = COSTS_N_INSNS (1 + GET_MODE_SIZE (mode) / 2);
+ return true;
+ }
+ else
+ {
+ *total = COSTS_N_INSNS (1 + GET_MODE_SIZE (mode) / 2);
+ return false;
+ }
+ }
+ }
+ else
+ {
+ /* Libcall. */
+ if (!speed)
+ *total = COSTS_N_INSNS (libcall_size_cost);
+ else
+ *total = COSTS_N_INSNS (100);
+ }
+ break;
+
+ case HIGH:
+ case LO_SUM:
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (arithmetic_latency);
+ break;
+
+ case ZERO_EXTEND:
+ if (MEM_P (XEXP (x, 0)))
+ *total = COSTS_N_INSNS (0);
+ else if (small_mode)
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (arithmetic_latency);
+ }
+ else
+ *total = COSTS_N_INSNS (LM32_NUM_REGS (mode) / 2);
+ break;
+
+ case CONST_INT:
+ {
+ switch (outer_code)
+ {
+ case HIGH:
+ case LO_SUM:
+ *total = COSTS_N_INSNS (0);
+ return true;
+
+ case AND:
+ case XOR:
+ case IOR:
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ if (satisfies_constraint_L (x))
+ *total = COSTS_N_INSNS (0);
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case SET:
+ case PLUS:
+ case MINUS:
+ case COMPARE:
+ if (satisfies_constraint_K (x))
+ *total = COSTS_N_INSNS (0);
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case MULT:
+ if (TARGET_MULTIPLY_ENABLED)
+ {
+ if (satisfies_constraint_K (x))
+ *total = COSTS_N_INSNS (0);
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ /* Fall through. */
+
+ default:
+ if (satisfies_constraint_K (x))
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ }
+
+ case SYMBOL_REF:
+ case CONST:
+ switch (outer_code)
+ {
+ case HIGH:
+ case LO_SUM:
+ *total = COSTS_N_INSNS (0);
+ return true;
+
+ case MEM:
+ case SET:
+ if (g_switch_value)
+ {
+ *total = COSTS_N_INSNS (0);
+ return true;
+ }
+ break;
+ }
+ /* Fall through. */
+
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case SET:
+ *total = COSTS_N_INSNS (1);
+ break;
+
+ case MEM:
+ if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (load_latency);
+ break;
+
+ }
+
+ return false;
+}
+
+/* Implement TARGET_CAN_ELIMINATE. */
+
+bool
+lm32_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ return (to == STACK_POINTER_REGNUM && frame_pointer_needed) ? false : true;
+}
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
+
+static bool
+lm32_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
+{
+ /* (rM) */
+ if (strict && REG_P (x) && STRICT_REG_OK_FOR_BASE_P (x))
+ return true;
+ if (!strict && REG_P (x) && NONSTRICT_REG_OK_FOR_BASE_P (x))
+ return true;
+
+ /* (rM+literal) */
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && ((strict && STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0)))
+ || (!strict && NONSTRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))))
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && satisfies_constraint_K (XEXP ((x), 1)))
+ return true;
+
+ /* gp(sym) */
+ if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
+ return true;
+
+ return false;
+}
+
+/* Check that a move is not memory to memory. */
+
+bool
+lm32_move_ok (enum machine_mode mode, rtx operands[2]) {
+ if (memory_operand (operands[0], mode))
+ return register_or_zero_operand (operands[1], mode);
+ return true;
+}
+
+/* Implement LEGITIMATE_CONSTANT_P. */
+
+bool
+lm32_legitimate_constant_p (rtx x)
+{
+ /* 32-bit addresses require multiple instructions. */
+ if (!flag_pic && reloc_operand (x, GET_MODE (x)))
+ return false;
+
+ return true;
+}
diff --git a/gcc/config/lm32/lm32.h b/gcc/config/lm32/lm32.h
new file mode 100644
index 000000000..3141719b4
--- /dev/null
+++ b/gcc/config/lm32/lm32.h
@@ -0,0 +1,556 @@
+/* Definitions of target machine for GNU compiler, Lattice Mico32 architecture.
+ Contributed by Jon Beniston <jon@beniston.com>
+
+ Copyright (C) 2009, 2010, 2011 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/*-------------------------------*/
+/* Run-time Target Specification */
+/*-------------------------------*/
+
+/* Print subsidiary information on the compiler version in use. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (LatticeMico32)")
+#endif
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__lm32__"); \
+ builtin_assert ("cpu=lm32"); \
+ builtin_assert ("machine=lm32"); \
+ if (TARGET_MULTIPLY_ENABLED) \
+ builtin_define ("__multiply_enabled__"); \
+ if (TARGET_DIVIDE_ENABLED) \
+ builtin_define ("__divide_enabled__"); \
+ if (TARGET_BARREL_SHIFT_ENABLED) \
+ builtin_define ("__barrel_shift_enabled__"); \
+ if (TARGET_SIGN_EXTEND_ENABLED) \
+ builtin_define ("__sign_extend_enabled__"); \
+ if (TARGET_USER_ENABLED) \
+ builtin_define ("__user_enabled__"); \
+ } \
+ while (0)
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{mmultiply-enabled} \
+%{mdivide-enabled} \
+%{mbarrel-shift-enabled} \
+%{msign-extend-enabled} \
+%{muser-enabled} \
+"
+
+/* Let link script define all link options.
+ Default to using simulator link script. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+#undef LIB_SPEC
+#define LIB_SPEC "%{!T*:-T sim.ld}"
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{G*}"
+
+/*---------------------------------*/
+/* Target machine storage layout. */
+/*---------------------------------*/
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 1
+#define WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+do { \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ (MODE) = word_mode; \
+} while (0)
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 64
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Make arrays and structures word-aligned to allow faster copying etc. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
+ character arrays to be word-aligned so that `strcpy' calls that copy
+ constants to character arrays can be done inline, and 'strcmp' can be
+ optimised to use word loads. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ DATA_ALIGNMENT (TYPE, ALIGN)
+
+/*----------------------------------------*/
+/* Layout of source language data types. */
+/*----------------------------------------*/
+
+#define INT_TYPE_SIZE 32
+#define SHORT_TYPE_SIZE 16
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define SIZE_TYPE "unsigned int"
+
+#define PTRDIFF_TYPE "int"
+
+/*---------------------------*/
+/* Standard register usage. */
+/*---------------------------*/
+
+#define FIRST_PSEUDO_REGISTER 32
+
+#define RV_REGNUM 1
+#define GP_REGNUM 26
+#define FP_REGNUM 27
+#define SP_REGNUM 28
+#define RA_REGNUM 29
+
+#define G_REG_P(X) ((X)<32)
+
+#define FIXED_REGISTERS \
+{ 1, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 1, 0, 1, 0, 1, 1}
+
+#define CALL_USED_REGISTERS \
+{ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 1, 0, 1, 0, 1, 1}
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) G_REG_P(REGNO)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+( GET_MODE_CLASS (MODE1) == MODE_INT \
+ && GET_MODE_CLASS (MODE2) == MODE_INT \
+ && GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
+ && GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD)
+
+#define AVOID_CCMODE_COPIES
+
+/*----------------------------------*/
+/* Register classes and constants. */
+/*----------------------------------*/
+
+enum reg_class
+{
+ NO_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "ALL_REGS" }
+
+#define REG_CLASS_CONTENTS \
+{ {0x00000000}, \
+ {0xffffffff}, \
+ {0xffffffff} \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ (G_REG_P(REGNO) ? GENERAL_REGS : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#define INDEX_REG_CLASS NO_REGS
+
+#define BASE_REG_CLASS GENERAL_REGS
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ (G_REG_P (REGNO) || G_REG_P ((unsigned) reg_renumber[REGNO]))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+
+/*----------------------------------------*/
+/* Stack Layout and Calling Conventions. */
+/*----------------------------------------*/
+
+#define STACK_GROWS_DOWNWARD 1
+
+#define FRAME_GROWS_DOWNWARD 1
+
+#define STACK_POINTER_OFFSET (UNITS_PER_WORD)
+
+#define STARTING_FRAME_OFFSET (UNITS_PER_WORD)
+
+#define FIRST_PARM_OFFSET(FNDECL) (UNITS_PER_WORD)
+
+#define STACK_POINTER_REGNUM SP_REGNUM
+
+#define FRAME_POINTER_REGNUM FP_REGNUM
+
+#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (SImode, RA_REGNUM)
+
+#define RETURN_ADDR_RTX(count, frame) \
+ lm32_return_addr_rtx (count, frame)
+
+/* FIXME - This is not yet supported. */
+#define STATIC_CHAIN_REGNUM 9
+
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = lm32_compute_initial_elimination_offset (FROM, TO)
+
+/*-----------------------------*/
+/* Function argument passing. */
+/*-----------------------------*/
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/*--------------------------------*/
+/* Passing Arguments in Registers */
+/*--------------------------------*/
+
+/* The first argument register. */
+#define LM32_FIRST_ARG_REG 1
+
+/* The number of (integer) argument registers available. */
+#define LM32_NUM_ARG_REGS 8
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT,N_NAMED_ARGS) \
+ (CUM) = 0
+
+#define FUNCTION_ARG_REGNO_P(r) \
+ (((r) >= LM32_FIRST_ARG_REG) && ((r) <= LM32_NUM_ARG_REGS))
+
+/*--------------------*/
+/* Function results. */
+/*--------------------*/
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG ((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ ? word_mode \
+ : TYPE_MODE (VALTYPE), \
+ RV_REGNUM)
+
+#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, RV_REGNUM)
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == RV_REGNUM)
+
+#define RETURN_IN_MEMORY(TYPE) lm32_return_in_memory (TYPE)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Convert from bytes to ints. */
+#define LM32_NUM_INTS(X) (((X) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* The number of (integer) registers required to hold a quantity of
+ type MODE. */
+#define LM32_NUM_REGS(MODE) LM32_NUM_INTS (GET_MODE_SIZE (MODE))
+
+/* The number of (integer) registers required to hold a quantity of
+ type TYPE and mode MODE. */
+#define LM32_NUM_REGS2(MODE, TYPE) \
+ LM32_NUM_INTS ((MODE) == BLKmode ? \
+ int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE))
+
+#define STRUCT_VALUE 0
+
+/*---------------------------*/
+/* Function entry and exit. */
+/*---------------------------*/
+
+/*-------------*/
+/* Profiling. */
+/*-------------*/
+
+#define FUNCTION_PROFILER(FILE, LABELNO)
+
+/*---------------*/
+/* Trampolines. */
+/*---------------*/
+
+#define TRAMPOLINE_SIZE 0
+
+/*---------------------*/
+/* Addressing Modes. */
+/*---------------------*/
+
+#define CONSTANT_ADDRESS_P(X) \
+ ((GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == HIGH \
+ || (GET_CODE (X) == CONST)))
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define STRICT_REG_OK_FOR_BASE_P(X) \
+ (REGNO_OK_FOR_BASE_P (REGNO (X)))
+#define NONSTRICT_REG_OK_FOR_BASE_P(X) \
+ (G_REG_P (REGNO (X)) || !HARD_REGISTER_NUM_P (REGNO (X)))
+
+#ifdef REG_OK_STRICT
+#define REG_OK_FOR_BASE_P(X) STRICT_REG_OK_FOR_BASE_P(X)
+#else
+#define REG_OK_FOR_BASE_P(X) NONSTRICT_REG_OK_FOR_BASE_P(X)
+#endif
+
+#define LEGITIMATE_CONSTANT_P(X) lm32_legitimate_constant_p (X)
+
+/*-------------------------*/
+/* Condition Code Status. */
+/*-------------------------*/
+
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+/*---------*/
+/* Costs. */
+/*---------*/
+
+#define SLOW_BYTE_ACCESS 1
+
+#define NO_FUNCTION_CSE
+
+#define BRANCH_COST(speed_p, predictable_p) 4
+
+#define MOVE_RATIO(speed) (speed ? 24 : 3)
+
+/*------------*/
+/* Sections. */
+/*------------*/
+
+#define TEXT_SECTION_ASM_OP "\t.section\t.text"
+#define DATA_SECTION_ASM_OP "\t.section\t.data"
+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\""
+#define BSS_SECTION_ASM_OP "\t.section\t.bss"
+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\""
+
+/*-------*/
+/* PIC. */
+/*-------*/
+
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? GP_REGNUM : INVALID_REGNUM)
+
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (!(nonpic_symbol_mentioned_p (X)))
+
+/*-------------*/
+/* Assembler. */
+/*-------------*/
+
+#define ASM_COMMENT_START "#"
+
+#define ASM_APP_ON "#APP\n"
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { \
+ fputc ( '\t', FILE); \
+ assemble_name (FILE, LABEL1); \
+ fputs ( " = ", FILE); \
+ assemble_name (FILE, LABEL2); \
+ fputc ( '\n', FILE); \
+ } while (0)
+
+/* Override default implementation in elfos.h to support -G. */
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ if ((SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \
+ switch_to_section (sbss_section); \
+ else \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \
+} while (0)
+
+/* Override default implementation in elfos.h to support -G. */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do \
+{ \
+ if ((SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \
+ { \
+ switch_to_section (sbss_section); \
+ (*targetm.asm_out.globalize_label) (FILE, NAME); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \
+ } \
+ else \
+ { \
+ switch_to_section (bss_section); \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+} \
+while (0)
+
+#define ASM_OUTPUT_LABEL(FILE, NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { \
+ const char *xname = (NAME); \
+ if (xname[0] == '@') \
+ xname += 1; \
+ if (xname[0] == '*') \
+ xname += 1; \
+ fputs (xname, FILE); \
+ } while (0)
+
+#define ASM_OUTPUT_SYMBOL_REF(STREAM, SYMBOL) \
+ do { \
+ assemble_name (STREAM, XSTR (SYMBOL, 0)); \
+ } while (0)
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "gp", "fp", "sp", "ra", "ea", "ba"}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+ (((CHAR) == '&') || ((CHAR) == '@') || ((CHAR) == '*'))
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ lm32_print_operand (FILE, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ lm32_print_operand_address (FILE, ADDR)
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ do { if ((LOG) != 0) fprintf (FILE, "\t.align %d\n", (1 << (LOG))); } while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { \
+ char label[64]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ fprintf (FILE, "\n\t.word\t"); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+} while (0)
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+do { \
+ char label[64]; \
+ fprintf (FILE, "\t.word\t("); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", REL); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, ")\n"); \
+} while (0)
+
+/*-------------*/
+/* Debugging. */
+/*-------------*/
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/*--------*/
+/* Misc. */
+/*--------*/
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+#define MOVE_MAX UNITS_PER_WORD
+#define MAX_MOVE_MAX 4
+
+#define SHIFT_COUNT_TRUNCATED 1
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#ifndef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C
+#endif
+
+#define STORE_FLAG_VALUE 1
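
One way to read the argument-passing macros above: arguments are assigned to up to LM32_NUM_ARG_REGS word-sized registers starting at r1, with LM32_NUM_REGS2 deciding how many words each argument needs, and anything that would spill past r8 going on the stack (see lm32_function_arg in lm32.c). A simplified standalone sketch of that bookkeeping (illustration only; it ignores must_pass_in_stack and unnamed arguments, and place_arg is an invented helper):

#include <stdio.h>

#define UNITS_PER_WORD     4
#define LM32_FIRST_ARG_REG 1
#define LM32_NUM_ARG_REGS  8

/* Sketch only: the register-vs-stack decision made by lm32_function_arg,
   with CUM counting words already used and the argument size given in
   bytes (standing in for LM32_NUM_REGS2).  */
static void place_arg (int *cum, int size_in_bytes)
{
  int nregs = (size_in_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (*cum + nregs > LM32_NUM_ARG_REGS)
    printf ("%d-byte argument goes on the stack\n", size_in_bytes);
  else
    printf ("%d-byte argument goes in r%d\n",
            size_in_bytes, *cum + LM32_FIRST_ARG_REG);

  *cum += nregs;   /* what lm32_function_arg_advance does */
}

int main (void)
{
  int cum = 0;
  place_arg (&cum, 4);   /* r1                              */
  place_arg (&cum, 8);   /* r2 (and r3)                     */
  place_arg (&cum, 4);   /* r4                              */
  place_arg (&cum, 24);  /* 6 words: 4 + 6 > 8, so stack    */
  return 0;
}
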
diff --git a/gcc/config/lm32/lm32.md b/gcc/config/lm32/lm32.md
new file mode 100644
index 000000000..7539cb065
--- /dev/null
+++ b/gcc/config/lm32/lm32.md
@@ -0,0 +1,996 @@
+;; Machine description of the Lattice Mico32 architecture for GNU C compiler.
+;; Contributed by Jon Beniston <jon@beniston.com>
+
+;; Copyright (C) 2009, 2010 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Include predicate and constraint definitions
+(include "predicates.md")
+(include "constraints.md")
+
+
+;; Register numbers
+(define_constants
+ [(RA_REGNUM 29) ; return address register.
+ ]
+)
+
+;; LM32 specific volatile operations
+(define_constants
+ [(UNSPECV_BLOCKAGE 1)] ; prevent scheduling across pro/epilog boundaries
+)
+
+;; LM32 specific operations
+(define_constants
+ [(UNSPEC_GOT 2)
+ (UNSPEC_GOTOFF_HI16 3)
+ (UNSPEC_GOTOFF_LO16 4)]
+)
+
+;; ---------------------------------
+;; instruction types
+;; ---------------------------------
+
+(define_attr "type"
+ "unknown,load,store,arith,compare,shift,multiply,divide,call,icall,ubranch,uibranch,cbranch"
+ (const_string "unknown"))
+
+;; ---------------------------------
+;; instruction lengths
+;; ---------------------------------
+
+; All instructions are 4 bytes,
+; except for branches that are out of range, which have to be implemented
+; as two instructions.
+(define_attr "length" ""
+ (cond [
+ (eq_attr "type" "cbranch")
+ (if_then_else
+ (lt (abs (minus (match_dup 2) (pc)))
+ (const_int 32768)
+ )
+ (const_int 4)
+ (const_int 8)
+ )
+ ]
+ (const_int 4))
+)
+
+;; ---------------------------------
+;; scheduling
+;; ---------------------------------
+
+(define_automaton "lm32")
+
+(define_cpu_unit "x" "lm32")
+(define_cpu_unit "m" "lm32")
+(define_cpu_unit "w" "lm32")
+
+(define_insn_reservation "singlecycle" 1
+ (eq_attr "type" "store,arith,call,icall,ubranch,uibranch,cbranch")
+ "x")
+
+(define_insn_reservation "twocycle" 2
+ (eq_attr "type" "compare,shift,divide")
+ "x,m")
+
+(define_insn_reservation "threecycle" 3
+ (eq_attr "type" "load,multiply")
+ "x,m,w")
+
+;; ---------------------------------
+;; mov
+;; ---------------------------------
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (can_create_pseudo_p ())
+ {
+ if (GET_CODE (operand0) == MEM)
+ {
+ /* Source operand for store must be in a register. */
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+ }
+}")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (can_create_pseudo_p ())
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ /* Source operand for store must be in a register. */
+ operands[1] = force_reg (HImode, operands[1]);
+ }
+ }
+}")
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (can_create_pseudo_p ())
+ {
+ if (GET_CODE (operands[0]) == MEM
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == MEM))
+ {
+ /* Source operand for store must be in a register. */
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+ }
+
+ if (flag_pic && symbolic_operand (operands[1], SImode))
+ {
+ if (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == SYMBOL_REF
+ && SYMBOL_REF_LOCAL_P (operands[1])
+ && !SYMBOL_REF_WEAK (operands[1])))
+ {
+ emit_insn (gen_movsi_gotoff_hi16 (operands[0], operands[1]));
+ emit_insn (gen_addsi3 (operands[0],
+ operands[0],
+ pic_offset_table_rtx));
+ emit_insn (gen_movsi_gotoff_lo16 (operands[0],
+ operands[0],
+ operands[1]));
+ }
+ else
+ emit_insn (gen_movsi_got (operands[0], operands[1]));
+ crtl->uses_pic_offset_table = 1;
+ DONE;
+ }
+ else if (flag_pic && GET_CODE (operands[1]) == CONST)
+ {
+ rtx op = XEXP (operands[1], 0);
+ if (GET_CODE (op) == PLUS)
+ {
+ rtx arg0 = XEXP (op, 0);
+ rtx arg1 = XEXP (op, 1);
+ if (GET_CODE (arg0) == LABEL_REF
+ || (GET_CODE (arg0) == SYMBOL_REF
+ && SYMBOL_REF_LOCAL_P (arg0)
+ && !SYMBOL_REF_WEAK (arg0)))
+ {
+ emit_insn (gen_movsi_gotoff_hi16 (operands[0], arg0));
+ emit_insn (gen_addsi3 (operands[0],
+ operands[0],
+ pic_offset_table_rtx));
+ emit_insn (gen_movsi_gotoff_lo16 (operands[0],
+ operands[0],
+ arg0));
+ }
+ else
+ emit_insn (gen_movsi_got (operands[0], arg0));
+ emit_insn (gen_addsi3 (operands[0], operands[0], arg1));
+ crtl->uses_pic_offset_table = 1;
+ DONE;
+ }
+ }
+ else if (!flag_pic && reloc_operand (operands[1], GET_MODE (operands[1])))
+ {
+ emit_insn (gen_rtx_SET (SImode, operands[0], gen_rtx_HIGH (SImode, operands[1])));
+ emit_insn (gen_rtx_SET (SImode, operands[0], gen_rtx_LO_SUM (SImode, operands[0], operands[1])));
+ DONE;
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (!(satisfies_constraint_K (operands[1])
+ || satisfies_constraint_L (operands[1])
+ || satisfies_constraint_U (operands[1])))
+ {
+ emit_insn (gen_movsi_insn (operands[0],
+ GEN_INT (INTVAL (operands[1]) & ~0xffff)));
+ emit_insn (gen_iorsi3 (operands[0],
+ operands[0],
+ GEN_INT (INTVAL (operands[1]) & 0xffff)));
+ DONE;
+ }
+ }
+}")
+
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" ""))
+ (use (match_operand:SI 2 "" ""))
+ (use (match_operand:SI 3 "const_int_operand" ""))])]
+ ""
+{
+ if (!lm32_expand_block_move (operands))
+ FAIL;
+ DONE;
+})
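+
+;; When lm32_expand_block_move (in lm32.c) declines to open-code the copy,
+;; the FAIL above hands the block move back to the generic code, which
+;; typically ends up calling memcpy.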
+
+;; ---------------------------------
+;; load/stores/moves
+;; ---------------------------------
+
+(define_insn "movsi_got"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOT))]
+ "flag_pic"
+ "lw %0, (gp+got(%1))"
+ [(set_attr "type" "load")]
+)
+
+(define_insn "movsi_gotoff_hi16"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF_HI16))]
+ "flag_pic"
+ "orhi %0, r0, gotoffhi16(%1)"
+ [(set_attr "type" "load")]
+)
+
+(define_insn "movsi_gotoff_lo16"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(plus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand 2 "" ""))] UNSPEC_GOTOFF_LO16))]
+ "flag_pic"
+ "addi %0, %1, gotofflo16(%2)"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "*movsi_lo_sum"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reloc_operand" "i")))]
+ "!flag_pic"
+ "ori %0, %0, lo(%2)"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,m,r")
+ (match_operand:QI 1 "general_operand" "m,r,r,J,n"))]
+ "lm32_move_ok (QImode, operands)"
+ "@
+ lbu %0, %1
+ or %0, %1, r0
+ sb %0, %1
+ sb %0, r0
+ addi %0, r0, %1"
+ [(set_attr "type" "load,arith,store,store,arith")]
+)
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,m,r,r")
+ (match_operand:HI 1 "general_operand" "m,r,r,J,K,L"))]
+ "lm32_move_ok (HImode, operands)"
+ "@
+ lhu %0, %1
+ or %0, %1, r0
+ sh %0, %1
+ sh %0, r0
+ addi %0, r0, %1
+ ori %0, r0, %1"
+ [(set_attr "type" "load,arith,store,store,arith,arith")]
+)
+
+(define_insn "movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,m,m,r,r,r,r,r")
+ (match_operand:SI 1 "movsi_rhs_operand" "m,r,r,J,K,L,U,S,Y"))]
+ "lm32_move_ok (SImode, operands)"
+ "@
+ lw %0, %1
+ or %0, %1, r0
+ sw %0, %1
+ sw %0, r0
+ addi %0, r0, %1
+ ori %0, r0, %1
+ orhi %0, r0, hi(%1)
+ mva %0, gp(%1)
+ orhi %0, r0, hi(%1)"
+ [(set_attr "type" "load,arith,store,store,arith,arith,arith,arith,arith")]
+)
+
+;; ---------------------------------
+;; sign and zero extension
+;; ---------------------------------
+
+(define_insn "*extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m,r")))]
+ "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)"
+ "@
+ lb %0, %1
+ sextb %0, %1"
+ [(set_attr "type" "load,arith")]
+)
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m,r")))]
+ ""
+ "@
+ lbu %0, %1
+ andi %0, %1, 0xff"
+ [(set_attr "type" "load,arith")]
+)
+
+(define_insn "*extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m,r")))]
+ "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)"
+ "@
+ lb %0, %1
+ sextb %0, %1"
+ [(set_attr "type" "load,arith")]
+)
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m,r")))]
+ ""
+ "@
+ lbu %0, %1
+ andi %0, %1, 0xff"
+ [(set_attr "type" "load,arith")]
+)
+
+(define_insn "*extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "m,r")))]
+ "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)"
+ "@
+ lh %0, %1
+ sexth %0, %1"
+ [(set_attr "type" "load,arith")]
+)
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "m,r")))]
+ ""
+ "@
+ lhu %0, %1
+ andi %0, %1, 0xffff"
+ [(set_attr "type" "load,arith")]
+)
+
+;; ---------------------------------
+;; compare
+;; ---------------------------------
+
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "register_operand")
+ (match_operand:SI 3 "register_or_int_operand")]))]
+ ""
+{
+ lm32_expand_scc (operands);
+ DONE;
+})
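+
+;; Only eq, ne, gt, ge, gtu and geu have cmp* instructions below, so
+;; lm32_expand_scc (in lm32.c) presumably handles lt, le, ltu and leu by
+;; swapping the operands and using the reversed comparison.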
+
+(define_insn "*seq"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (eq:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ ""
+ "@
+ cmpe %0, %z1, %2
+ cmpei %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+(define_insn "*sne"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ne:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ ""
+ "@
+ cmpne %0, %z1, %2
+ cmpnei %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+(define_insn "*sgt"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (gt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ ""
+ "@
+ cmpg %0, %z1, %2
+ cmpgi %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+(define_insn "*sge"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ge:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ ""
+ "@
+ cmpge %0, %z1, %2
+ cmpgei %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+(define_insn "*sgtu"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (gtu:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ ""
+ "@
+ cmpgu %0, %z1, %2
+ cmpgui %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+(define_insn "*sgeu"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (geu:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ ""
+ "@
+ cmpgeu %0, %z1, %2
+ cmpgeui %0, %z1, %2"
+ [(set_attr "type" "compare")]
+)
+
+;; ---------------------------------
+;; unconditional branch
+;; ---------------------------------
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "bi %0"
+ [(set_attr "type" "ubranch")]
+)
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "b %0"
+ [(set_attr "type" "uibranch")]
+)
+
+;; ---------------------------------
+;; conditional branch
+;; ---------------------------------
+
+(define_expand "cbranchsi4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ lm32_expand_conditional_branch (operands);
+ DONE;
+}")
+
+(define_insn "*beq"
+ [(set (pc)
+ (if_then_else (eq:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "be %z0,%z1,%2"
+ : "bne %z0,%z1,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*bne"
+ [(set (pc)
+ (if_then_else (ne:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "bne %z0,%z1,%2"
+ : "be %z0,%z1,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*bgt"
+ [(set (pc)
+ (if_then_else (gt:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "bg %z0,%z1,%2"
+ : "bge %z1,%z0,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*bge"
+ [(set (pc)
+ (if_then_else (ge:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "bge %z0,%z1,%2"
+ : "bg %z1,%z0,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*bgtu"
+ [(set (pc)
+ (if_then_else (gtu:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "bgu %z0,%z1,%2"
+ : "bgeu %z1,%z0,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+(define_insn "*bgeu"
+ [(set (pc)
+ (if_then_else (geu:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
+ (match_operand:SI 1 "register_or_zero_operand" "rJ"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return get_attr_length (insn) == 4
+ ? "bgeu %z0,%z1,%2"
+ : "bgu %z1,%z0,8\n\tbi %2";
+}
+ [(set_attr "type" "cbranch")])
+
+;; ---------------------------------
+;; call
+;; ---------------------------------
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI RA_REGNUM))
+ ])]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[0], 0);
+ if (!CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+}")
+
+(define_insn "*call"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "r,s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI RA_REGNUM))]
+ ""
+ "@
+ call %0
+ calli %0"
+ [(set_attr "type" "call,icall")]
+)
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI RA_REGNUM))
+ ])]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ if (!CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[1], 0) = force_reg (Pmode, addr);
+}")
+
+(define_insn "*call_value"
+ [(set (match_operand 0 "register_operand" "=r,r")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "r,s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI RA_REGNUM))]
+ ""
+ "@
+ call %1
+ calli %1"
+ [(set_attr "type" "call,icall")]
+)
+
+(define_insn "return_internal"
+ [(use (match_operand:SI 0 "register_operand" "r"))
+ (return)]
+ ""
+ "b %0"
+ [(set_attr "type" "uibranch")]
+)
+
+(define_insn "return"
+ [(return)]
+ "lm32_can_use_return ()"
+ "ret"
+ [(set_attr "type" "uibranch")]
+)
+
+;; ---------------------------------
+;; switch/case statements
+;; ---------------------------------
+
+(define_expand "tablejump"
+ [(set (pc) (match_operand 0 "register_operand" ""))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ rtx target = operands[0];
+ if (flag_pic)
+ {
+ /* For PIC, the table entry is relative to the start of the table. */
+ rtx label = gen_reg_rtx (SImode);
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (label, gen_rtx_LABEL_REF (SImode, operands[1]));
+ emit_insn (gen_addsi3 (target, operands[0], label));
+ }
+ emit_jump_insn (gen_tablejumpsi (target, operands[1]));
+ DONE;
+}")
+
+(define_insn "tablejumpsi"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "b %0"
+ [(set_attr "type" "ubranch")]
+)
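+
+;; This is where a dense C switch statement lands: the table entry is
+;; loaded into a register and branched to.  With -fPIC the entries are
+;; offsets from the start of the table, hence the extra addition in the
+;; tablejump expander above.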
+
+;; ---------------------------------
+;; arithmetic
+;; ---------------------------------
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ ""
+ "@
+ add %0, %z1, %2
+ addi %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "register_or_zero_operand" "rJ")))]
+ ""
+ "sub %0, %z1, %z2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_K_operand" "r,K")))]
+ "TARGET_MULTIPLY_ENABLED"
+ "@
+ mul %0, %z1, %2
+ muli %0, %z1, %2"
+ [(set_attr "type" "multiply")]
+)
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIVIDE_ENABLED"
+ "divu %0, %z1, %2"
+ [(set_attr "type" "divide")]
+)
+
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umod:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIVIDE_ENABLED"
+ "modu %0, %z1, %2"
+ [(set_attr "type" "divide")]
+)
+
+;; ---------------------------------
+;; negation and inversion
+;; ---------------------------------
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")))]
+ ""
+ "sub %0, r0, %z1"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")))]
+ ""
+ "not %0, %z1"
+ [(set_attr "type" "arith")]
+)
+
+;; ---------------------------------
+;; logical
+;; ---------------------------------
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ ""
+ "@
+ and %0, %z1, %2
+ andi %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ ""
+ "@
+ or %0, %z1, %2
+ ori %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (xor:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ ""
+ "@
+ xor %0, %z1, %2
+ xori %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "*norsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (not:SI (ior:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L"))))]
+ ""
+ "@
+ nor %0, %z1, %2
+ nori %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+(define_insn "*xnorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (not:SI (xor:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L"))))]
+ ""
+ "@
+ xnor %0, %z1, %2
+ xnori %0, %z1, %2"
+ [(set_attr "type" "arith")]
+)
+
+;; ---------------------------------
+;; shifts
+;; ---------------------------------
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashift:SI (match_operand:SI 1 "register_or_zero_operand" "")
+ (match_operand:SI 2 "register_or_L_operand" "")))]
+ ""
+{
+ if (!TARGET_BARREL_SHIFT_ENABLED)
+ {
+ if (!optimize_size
+ && satisfies_constraint_L (operands[2])
+ && INTVAL (operands[2]) <= 8)
+ {
+ int i;
+ int shifts = INTVAL (operands[2]);
+ rtx one = GEN_INT (1);
+
+ if (shifts == 0)
+ emit_move_insn (operands[0], operands[1]);
+ else
+ emit_insn (gen_addsi3 (operands[0], operands[1], operands[1]));
+ for (i = 1; i < shifts; i++)
+ emit_insn (gen_addsi3 (operands[0], operands[0], operands[0]));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+})
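+
+;; For example, without -mbarrel-shift-enabled and when not optimizing for
+;; size, "x << 3" is open-coded above as three successive additions of the
+;; value to itself; larger or non-constant shift counts FAIL and fall back
+;; to an out-of-line library routine.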
+
+(define_insn "*ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ "TARGET_BARREL_SHIFT_ENABLED"
+ "@
+ sl %0, %z1, %2
+ sli %0, %z1, %2"
+ [(set_attr "type" "shift")]
+)
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "")
+ (match_operand:SI 2 "register_or_L_operand" "")))]
+ ""
+{
+ if (!TARGET_BARREL_SHIFT_ENABLED)
+ {
+ if (!optimize_size
+ && satisfies_constraint_L (operands[2])
+ && INTVAL (operands[2]) <= 8)
+ {
+ int i;
+ int shifts = INTVAL (operands[2]);
+ rtx one = GEN_INT (1);
+
+ if (shifts == 0)
+ emit_move_insn (operands[0], operands[1]);
+ else
+ emit_insn (gen_ashrsi3_1bit (operands[0], operands[1], one));
+ for (i = 1; i < shifts; i++)
+ emit_insn (gen_ashrsi3_1bit (operands[0], operands[0], one));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+})
+
+(define_insn "*ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ "TARGET_BARREL_SHIFT_ENABLED"
+ "@
+ sr %0, %z1, %2
+ sri %0, %z1, %2"
+ [(set_attr "type" "shift")]
+)
+
+(define_insn "ashrsi3_1bit"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "constant_M_operand" "M")))]
+ "!TARGET_BARREL_SHIFT_ENABLED"
+ "sri %0, %z1, %2"
+ [(set_attr "type" "shift")]
+)
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "")
+ (match_operand:SI 2 "register_or_L_operand" "")))]
+ ""
+{
+ if (!TARGET_BARREL_SHIFT_ENABLED)
+ {
+ if (!optimize_size
+ && satisfies_constraint_L (operands[2])
+ && INTVAL (operands[2]) <= 8)
+ {
+ int i;
+ int shifts = INTVAL (operands[2]);
+ rtx one = GEN_INT (1);
+
+ if (shifts == 0)
+ emit_move_insn (operands[0], operands[1]);
+ else
+ emit_insn (gen_lshrsi3_1bit (operands[0], operands[1], one));
+ for (i = 1; i < shifts; i++)
+ emit_insn (gen_lshrsi3_1bit (operands[0], operands[0], one));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+})
+
+(define_insn "*lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
+ (match_operand:SI 2 "register_or_L_operand" "r,L")))]
+ "TARGET_BARREL_SHIFT_ENABLED"
+ "@
+ sru %0, %z1, %2
+ srui %0, %z1, %2"
+ [(set_attr "type" "shift")]
+)
+
+(define_insn "lshrsi3_1bit"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "constant_M_operand" "M")))]
+ "!TARGET_BARREL_SHIFT_ENABLED"
+ "srui %0, %z1, %2"
+ [(set_attr "type" "shift")]
+)
+
+;; ---------------------------------
+;; function entry / exit
+;; ---------------------------------
+
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "
+{
+ lm32_expand_prologue ();
+ DONE;
+}")
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "
+{
+ lm32_expand_epilogue ();
+ DONE;
+}")
+
+;; ---------------------------------
+;; nop
+;; ---------------------------------
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "type" "arith")]
+)
+
+;; ---------------------------------
+;; blockage
+;; ---------------------------------
+
+;; Used to stop the scheduler from
+;; scheduling code across certain boundaries.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "length" "0")]
+)
diff --git a/gcc/config/lm32/lm32.opt b/gcc/config/lm32/lm32.opt
new file mode 100644
index 000000000..9efecdd7c
--- /dev/null
+++ b/gcc/config/lm32/lm32.opt
@@ -0,0 +1,40 @@
+; Options for the Lattice Mico32 port of the compiler.
+; Contributed by Jon Beniston <jon@beniston.com>
+;
+; Copyright (C) 2009 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it
+; under the terms of the GNU General Public License as published
+; by the Free Software Foundation; either version 3, or (at your
+; option) any later version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mmultiply-enabled
+Target Report Mask(MULTIPLY_ENABLED)
+Enable multiply instructions
+
+mdivide-enabled
+Target Report Mask(DIVIDE_ENABLED)
+Enable divide and modulus instructions
+
+mbarrel-shift-enabled
+Target Report Mask(BARREL_SHIFT_ENABLED)
+Enable barrel shift instructions
+
+msign-extend-enabled
+Target Report Mask(SIGN_EXTEND_ENABLED)
+Enable sign extend instructions
+
+muser-enabled
+Target Report Mask(USER_ENABLED)
+Enable user-defined instructions
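+
+; These masks gate the corresponding TARGET_* tests in lm32.md and lm32.c:
+; for example, TARGET_MULTIPLY_ENABLED enables mulsi3 and
+; TARGET_BARREL_SHIFT_ENABLED enables the single-instruction shift
+; patterns; without them the expanders fall back to open-coded sequences
+; or library calls.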
diff --git a/gcc/config/lm32/predicates.md b/gcc/config/lm32/predicates.md
new file mode 100644
index 000000000..7137c0114
--- /dev/null
+++ b/gcc/config/lm32/predicates.md
@@ -0,0 +1,77 @@
+;; Predicate definitions for Lattice Mico32 architecture.
+;; Contributed by Jon Beniston <jon@beniston.com>
+;;
+;; Copyright (C) 2009 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_predicate "const0_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_predicate "constant_K_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_K (op)")))
+
+(define_predicate "constant_L_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_L (op)")))
+
+(define_predicate "constant_M_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_M (op)")))
+
+(define_predicate "register_or_zero_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const0_operand")))
+
+(define_predicate "register_or_K_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "constant_K_operand")))
+
+(define_predicate "register_or_L_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "constant_L_operand")))
+
+(define_predicate "register_or_int_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_code "const_int")))
+
+(define_predicate "reloc_operand"
+ (ior (ior (match_code "label_ref")
+ (match_code "symbol_ref"))
+ (match_code "const")))
+
+(define_predicate "symbolic_operand"
+ (ior (match_code "label_ref")
+ (match_code "symbol_ref")))
+
+(define_predicate "no_pic_small_symbol"
+ (match_code "symbol_ref")
+{
+ return !flag_pic && SYMBOL_REF_SMALL_P (op);
+})
+
+(define_predicate "call_operand"
+ (ior (match_code "symbol_ref")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "movsi_rhs_operand"
+ (ior (match_operand 0 "nonimmediate_operand")
+ (ior (match_code "const_int")
+ (ior (match_test "satisfies_constraint_S (op)")
+ (match_test "satisfies_constraint_Y (op)")))))
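+
+;; movsi_rhs_operand mirrors what the movsi_insn alternatives in lm32.md
+;; accept: registers and memory, const_ints, small-data symbols
+;; (constraint "S", the "mva" alternative) and high parts of symbols
+;; (constraint "Y", the "orhi" alternative).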
diff --git a/gcc/config/lm32/rtems.h b/gcc/config/lm32/rtems.h
new file mode 100644
index 000000000..44a527b8d
--- /dev/null
+++ b/gcc/config/lm32/rtems.h
@@ -0,0 +1,32 @@
+/* Definitions for RTEMS targeting an lm32 using ELF.
+   Copyright (C) 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Target OS builtins. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_assert ("system=rtems"); \
+ } \
+ while (0)
+
+/* Use the default LINK_GCC_C_SEQUENCE_SPEC.  */
+#undef LINK_GCC_C_SEQUENCE_SPEC
diff --git a/gcc/config/lm32/sfp-machine.h b/gcc/config/lm32/sfp-machine.h
new file mode 100644
index 000000000..190384854
--- /dev/null
+++ b/gcc/config/lm32/sfp-machine.h
@@ -0,0 +1,51 @@
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* Someone please check this. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#define __BYTE_ORDER __BIG_ENDIAN
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
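+
+/* For instance (alias name chosen arbitrarily),
+   strong_alias (__addsf3, __addsf3_alias) expands to:
+   extern __typeof (__addsf3) __addsf3_alias __attribute__ ((alias ("__addsf3")));  */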
diff --git a/gcc/config/lm32/t-fprules-softfp b/gcc/config/lm32/t-fprules-softfp
new file mode 100644
index 000000000..f99f51cfd
--- /dev/null
+++ b/gcc/config/lm32/t-fprules-softfp
@@ -0,0 +1,5 @@
+softfp_float_modes := sf df
+softfp_int_modes := si di
+softfp_extensions := sfdf
+softfp_truncations := dfsf
+softfp_machine_header := lm32/sfp-machine.h
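+
+# These variables are picked up by the shared soft-fp make rules: build SF
+# and DF arithmetic, SI and DI conversions, and SF<->DF extension and
+# truncation routines, all using lm32/sfp-machine.h above.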
diff --git a/gcc/config/lm32/t-lm32 b/gcc/config/lm32/t-lm32
new file mode 100644
index 000000000..ec9a18b73
--- /dev/null
+++ b/gcc/config/lm32/t-lm32
@@ -0,0 +1,2 @@
+# for multilib
+MULTILIB_OPTIONS = mbarrel-shift-enabled mmultiply-enabled mdivide-enabled msign-extend-enabled
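+# The options above are independent, so (unless pruned elsewhere) a
+# multilib variant is built for every combination of these four flags.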
diff --git a/gcc/config/lm32/uclinux-elf.h b/gcc/config/lm32/uclinux-elf.h
new file mode 100644
index 000000000..ce3689b17
--- /dev/null
+++ b/gcc/config/lm32/uclinux-elf.h
@@ -0,0 +1,84 @@
+/* Definitions for LM32 running Linux-based GNU systems using ELF
+ Copyright (C) 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+ 2009, 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* elfos.h should have already been included. Now just override
+ any conflicting definitions and add any extras. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (LM32 GNU/Linux with ELF)", stderr);
+
+/* Do not assume anything about header files. */
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C
+
+/* The GNU C++ standard library requires that these macros be defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+/* Now we define the strings used to build the spec file. */
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{pthread:-lpthread} \
+ %{shared:-lc} \
+ %{!shared:-lc} "
+
+#define LIBGCC_SPEC "-lgcc"
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux.  Here we add
+   the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+   provides part of the support for getting C++ file-scope static
+   objects constructed before entering `main'.  */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+ %{!p:%{profile:gcrt1.o%s} \
+ %{!profile:crt1.o%s}}}} \
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux.  Here we tack on
+   the GNU/Linux magical crtend.o file (see crtstuff.c) which
+   provides part of the support for getting C++ file-scope static
+   objects constructed before entering `main', followed by a normal
+   GNU/Linux "finalizer" file, `crtn.o'.  */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
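+
+/* Taken together, a typical non-shared, non-profiled link therefore lays
+   out the CRT objects roughly as
+     crt1.o crti.o crtbegin.o <objects and libraries> crtend.o crtn.o
+   (a sketch of the default case; the pg/p/profile and shared variants
+   substitute gcrt1.o or the S-suffixed files as selected above).  */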
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{h*} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker /lib/ld-linux.so.2"
+
+#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{G*} %{!fno-PIC:-fPIC}"
+