Diffstat (limited to 'gcc/config/mn10300/mn10300.md')
-rw-r--r-- gcc/config/mn10300/mn10300.md | 2154
1 file changed, 2154 insertions, 0 deletions
diff --git a/gcc/config/mn10300/mn10300.md b/gcc/config/mn10300/mn10300.md
new file mode 100644
index 000000000..3d8e91470
--- /dev/null
+++ b/gcc/config/mn10300/mn10300.md
@@ -0,0 +1,2154 @@
+;; GCC machine description for Matsushita MN10300
+;; Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+;; 2005, 2006, 2007, 2008, 2009, 2010, 2011
+;; Free Software Foundation, Inc.
+;; Contributed by Jeff Law (law@cygnus.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The original PO technology requires these to be ordered by speed,
+;; so that the assigner will pick the fastest.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+(define_constants [
+ (PIC_REG 6)
+ (SP_REG 9)
+ (MDR_REG 50)
+ (CC_REG 51)
+
+ (UNSPEC_PIC 1)
+ (UNSPEC_GOT 2)
+ (UNSPEC_GOTOFF 3)
+ (UNSPEC_PLT 4)
+ (UNSPEC_GOTSYM_OFF 5)
+
+ (UNSPEC_EXT 6)
+ (UNSPEC_BSCH 7)
+
+ ;; This is used to encode LIW patterns.
+ (UNSPEC_LIW 8)
+])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Processor type. This attribute must exactly match the processor_type
+;; enumeration in mn10300.h.
+(define_attr "cpu" "mn10300,am33,am33_2,am34"
+ (const (symbol_ref "(enum attr_cpu) mn10300_tune_cpu")))
+
+;; Used to control the "enabled" attribute on a per-instruction basis.
+(define_attr "isa" "base,am33,am33_2,am34"
+ (const_string "base"))
+
+(define_attr "enabled" ""
+ (cond [(eq_attr "isa" "base")
+ (const_int 1)
+
+ (and (eq_attr "isa" "am33")
+ (ne (symbol_ref "TARGET_AM33") (const_int 0)))
+ (const_int 1)
+
+ (and (eq_attr "isa" "am33_2")
+ (ne (symbol_ref "TARGET_AM33_2") (const_int 0)))
+ (const_int 1)
+
+ (and (eq_attr "isa" "am34")
+ (ne (symbol_ref "TARGET_AM34") (const_int 0)))
+ (const_int 1)
+ ]
+ (const_int 0))
+)
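+
+;; For example, an alternative tagged with (set_attr "isa" "am33") in the
+;; patterns below is enabled only when TARGET_AM33 is true; on the plain
+;; MN103 that alternative is never used.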
+
+(define_mode_iterator INT [QI HI SI])
+
+
+;; Bundling of smaller insns into a long instruction word (LIW)
+(define_automaton "liw_bundling")
+(automata_option "ndfa")
+
+(define_cpu_unit "liw_op1_u,liw_op2_u" "liw_bundling")
+
+(define_attr "liw" "op1,op2,both,either"
+ (const_string "both"))
+;; Note: this list must match the one defined for liw_op_names[].
+(define_attr "liw_op" "add,cmp,sub,mov,and,or,xor,asr,lsr,asl,none,max"
+ (const_string "none"))
+
+(define_insn_reservation "liw_op1" 1
+ (and (ior (eq_attr "cpu" "am33")
+ (eq_attr "cpu" "am33_2")
+ (eq_attr "cpu" "am34"))
+ (eq_attr "liw" "op1"))
+ "liw_op1_u");
+(define_insn_reservation "liw_op2" 1
+ (and (ior (eq_attr "cpu" "am33")
+ (eq_attr "cpu" "am33_2")
+ (eq_attr "cpu" "am34"))
+ (eq_attr "liw" "op2"))
+ "liw_op2_u");
+(define_insn_reservation "liw_both" 1
+ (and (ior (eq_attr "cpu" "am33")
+ (eq_attr "cpu" "am33_2")
+ (eq_attr "cpu" "am34"))
+ (eq_attr "liw" "both"))
+ "liw_op1_u + liw_op2_u");
+(define_insn_reservation "liw_either" 1
+ (and (ior (eq_attr "cpu" "am33")
+ (eq_attr "cpu" "am33_2")
+ (eq_attr "cpu" "am34"))
+ (eq_attr "liw" "either"))
+ "liw_op1_u | liw_op2_u");
+
+;; ----------------------------------------------------------------------
+;; Pipeline description.
+;; ----------------------------------------------------------------------
+
+;; The AM33 only has a single pipeline. It has five stages (fetch,
+;; decode, execute, memory access, writeback) each of which normally
+;; takes a single CPU clock cycle.
+
+;; The timings attribute consists of two numbers: the first is the
+;; throughput, which is the number of cycles the instruction takes
+;; to execute and generate a result.  The second is the latency,
+;; which is the effective number of cycles the instruction takes to
+;; execute if its result is used by the following instruction.  The
+;; latency is always greater than or equal to the throughput.
+;; These values were taken from the Appendix of the "MN103E Series
+;; Instruction Manual", together with the timings for the AM34.
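+;;
+;; For example, a "timings" value of 13 below denotes a throughput of 1
+;; cycle and a latency of 3 cycles, and 4243 denotes a throughput of 42
+;; cycles and a latency of 43 cycles: the throughput digits are simply
+;; followed by the latency digits in one integer.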
+
+;; Note - it would be nice to use strings rather than integers for
+;; the possible values of this attribute, so that we can have the
+;; gcc build mechanism check for values that are not supported by
+;; the reservations below. But this will not work because the code
+;; in mn10300_adjust_sched_cost() needs integers, not strings.
+
+(define_attr "timings" "" (const_int 11))
+
+(define_automaton "pipelining")
+(define_cpu_unit "throughput" "pipelining")
+
+(define_insn_reservation "throughput__1_latency__1" 1
+ (eq_attr "timings" "11") "throughput")
+(define_insn_reservation "throughput__1_latency__2" 2
+ (eq_attr "timings" "12") "throughput,nothing")
+(define_insn_reservation "throughput__1_latency__3" 3
+ (eq_attr "timings" "13") "throughput,nothing*2")
+(define_insn_reservation "throughput__1_latency__4" 4
+ (eq_attr "timings" "14") "throughput,nothing*3")
+(define_insn_reservation "throughput__2_latency__2" 2
+ (eq_attr "timings" "22") "throughput*2")
+(define_insn_reservation "throughput__2_latency__3" 3
+ (eq_attr "timings" "23") "throughput*2,nothing")
+(define_insn_reservation "throughput__2_latency__4" 4
+ (eq_attr "timings" "24") "throughput*2,nothing*2")
+(define_insn_reservation "throughput__2_latency__5" 5
+ (eq_attr "timings" "25") "throughput*2,nothing*3")
+(define_insn_reservation "throughput__3_latency__3" 3
+ (eq_attr "timings" "33") "throughput*3")
+(define_insn_reservation "throughput__3_latency__7" 7
+ (eq_attr "timings" "37") "throughput*3,nothing*4")
+(define_insn_reservation "throughput__4_latency__4" 4
+ (eq_attr "timings" "44") "throughput*4")
+(define_insn_reservation "throughput__4_latency__7" 7
+ (eq_attr "timings" "47") "throughput*4,nothing*3")
+(define_insn_reservation "throughput__4_latency__8" 8
+ (eq_attr "timings" "48") "throughput*4,nothing*4")
+(define_insn_reservation "throughput__5_latency__5" 5
+ (eq_attr "timings" "55") "throughput*5")
+(define_insn_reservation "throughput__6_latency__6" 6
+ (eq_attr "timings" "66") "throughput*6")
+(define_insn_reservation "throughput__7_latency__7" 7
+ (eq_attr "timings" "77") "throughput*7")
+(define_insn_reservation "throughput__7_latency__8" 8
+ (eq_attr "timings" "78") "throughput*7,nothing")
+(define_insn_reservation "throughput__8_latency__8" 8
+ (eq_attr "timings" "88") "throughput*8")
+(define_insn_reservation "throughput__9_latency__9" 9
+ (eq_attr "timings" "99") "throughput*9")
+(define_insn_reservation "throughput__8_latency_14" 14
+ (eq_attr "timings" "814") "throughput*8,nothing*6")
+(define_insn_reservation "throughput__9_latency_10" 10
+ (eq_attr "timings" "910") "throughput*9,nothing")
+(define_insn_reservation "throughput_10_latency_10" 10
+ (eq_attr "timings" "1010") "throughput*10")
+(define_insn_reservation "throughput_12_latency_16" 16
+ (eq_attr "timings" "1216") "throughput*12,nothing*4")
+(define_insn_reservation "throughput_13_latency_13" 13
+ (eq_attr "timings" "1313") "throughput*13")
+(define_insn_reservation "throughput_14_latency_14" 14
+ (eq_attr "timings" "1414") "throughput*14")
+(define_insn_reservation "throughput_13_latency_17" 17
+ (eq_attr "timings" "1317") "throughput*13,nothing*4")
+(define_insn_reservation "throughput_23_latency_27" 27
+ (eq_attr "timings" "2327") "throughput*23,nothing*4")
+(define_insn_reservation "throughput_25_latency_31" 31
+ (eq_attr "timings" "2531") "throughput*25,nothing*6")
+(define_insn_reservation "throughput_38_latency_39" 39
+ (eq_attr "timings" "3839") "throughput*38,nothing")
+(define_insn_reservation "throughput_39_latency_40" 40
+ (eq_attr "timings" "3940") "throughput*39,nothing")
+(define_insn_reservation "throughput_40_latency_40" 40
+ (eq_attr "timings" "4040") "throughput*40")
+(define_insn_reservation "throughput_41_latency_42" 42
+ (eq_attr "timings" "4142") "throughput*41,nothing")
+(define_insn_reservation "throughput_42_latency_43" 44
+ (eq_attr "timings" "4243") "throughput*42,nothing")
+(define_insn_reservation "throughput_43_latency_44" 44
+ (eq_attr "timings" "4344") "throughput*43,nothing")
+(define_insn_reservation "throughput_45_latency_46" 46
+ (eq_attr "timings" "4546") "throughput*45,nothing")
+(define_insn_reservation "throughput_47_latency_53" 53
+ (eq_attr "timings" "4753") "throughput*47,nothing*6")
+
+;; Note - the conflict between memory load/store instructions
+;; and floating point instructions described in section 1-7-4
+;; of Chapter 3 of the MN103E Series Instruction Manual is
+;; handled by the mn10300_adjust_sched_cost function.
+
+;; ----------------------------------------------------------------------
+;; MOVE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; movqi
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (match_operand:QI 1 "general_operand"))]
+ ""
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand0, QImode)
+ && !register_operand (operand1, QImode))
+ operands[1] = force_reg (QImode, operand1);
+})
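+
+;; Illustrative example (the C source below is assumed, not from the
+;; manual): a QImode memory-to-memory copy such as
+;;   void copy1 (char *d, const char *s) { *d = *s; }
+;; cannot be done in a single insn, so the expander forces the source into
+;; a register; the load and the store then match the "movbu %1,%0"
+;; alternatives of *movqi_internal below.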
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=*r,D*r,D*r,D,m")
+ (match_operand:QI 1 "general_operand" " 0,D*r, i,m,D"))]
+ "(register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "";
+ case 1:
+ case 2:
+ return "mov %1,%0";
+ case 3:
+ case 4:
+ return "movbu %1,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr_alternative "timings"
+ [(const_int 11)
+ (const_int 11)
+ (const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 22))
+ ])]
+)
+
+;; movhi
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand")
+ (match_operand:HI 1 "general_operand"))]
+ ""
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand1, HImode)
+ && !register_operand (operand0, HImode))
+ operands[1] = force_reg (HImode, operand1);
+})
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=*r,D*r,D*r,D,m")
+ (match_operand:HI 1 "general_operand" " 0, i,D*r,m,D"))]
+ "(register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "";
+ case 1:
+ /* Note that "MOV imm8,An" is already zero-extending, and is 2 bytes.
+ We have "MOV imm16,Dn" at 3 bytes. The only win for the 4 byte
+ movu is for an 8-bit unsigned move into Rn. */
+ if (TARGET_AM33
+ && CONST_INT_P (operands[1])
+ && IN_RANGE (INTVAL (operands[1]), 0x80, 0xff)
+ && REGNO_EXTENDED_P (REGNO (operands[0]), 1))
+ return "movu %1,%0";
+ /* FALLTHRU */
+ case 2:
+ return "mov %1,%0";
+ case 3:
+ case 4:
+ return "movhu %1,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr_alternative "timings"
+ [(const_int 11)
+ (const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 22))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 22))
+ ])]
+)
+
+;; movsi and helpers
+
+;; We use this to handle addition of two values when one operand is the
+;; stack pointer and the other is a memory reference of some kind. Reload
+;; does not handle them correctly without this expander.
+(define_expand "reload_plus_sp_const"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "impossible_plus_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" "=&A"))]
+ ""
+{
+ rtx dest, scratch, other;
+
+ dest = operands[0];
+ scratch = operands[2];
+
+ other = XEXP (operands[1], 1);
+ if (other == stack_pointer_rtx)
+ other = XEXP (operands[1], 0);
+
+ if (true_regnum (other) == true_regnum (dest))
+ {
+ gcc_assert (true_regnum (scratch) != true_regnum (dest));
+ emit_move_insn (scratch, stack_pointer_rtx);
+ emit_insn (gen_addsi3 (dest, dest, scratch));
+ }
+ else if (TARGET_AM33 || REGNO_REG_CLASS (true_regnum (dest)) == ADDRESS_REGS)
+ {
+ emit_move_insn (dest, stack_pointer_rtx);
+ if (other == stack_pointer_rtx)
+ emit_insn (gen_addsi3 (dest, dest, dest));
+ else if (other != const0_rtx)
+ emit_insn (gen_addsi3 (dest, dest, other));
+ }
+ else
+ {
+ emit_move_insn (scratch, stack_pointer_rtx);
+ if (other == stack_pointer_rtx)
+ {
+ emit_move_insn (dest, scratch);
+ emit_insn (gen_addsi3 (dest, dest, dest));
+ }
+ else if (other != const0_rtx)
+ {
+ emit_move_insn (dest, other);
+ emit_insn (gen_addsi3 (dest, dest, scratch));
+ }
+ else
+ emit_move_insn (dest, scratch);
+ }
+ DONE;
+})
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand")
+ (match_operand:SI 1 "general_operand"))]
+ ""
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand1, SImode)
+ && !register_operand (operand0, SImode))
+ operands[1] = force_reg (SImode, operand1);
+ if (flag_pic)
+ {
+ rtx temp;
+ if (SYMBOLIC_CONST_P (operands[1]))
+ {
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (Pmode, operands[1]);
+ else
+ {
+ temp = (!can_create_pseudo_p ()
+ ? operands[0]
+ : gen_reg_rtx (Pmode));
+ operands[1] = mn10300_legitimize_pic_address (operands[1], temp);
+ }
+ }
+ else if (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
+ {
+ temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
+ temp = mn10300_legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
+ temp);
+ operands[1] = expand_binop (SImode, add_optab, temp,
+ XEXP (XEXP (operands[1], 0), 1),
+ (!can_create_pseudo_p ()
+ ? temp
+ : gen_reg_rtx (Pmode)),
+ 0, OPTAB_LIB_WIDEN);
+ }
+ }
+})
+
+(define_insn "*movsi_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand"
+ "=r,r,r,r,m,r, A,*y,*y,*z,*d")
+ (match_operand:SI 1 "general_operand"
+ " 0,O,i,r,r,m,*y, A, i,*d,*z"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "";
+ case 1: /* imm-reg. */
+ case 2:
+ /* See movhi for a discussion of sizes for 8-bit movu. Note that the
+ 24-bit movu is 6 bytes, which is the same size as the full 32-bit
+ mov form for An and Dn. So again movu is only a win for Rn. */
+ if (TARGET_AM33
+ && CONST_INT_P (operands[1])
+ && REGNO_EXTENDED_P (REGNO (operands[0]), 1))
+ {
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ if (IN_RANGE (val, 0x80, 0xff)
+ || IN_RANGE (val, 0x800000, 0xffffff))
+ return "movu %1,%0";
+ }
+ /* FALLTHRU */
+ case 3: /* reg-reg */
+ case 4: /* reg-mem */
+ case 5: /* mem-reg */
+ case 6: /* sp-reg */
+ case 7: /* reg-sp */
+ case 8: /* imm-sp */
+ case 9: /* reg-mdr */
+ case 10: /* mdr-reg */
+ return "mov %1,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "isa" "*,*,*,*,*,*,*,*,am33,*,*")
+ (set_attr "liw" "*,either,*,either,*,*,*,*,*,*,*")
+ (set_attr "liw_op" "mov")
+ (set_attr_alternative "timings"
+ [(const_int 11)
+ (const_int 22)
+ (const_int 22)
+ (const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 22))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 22))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (const_int 11)
+ (const_int 11)
+ (const_int 11)
+ ])]
+)
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand")
+ (match_operand:SF 1 "general_operand"))]
+ "TARGET_AM33_2"
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand1, SFmode)
+ && !register_operand (operand0, SFmode))
+ operands[1] = force_reg (SFmode, operand1);
+})
+
+(define_insn "*movsf_internal"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=rf,r,f,r,f,r,f,r,m,f,Q")
+ (match_operand:SF 1 "general_operand" " 0,F,F,r,f,f,r,m,r,Q,f"))]
+ "TARGET_AM33_2
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "";
+ case 1:
+ case 3:
+ case 7:
+ case 8:
+ return "mov %1,%0";
+ case 2:
+ case 4:
+ case 5:
+ case 6:
+ case 9:
+ case 10:
+ return "fmov %1,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr_alternative "timings"
+ [(const_int 11)
+ (const_int 22)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 47) (const_int 25))
+ (const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 14))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 12))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 14))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ ])]
+)
+
+;; If the flags register is not live, generate CLR instead of MOV 0.
+;; For MN103, this is only legal for DATA_REGS; for AM33 this is legal
+;; but not a win for ADDRESS_REGS.
+(define_peephole2
+ [(set (match_operand:INT 0 "register_operand" "") (const_int 0))]
+ "peep2_regno_dead_p (0, CC_REG)
+ && (REGNO_DATA_P (REGNO (operands[0]), 1)
+ || REGNO_EXTENDED_P (REGNO (operands[0]), 1))"
+ [(parallel [(set (match_dup 0) (const_int 0))
+ (clobber (reg:CC CC_REG))])]
+)
+
+(define_insn "*mov<mode>_clr"
+ [(set (match_operand:INT 0 "register_operand" "=D")
+ (const_int 0))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "clr %0"
+)
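+
+;; For example, when the flags are dead a (set (reg:SI d0) (const_int 0))
+;; that would otherwise be emitted as "mov 0,d0" is rewritten by the
+;; peephole above into "clr d0"; the CC clobber added to the pattern
+;; records that clr, unlike mov, modifies the flags.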
+
+;; ----------------------------------------------------------------------
+;; ADD INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,!*y,!r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,0, 0, r")
+ (match_operand:SI 2 "nonmemory_operand" "r,O,i, i, r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ { return mn10300_output_add (operands, false); }
+ [(set_attr "timings" "11,11,11,11,22")
+ (set_attr "liw" "either,either,*,*,*")
+ (set_attr "liw_op" "add")]
+)
+
+;; Note that ADD IMM,SP does not set the flags, so omit that here.
+(define_insn "*addsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,!r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0, r")
+ (match_operand:SI 2 "nonmemory_operand" "ri, r")))
+ (set (reg CC_REG)
+ (compare (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNCmode)"
+ { return mn10300_output_add (operands, true); }
+ [(set_attr "timings" "11,22")]
+)
+
+;; A helper to expand the above, with the CC_MODE filled in.
+(define_expand "addsi3_flags"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (plus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")))
+ (set (reg:CCZNC CC_REG)
+ (compare:CCZNC (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))])]
+ ""
+)
+
+(define_insn "addc_internal"
+ [(set (match_operand:SI 0 "register_operand" "=D,r,r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (reg:CC CC_REG) (const_int 0))
+ (match_operand:SI 1 "register_operand" "%0,0,r"))
+ (match_operand:SI 2 "reg_or_am33_const_operand" " D,i,r")))
+ (clobber (reg:CC CC_REG))]
+ "reload_completed"
+ "@
+ addc %2,%0
+ addc %2,%0
+ addc %2,%1,%0"
+ [(set_attr "isa" "*,am33,am33")]
+)
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonmemory_operand" "")))]
+ ""
+{
+ rtx op0l, op0h, op1l, op1h, op2l, op2h;
+
+ op0l = gen_lowpart (SImode, operands[0]);
+ op1l = gen_lowpart (SImode, operands[1]);
+ op2l = gen_lowpart (SImode, operands[2]);
+ op0h = gen_highpart (SImode, operands[0]);
+ op1h = gen_highpart (SImode, operands[1]);
+ op2h = gen_highpart_mode (SImode, DImode, operands[2]);
+
+ if (!reg_or_am33_const_operand (op2h, SImode))
+ op2h = force_reg (SImode, op2h);
+
+ emit_insn (gen_adddi3_internal (op0l, op0h, op1l, op2l, op1h, op2h));
+ DONE;
+})
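+
+;; Illustrative sketch (register assignment is up to the allocator): a
+;; 64-bit addition such as
+;;   long long add64 (long long a, long long b) { return a + b; }
+;; goes through adddi3_internal below and, once split, becomes an "add" of
+;; the low words that sets the carry followed by an "addc" of the high
+;; words.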
+
+;; Note that reload only supports one commutative operand. Thus we cannot
+;; auto-swap both the high and low outputs with their matching constraints.
+;; For MN103, we're strapped for registers but thankfully the alternatives
+;; are few.  For AM33, rather than representing the early clobber and the
+;; six permutations of immediate and three-operand adds, it is much easier
+;; to allocate a scratch register and do the expansion by hand.
+
+(define_insn_and_split "adddi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r, r, r")
+ (plus:SI (match_operand:SI 2 "register_operand" "%0, 0, r")
+ (match_operand:SI 3 "nonmemory_operand" "ri,ri,ri")))
+ (set (match_operand:SI 1 "register_operand" "=D, D, r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (plus:SI (match_dup 2) (match_dup 3)) (match_dup 2))
+ (match_operand:SI 4 "register_operand" " 1, D, r"))
+ (match_operand:SI 5 "reg_or_am33_const_operand" " D, 1,ri")))
+ (clobber (match_scratch:SI 6 "=X, X,&r"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx op0l = operands[0];
+ rtx op0h = operands[1];
+ rtx op1l = operands[2];
+ rtx op2l = operands[3];
+ rtx op1h = operands[4];
+ rtx op2h = operands[5];
+ rtx scratch = operands[6];
+ rtx x;
+
+ if (reg_overlap_mentioned_p (op0l, op1h))
+ {
+ emit_move_insn (scratch, op0l);
+ op1h = scratch;
+ if (reg_overlap_mentioned_p (op0l, op2h))
+ op2h = scratch;
+ }
+ else if (reg_overlap_mentioned_p (op0l, op2h))
+ {
+ emit_move_insn (scratch, op0l);
+ op2h = scratch;
+ }
+
+ if (rtx_equal_p (op0l, op1l))
+ ;
+ else if (rtx_equal_p (op0l, op2l))
+ x = op1l, op1l = op2l, op2l = x;
+ else
+ {
+ gcc_assert (TARGET_AM33);
+ if (!REG_P (op2l))
+ {
+ emit_move_insn (op0l, op2l);
+ op2l = op1l;
+ op1l = op0l;
+ }
+ }
+ emit_insn (gen_addsi3_flags (op0l, op1l, op2l));
+
+ if (rtx_equal_p (op0h, op1h))
+ ;
+ else if (rtx_equal_p (op0h, op2h))
+ x = op1h, op1h = op2h, op2h = x;
+ else
+ {
+ gcc_assert (TARGET_AM33);
+ if (!REG_P (op2h))
+ {
+ emit_move_insn (op0h, op2h);
+ op2h = op1h;
+ op1h = op0h;
+ }
+ }
+ emit_insn (gen_addc_internal (op0h, op1h, op2h));
+ DONE;
+}
+ [(set_attr "isa" "*,*,am33")]
+)
+
+;; The following pattern is generated by combine when it proves that one
+;; of the inputs to the low-part of the double-word add is zero, and thus
+;; no carry is generated into the high-part.
+
+(define_insn_and_split "*adddi3_degenerate"
+ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
+ (match_operand:SI 2 "nonmemory_operand" " 0, 0"))
+ (set (match_operand:SI 1 "register_operand" "=r , r")
+ (plus:SI (match_operand:SI 3 "register_operand" "%1 , r")
+ (match_operand:SI 4 "nonmemory_operand" "ri, r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(const_int 0)]
+{
+ rtx scratch = NULL_RTX;
+ if (!rtx_equal_p (operands[0], operands[2]))
+ {
+ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1]));
+ if (reg_overlap_mentioned_p (operands[0], operands[3])
+ || reg_overlap_mentioned_p (operands[0], operands[4]))
+ {
+ scratch = gen_reg_rtx (SImode);
+ emit_move_insn (scratch, operands[2]);
+ }
+ else
+ emit_move_insn (operands[0], operands[2]);
+ }
+ emit_insn (gen_addsi3 (operands[1], operands[3], operands[4]));
+ if (scratch)
+ emit_move_insn (operands[0], scratch);
+ DONE;
+})
+
+;; ----------------------------------------------------------------------
+;; SUBTRACT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,O,i,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ sub %2,%0
+ sub %2,%0
+ sub %2,%0
+ sub %2,%1,%0"
+ [(set_attr "isa" "*,*,*,am33")
+ (set_attr "liw" "either,either,*,*")
+ (set_attr "liw_op" "sub")
+ (set_attr "timings" "11,11,11,22")]
+)
+
+(define_insn "*subsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r, r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0, r")
+ (match_operand:SI 2 "nonmemory_operand" "ri,r")))
+ (set (reg CC_REG)
+ (compare (minus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNCmode)"
+ "@
+ sub %2,%0
+ sub %2,%1,%0"
+ [(set_attr "isa" "*,am33")
+ (set_attr "timings" "11,22")]
+)
+
+;; A helper to expand the above, with the CC_MODE filled in.
+(define_expand "subsi3_flags"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (minus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")))
+ (set (reg:CCZNC CC_REG)
+ (compare:CCZNC (minus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))])]
+ ""
+)
+
+(define_insn "subc_internal"
+ [(set (match_operand:SI 0 "register_operand" "=D,r,r")
+ (minus:SI
+ (minus:SI (match_operand:SI 1 "register_operand" " 0,0,r")
+ (match_operand:SI 2 "reg_or_am33_const_operand" " D,i,r"))
+ (geu:SI (reg:CC CC_REG) (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ "reload_completed"
+ "@
+ subc %2,%0
+ subc %2,%0
+ subc %2,%1,%0"
+ [(set_attr "isa" "*,am33,am33")]
+)
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonmemory_operand" "")))]
+ ""
+{
+ rtx op0l, op0h, op1l, op1h, op2l, op2h;
+
+ op0l = gen_lowpart (SImode, operands[0]);
+ op1l = gen_lowpart (SImode, operands[1]);
+ op2l = gen_lowpart (SImode, operands[2]);
+ op0h = gen_highpart (SImode, operands[0]);
+ op1h = gen_highpart (SImode, operands[1]);
+ op2h = gen_highpart_mode (SImode, DImode, operands[2]);
+
+ if (!reg_or_am33_const_operand (op2h, SImode))
+ op2h = force_reg (SImode, op2h);
+
+ emit_insn (gen_subdi3_internal (op0l, op0h, op1l, op1h, op2l, op2h));
+ DONE;
+})
+
+;; As with adddi3, the use of the scratch register helps reduce the
+;; number of permutations for AM33.
+;; ??? The early clobber on op0 avoids a reload bug wherein both output
+;; registers are set the same.  Consider negate, where op2 and op3 are
+;; both 0 and get CSEd to the same input register, and reload fails to
+;; undo the CSE when satisfying the matching constraints.
+
+(define_insn_and_split "subdi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=&r, r")
+ (minus:SI
+ (match_operand:SI 2 "register_operand" " 0, r")
+ (match_operand:SI 4 "nonmemory_operand" " ri,ri")))
+ (set (match_operand:SI 1 "register_operand" "=D , r")
+ (minus:SI
+ (minus:SI
+ (match_operand:SI 3 "register_operand" " 1, r")
+ (match_operand:SI 5 "reg_or_am33_const_operand" " D,ri"))
+ (ltu:SI (match_dup 2) (match_dup 4))))
+ (clobber (match_scratch:SI 6 "=X ,&r"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx op0l = operands[0];
+ rtx op0h = operands[1];
+ rtx op1l = operands[2];
+ rtx op1h = operands[3];
+ rtx op2l = operands[4];
+ rtx op2h = operands[5];
+ rtx scratch = operands[6];
+
+ if (reg_overlap_mentioned_p (op0l, op1h))
+ {
+ emit_move_insn (scratch, op0l);
+ op1h = scratch;
+ if (reg_overlap_mentioned_p (op0l, op2h))
+ op2h = scratch;
+ }
+ else if (reg_overlap_mentioned_p (op0l, op2h))
+ {
+ emit_move_insn (scratch, op0l);
+ op2h = scratch;
+ }
+
+ if (!rtx_equal_p (op0l, op1l))
+ {
+ gcc_assert (TARGET_AM33);
+ if (!REG_P (op2l))
+ {
+ emit_move_insn (op0l, op1l);
+ op1l = op0l;
+ }
+ }
+ emit_insn (gen_subsi3_flags (op0l, op1l, op2l));
+
+ if (!rtx_equal_p (op0h, op1h))
+ {
+ gcc_assert (TARGET_AM33);
+ if (!REG_P (op2h))
+ {
+ emit_move_insn (op0h, op1h);
+ op1h = op0h;
+ }
+ }
+ emit_insn (gen_subc_internal (op0h, op1h, op2h));
+ DONE;
+}
+ [(set_attr "isa" "*,am33")]
+)
+
+;; The following pattern is generated by combine when it proves that one
+;; of the inputs to the low-part of the double-word sub is zero, and thus
+;; no carry is generated into the high-part.
+
+(define_insn_and_split "*subdi3_degenerate"
+ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
+ (match_operand:SI 2 "nonmemory_operand" " 0, 0"))
+ (set (match_operand:SI 1 "register_operand" "=r , r")
+ (minus:SI (match_operand:SI 3 "register_operand" " 1, r")
+ (match_operand:SI 4 "nonmemory_operand" " ri, r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(const_int 0)]
+{
+ rtx scratch = NULL_RTX;
+ if (!rtx_equal_p (operands[0], operands[2]))
+ {
+ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1]));
+ if (reg_overlap_mentioned_p (operands[0], operands[3])
+ || reg_overlap_mentioned_p (operands[0], operands[4]))
+ {
+ scratch = gen_reg_rtx (SImode);
+ emit_move_insn (scratch, operands[2]);
+ }
+ else
+ emit_move_insn (operands[0], operands[2]);
+ }
+ emit_insn (gen_subsi3 (operands[1], operands[3], operands[4]));
+ if (scratch)
+ emit_move_insn (operands[0], scratch);
+ DONE;
+})
+
+(define_insn_and_split "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,&r")
+ (neg:SI (match_operand:SI 1 "register_operand" " 0, r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ /* Recall that two's complement is one's complement plus one.  When
+ allocated in DATA_REGS this is 2+1 bytes; otherwise (for am33)
+ this is 3+3 bytes.
+
+ For AM33, it would have been possible to load zero and use the
+ three-address subtract to have a total size of 3+4*N bytes for
+ multiple negations, plus increased throughput. Not attempted here. */
+
+ if (true_regnum (operands[0]) == true_regnum (operands[1]))
+ {
+ emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
+ emit_insn (gen_addsi3 (operands[0], operands[0], const1_rtx));
+ }
+ else
+ {
+ emit_move_insn (operands[0], const0_rtx);
+ emit_insn (gen_subsi3 (operands[0], operands[0], operands[1]));
+ }
+ DONE;
+})
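+
+;; For example, when the input and output are allocated to the same data
+;; register the split above emits "not" followed by "add 1"; when they
+;; differ it moves zero into the destination and subtracts the operand
+;; from it.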
+
+;; ----------------------------------------------------------------------
+;; MULTIPLY INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; ??? Note that AM33 has a third multiply variant that puts the high part
+;; into the MDRQ register, however this variant also constrains the inputs
+;; to be in DATA_REGS and thus isn't as helpful as it might be considering
+;; the existence of the 4-operand multiply.  Nor is there a set of divide
+;; insns that use MDRQ. Given that there is an IMM->MDRQ insn, this would
+;; have been very handy for starting udivmodsi4...
+
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" ""))))]
+ ""
+{
+ emit_insn (gen_mulsidi3_internal (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0]),
+ operands[1], operands[2]));
+ DONE;
+})
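+
+;; Illustrative example (assumed source): a widening multiply such as
+;;   long long wmul (int a, int b) { return (long long) a * b; }
+;; is expanded through mulsidi3_internal below; on MN103 the single "mul"
+;; leaves the low half of the product in the data register and the high
+;; half in MDR, while the AM33 four-operand form can place both halves in
+;; ordinary registers.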
+
+(define_insn "mulsidi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=D,r")
+ (mult:SI (match_operand:SI 2 "register_operand" "%0,r")
+ (match_operand:SI 3 "register_operand" " D,r")))
+ (set (match_operand:SI 1 "register_operand" "=z,r")
+ (truncate:SI
+ (ashiftrt:DI
+ (mult:DI (sign_extend:DI (match_dup 2))
+ (sign_extend:DI (match_dup 3)))
+ (const_int 32))))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ if (which_alternative == 1)
+ return "mul %2,%3,%1,%0";
+ else if (TARGET_MULT_BUG)
+ return "nop\;nop\;mul %3,%0";
+ else
+ return "mul %3,%0";
+}
+ [(set_attr "isa" "*,am33")
+ (set (attr "timings")
+ (if_then_else (eq_attr "cpu" "am34") (const_int 24) (const_int 23)))]
+)
+
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" ""))))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ emit_insn (gen_umulsidi3_internal (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0]),
+ operands[1], operands[2]));
+ DONE;
+})
+
+(define_insn "umulsidi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=D,r")
+ (mult:SI (match_operand:SI 2 "register_operand" "%0,r")
+ (match_operand:SI 3 "register_operand" " D,r")))
+ (set (match_operand:SI 1 "register_operand" "=z,r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI (match_dup 2))
+ (zero_extend:DI (match_dup 3)))
+ (const_int 32))))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ if (which_alternative == 1)
+ return "mulu %2,%3,%1,%0";
+ else if (TARGET_MULT_BUG)
+ return "nop\;nop\;mulu %3,%0";
+ else
+ return "mulu %3,%0";
+}
+ [(set_attr "isa" "*,am33")
+ (set (attr "timings")
+ (if_then_else (eq_attr "cpu" "am34") (const_int 24) (const_int 23)))]
+)
+
+(define_expand "mulsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (mult:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "reg_or_am33_const_operand")))
+ (clobber (match_scratch:SI 3))
+ (clobber (reg:CC CC_REG))])]
+ ""
+)
+
+(define_insn "*mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D, r,r")
+ (mult:SI (match_operand:SI 2 "register_operand" "%0, 0,r")
+ (match_operand:SI 3 "reg_or_am33_const_operand" " D,ri,r")))
+ (clobber (match_scratch:SI 1 "=z, z,r"))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ if (which_alternative == 2)
+ return "mul %2,%3,%1,%0";
+ else if (TARGET_MULT_BUG)
+ return "nop\;nop\;mul %3,%0";
+ else
+ return "mul %3,%0";
+}
+ [(set_attr "isa" "*,am33,am33")
+ (set (attr "timings")
+ (if_then_else (eq_attr "cpu" "am34") (const_int 24) (const_int 23)))]
+)
+
+(define_expand "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (udiv:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "register_operand")))
+ (set (match_operand:SI 3 "register_operand")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (use (const_int 0))
+ (clobber (reg:CC CC_REG))])]
+ ""
+)
+
+;; Note the trick to get reload to put the zero into the MDR register,
+;; rather than exposing the load early and letting CSE or someone try
+;; to share the zeros between division insns, which tends to result
+;; in sequences like 0->r0->d0->mdr.
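+;;
+;; The divide effectively operates on the 64-bit value MDR:Dn, so for a
+;; plain 32-bit udivmodsi4 MDR must hold zero; the "1" matching constraint
+;; on the (use ...) below lets reload load that zero straight into MDR.
+;; For an assumed source fragment such as
+;;   q = a / b;  r = a % b;   /* both unsigned */
+;; a single "divu" then yields the quotient in the data register and the
+;; remainder in MDR.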
+
+(define_insn "*udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=D")
+ (udiv:SI (match_operand:SI 2 "register_operand" " 0")
+ (match_operand:SI 3 "register_operand" " D")))
+ (set (match_operand:SI 1 "register_operand" "=z")
+ (umod:SI (match_dup 2) (match_dup 3)))
+ (use (match_operand:SI 4 "nonmemory_operand" " 1"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "divu %3,%0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 3839) (const_int 4243)))]
+)
+
+(define_expand "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (div:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (use (match_dup 4))
+ (clobber (reg:CC CC_REG))])]
+ ""
+{
+ operands[4] = gen_reg_rtx (SImode);
+ emit_insn (gen_ext_internal (operands[4], operands[1]));
+})
+
+;; ??? Ideally we'd represent this via shift, but it seems like adding a
+;; special-case pattern for (ashiftrt x 31) is just as likely to result
+;; in poor register allocation choices.
+(define_insn "ext_internal"
+ [(set (match_operand:SI 0 "register_operand" "=z")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "D")] UNSPEC_EXT))]
+ ""
+ "ext %1"
+)
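+
+;; For signed division the dividend in MDR:Dn must be the sign extension of
+;; the 32-bit value, which is why divmodsi4 above emits "ext" to fill MDR
+;; with copies of the sign bit before the "div" itself.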
+
+(define_insn "*divmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=D")
+ (div:SI (match_operand:SI 2 "register_operand" " 0")
+ (match_operand:SI 3 "register_operand" " D")))
+ (set (match_operand:SI 1 "register_operand" "=z")
+ (mod:SI (match_dup 2) (match_dup 3)))
+ (use (match_operand:SI 4 "register_operand" " 1"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "div %3,%0";
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 3839) (const_int 4243)))]
+)
+
+
+;; ----------------------------------------------------------------------
+;; AND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ and %2,%0
+ and %2,%0
+ and %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "liw" "*,op1,*")
+ (set_attr "liw_op" "and")
+ (set_attr "timings" "22,11,11")]
+)
+
+(define_insn "*andsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (set (reg CC_REG)
+ (compare (and:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNmode)"
+ "@
+ and %2,%0
+ and %2,%0
+ and %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "timings" "22,11,11")]
+)
+
+;; Make sure we generate extensions instead of ANDs.
+
+(define_split
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 255)))
+ (clobber (reg:CC CC_REG))])]
+ ""
+ [(set (match_dup 0) (zero_extend:SI (match_dup 1)))]
+ { operands[1] = gen_lowpart (QImode, operands[1]); }
+)
+
+(define_split
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 65535)))
+ (clobber (reg:CC CC_REG))])]
+ ""
+ [(set (match_dup 0) (zero_extend:SI (match_dup 1)))]
+ { operands[1] = gen_lowpart (HImode, operands[1]); }
+)
+
+;; Split AND by an appropriate constant into two shifts. Recall that
+;; operations with a full 32-bit immediate require an extra cycle, so
+;; this is a size optimization with no speed penalty. This only applies
+;; to DATA_REGS; the shift insns that AM33 adds are too large for a win.
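+;; For example (an assumed case), masking off the top two bits with
+;; "and 0x3FFFFFFF,d0" needs a full 32-bit immediate, whereas
+;;   asl 2,d0
+;;   lsr 2,d0
+;; computes the same result with two short shifts.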
+
+(define_split
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))
+ (clobber (reg:CC CC_REG))])]
+ "reload_completed
+ && REGNO_DATA_P (true_regnum (operands[0]), 1)
+ && mn10300_split_and_operand_count (operands[1]) != 0"
+ [(const_int 0)]
+{
+ int count = mn10300_split_and_operand_count (operands[1]);
+ if (count > 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[0], GEN_INT (count)));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (count)));
+ }
+ else
+ {
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (-count)));
+ emit_insn (gen_lshrsi3 (operands[0], operands[0], GEN_INT (-count)));
+ }
+ DONE;
+})
+
+;; ----------------------------------------------------------------------
+;; OR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ or %2,%0
+ or %2,%0
+ or %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "liw" "*,op1,*")
+ (set_attr "liw_op" "or")
+ (set_attr "timings" "22,11,11")]
+)
+
+(define_insn "*iorsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (set (reg CC_REG)
+ (compare (ior:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNmode)"
+ "@
+ or %2,%0
+ or %2,%0
+ or %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "timings" "22,11,11")]
+)
+
+;; ----------------------------------------------------------------------
+;; XOR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ xor %2,%0
+ xor %2,%0
+ xor %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "liw" "*,op1,*")
+ (set_attr "liw_op" "xor")
+ (set_attr "timings" "22,11,11")]
+)
+
+(define_insn "*xorsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" " i,D,r")))
+ (set (reg CC_REG)
+ (compare (xor:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNmode)"
+ "@
+ xor %2,%0
+ xor %2,%0
+ xor %2,%1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr "timings" "22,11,11")]
+)
+
+;; ----------------------------------------------------------------------
+;; NOT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=D")
+ (not:SI (match_operand:SI 1 "register_operand" " 0")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "not %0"
+)
+
+(define_insn "*one_cmplsi2_flags"
+ [(set (match_operand:SI 0 "register_operand" "=D")
+ (not:SI (match_operand:SI 1 "register_operand" " 0")))
+ (set (reg CC_REG)
+ (compare (not:SI (match_dup 1))
+ (const_int 0)))]
+ "reload_completed && mn10300_match_ccmode (insn, CCZNmode)"
+ "not %0"
+)
+
+;; ----------------------------------------------------------------------
+;; COMPARE AND BRANCH INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; We expand the comparison into a single insn so that it will not be split
+;; up by reload.
+(define_expand "cbranchsi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ ""
+)
+
+(define_insn_and_split "*cbranchsi4_cmp"
+ [(set (pc)
+ (if_then_else (match_operator 3 "ordered_comparison_operator"
+ [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "nonmemory_operand" "ri")])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ mn10300_split_cbranch (CCmode, operands[3], operands[2]);
+ DONE;
+})
+
+(define_insn "*cmpsi"
+ [(set (reg CC_REG)
+ (compare (match_operand:SI 0 "register_operand" "r,r,r")
+ (match_operand:SI 1 "nonmemory_operand" "r,O,i")))]
+ "reload_completed"
+{
+ /* The operands of CMP must be distinct registers. In the case where
+ we've failed to optimize the comparison of a register to itself, we
+ must use another method to set the Z flag. We can achieve this
+ effect with a BTST 0,D0. This will not alter the contents of D0;
+ the use of d0 is arbitrary; any data register would work. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ return "btst 0,d0";
+ else
+ return "cmp %1,%0";
+}
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34") (const_int 11) (const_int 22))
+ (if_then_else (eq_attr "cpu" "am34") (const_int 11) (const_int 22))
+ (if_then_else (eq_attr "cpu" "am34") (const_int 11) (const_int 22))])
+ (set_attr "liw" "either,either,*")
+ (set_attr "liw_op" "cmp")]
+)
+
+(define_insn "*integer_conditional_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 2 "int_mode_flags" "")
+ (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "reload_completed"
+ "b%b0 %1"
+)
+
+(define_insn_and_split "*cbranchsi4_btst"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "CCZN_comparison_operator"
+ [(and:SI (match_operand:SI 0 "register_operand" "D")
+ (match_operand:SI 1 "immediate_operand" "i"))
+ (const_int 0)])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ mn10300_split_cbranch (CCZNmode, operands[3], operands[2]);
+ DONE;
+})
+
+(define_insn "*btstsi"
+ [(set (reg:CCZN CC_REG)
+ (compare:CCZN
+ (and:SI (match_operand:SI 0 "register_operand" "D")
+ (match_operand:SI 1 "immediate_operand" "i"))
+ (const_int 0)))]
+ "reload_completed"
+ "btst %1,%0"
+)
+
+(define_expand "cbranchsf4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SF 1 "register_operand")
+ (match_operand:SF 2 "nonmemory_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "TARGET_AM33_2"
+ ""
+)
+
+(define_insn_and_split "*cbranchsf4_cmp"
+ [(set (pc)
+ (if_then_else (match_operator 3 "ordered_comparison_operator"
+ [(match_operand:SF 0 "register_operand" "f")
+ (match_operand:SF 1 "nonmemory_operand" "fF")])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))
+ ]
+ "TARGET_AM33_2"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ mn10300_split_cbranch (CC_FLOATmode, operands[3], operands[2]);
+ DONE;
+})
+
+(define_insn "*am33_cmpsf"
+ [(set (reg:CC_FLOAT CC_REG)
+ (compare:CC_FLOAT (match_operand:SF 0 "register_operand" "f")
+ (match_operand:SF 1 "nonmemory_operand" "fF")))]
+ "TARGET_AM33_2 && reload_completed"
+ "fcmp %1, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 25)))]
+)
+
+(define_insn "*float_conditional_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC_FLOAT CC_REG) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "TARGET_AM33_2 && reload_completed"
+ "fb%b0 %1"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 44) (const_int 33)))]
+)
+
+;; Unconditional and other jump instructions.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp %l0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 44)))]
+)
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "a"))]
+ ""
+ "jmp (%0)"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 33)))]
+)
+
+(define_expand "builtin_setjmp_receiver"
+ [(match_operand 0 "" "")]
+ "flag_pic"
+{
+ emit_insn (gen_load_pic ());
+ DONE;
+})
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "immediate_operand")
+ (match_operand:SI 2 "immediate_operand")
+ (match_operand 3 "" "") (match_operand 4 "")]
+ ""
+{
+ rtx table = gen_reg_rtx (SImode);
+ rtx index = gen_reg_rtx (SImode);
+ rtx addr = gen_reg_rtx (Pmode);
+ rtx test;
+
+ emit_move_insn (table, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
+ emit_insn (gen_addsi3 (index, operands[0], GEN_INT (- INTVAL (operands[1]))));
+ test = gen_rtx_fmt_ee (GTU, VOIDmode, index, operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, index, operands[2], operands[4]));
+
+ emit_insn (gen_ashlsi3 (index, index, const2_rtx));
+ emit_move_insn (addr, gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (SImode, table, index)));
+ if (flag_pic)
+ emit_insn (gen_addsi3 (addr, addr, table));
+
+ emit_jump_insn (gen_tablejump (addr, operands[3]));
+ DONE;
+})
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "a"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp (%0)"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 11) (const_int 33)))]
+)
+
+;; Call subroutine with no return value.
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "general_operand")
+ (match_operand:SI 1 "general_operand"))]
+ ""
+{
+ rtx fn = XEXP (operands[0], 0);
+
+ if (flag_pic && GET_CODE (fn) == SYMBOL_REF)
+ {
+ if (MN10300_GLOBAL_P (fn))
+ {
+ /* The PLT code won't run on AM30, but then, there's no
+ shared library support for AM30 either, so we just assume
+ the linker is going to adjust all @PLT relocs to the
+ actual symbols. */
+ emit_use (pic_offset_table_rtx);
+ fn = gen_rtx_UNSPEC (SImode, gen_rtvec (1, fn), UNSPEC_PLT);
+ }
+ else
+ fn = gen_rtx_UNSPEC (SImode, gen_rtvec (1, fn), UNSPEC_PIC);
+ }
+ if (! call_address_operand (fn, VOIDmode))
+ fn = force_reg (SImode, fn);
+
+ XEXP (operands[0], 0) = fn;
+})
+
+(define_insn "*call_internal"
+ [(call (mem:QI (match_operand:SI 0 "call_address_operand" "a,S"))
+ (match_operand:SI 1 "" ""))]
+ ""
+ "@
+ calls %C0
+ call %C0,[],0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 33) (const_int 44))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 55) (const_int 33))
+ ])
+ ]
+)
+
+;; Call subroutine, returning value in operand 0
+;; (which must be a hard register).
+
+(define_expand "call_value"
+ [(set (match_operand 0 "")
+ (call (match_operand:QI 1 "general_operand")
+ (match_operand:SI 2 "general_operand")))]
+ ""
+{
+ rtx fn = XEXP (operands[1], 0);
+
+ if (flag_pic && GET_CODE (fn) == SYMBOL_REF)
+ {
+ if (MN10300_GLOBAL_P (fn))
+ {
+ /* The PLT code won't run on AM30, but then, there's no
+ shared library support for AM30 either, so we just assume
+ the linker is going to adjust all @PLT relocs to the
+ actual symbols. */
+ emit_use (pic_offset_table_rtx);
+ fn = gen_rtx_UNSPEC (SImode, gen_rtvec (1, fn), UNSPEC_PLT);
+ }
+ else
+ fn = gen_rtx_UNSPEC (SImode, gen_rtvec (1, fn), UNSPEC_PIC);
+ }
+ if (! call_address_operand (fn, VOIDmode))
+ fn = force_reg (SImode, fn);
+
+ XEXP (operands[1], 0) = fn;
+})
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "call_address_operand" "a,S"))
+ (match_operand:SI 2 "" "")))]
+ ""
+ "@
+ calls %C1
+ call %C1,[],0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 33) (const_int 44))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 55) (const_int 33))
+ ])
+ ]
+)
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+ DONE;
+})
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+)
+
+;; ----------------------------------------------------------------------
+;; EXTEND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" " 0,m,r")))]
+ ""
+ "@
+ extbu %0
+ movbu %1,%0
+ extbu %1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr_alternative "timings"
+ [(const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (const_int 11)
+ ])]
+)
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,r")
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" " 0,m,r")))]
+ ""
+ "@
+ exthu %0
+ movhu %1,%0
+ exthu %1,%0"
+ [(set_attr "isa" "*,*,am33")
+ (set_attr_alternative "timings"
+ [(const_int 11)
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 24))
+ (const_int 11)])]
+)
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,r")
+ (sign_extend:SI
+ (match_operand:QI 1 "register_operand" "0,r")))]
+ ""
+ "@
+ extb %0
+ extb %1,%0"
+ [(set_attr "isa" "*,am33")]
+)
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=D,r")
+ (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "0,r")))]
+ ""
+ "@
+ exth %0
+ exth %1,%0"
+ [(set_attr "isa" "*,am33")]
+)
+
+;; ----------------------------------------------------------------------
+;; SHIFTS
+;; ----------------------------------------------------------------------
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,D,d,d,D,D,D,r")
+ (ashift:SI
+ (match_operand:SI 1 "register_operand" " 0,0,0,0,0,0,0,r")
+ (match_operand:QI 2 "nonmemory_operand" " J,K,M,L,D,O,i,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ add %0,%0
+ asl2 %0
+ asl2 %0\;add %0,%0
+ asl2 %0\;asl2 %0
+ asl %S2,%0
+ asl %S2,%0
+ asl %S2,%0
+ asl %2,%1,%0"
+ [(set_attr "isa" "*,*,*,*,*,*,*,am33")
+ (set_attr "liw" "op2,op2,op2,op2,op2,op2,*,*")
+ (set_attr "liw_op" "asl")
+ (set_attr "timings" "11,11,22,22,11,11,11,11")]
+)
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,D,r")
+ (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "0,0,0,r")
+ (match_operand:QI 2 "nonmemory_operand" "D,O,i,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ lsr %S2,%0
+ lsr %S2,%0
+ lsr %S2,%0
+ lsr %2,%1,%0"
+ [(set_attr "isa" "*,*,*,am33")
+ (set_attr "liw" "op2,op2,*,*")
+ (set_attr "liw_op" "lsr")]
+)
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,D,r")
+ (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "0,0,0,r")
+ (match_operand:QI 2 "nonmemory_operand" "D,O,i,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ asr %S2,%0
+ asr %S2,%0
+ asr %S2,%0
+ asr %2,%1,%0"
+ [(set_attr "isa" "*,*,*,am33")
+ (set_attr "liw" "op2,op2,*,*")
+ (set_attr "liw_op" "asr")]
+)
+
+;; ----------------------------------------------------------------------
+;; MISCELLANEOUS
+;; ----------------------------------------------------------------------
+
+(define_expand "clzsi2"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "")
+ (const_int 0)] UNSPEC_BSCH))
+ (clobber (reg:CC CC_REG))])]
+ "TARGET_AM33"
+)
+
+(define_insn "*bsch"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "nonmemory_operand" "0")]
+ UNSPEC_BSCH))
+ (clobber (reg:CC CC_REG))]
+ "TARGET_AM33"
+ "bsch %1,%0"
+)
+
+;; ----------------------------------------------------------------------
+;; FP INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (abs:SF (match_operand:SF 1 "register_operand" "0,?f")))]
+ "TARGET_AM33_2"
+ "@
+ fabs %0
+ fabs %1, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 14)))]
+)
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (neg:SF (match_operand:SF 1 "register_operand" "0,?f")))]
+ "TARGET_AM33_2"
+ "@
+ fneg %0
+ fneg %1, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 14)))]
+)
+
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "")))]
+ "TARGET_AM33_2 && flag_unsafe_math_optimizations"
+{
+ rtx scratch = gen_reg_rtx (SFmode);
+ emit_insn (gen_rsqrtsf2 (scratch, operands[1], CONST1_RTX (SFmode)));
+ emit_insn (gen_divsf3 (operands[0], force_reg (SFmode, CONST1_RTX (SFmode)),
+ scratch));
+ DONE;
+})
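+
+;; The expansion above uses the identity sqrt(x) = 1 / (1/sqrt(x)):
+;; frsqrt computes the reciprocal square root into a scratch register and
+;; a following fdiv of 1.0 by that value recovers the square root, hence
+;; the flag_unsafe_math_optimizations guard.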
+
+(define_insn "rsqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (div:SF (match_operand:SF 2 "const_1f_operand" "F,F")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "0,?f"))))
+ (clobber (reg:CC_FLOAT CC_REG))]
+ "TARGET_AM33_2"
+ "@
+ frsqrt %0
+ frsqrt %1, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 4753) (const_int 2327)))]
+)
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "register_operand" "%0,f")
+ (match_operand:SF 2 "nonmemory_operand" "f,?fF")))
+ (clobber (reg:CC_FLOAT CC_REG))]
+ "TARGET_AM33_2"
+ "@
+ fadd %2, %0
+ fadd %2, %1, %0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 14))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 25))
+ ])]
+)
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "register_operand" "0,f")
+ (match_operand:SF 2 "nonmemory_operand" "f,?fF")))
+ (clobber (reg:CC_FLOAT CC_REG))]
+ "TARGET_AM33_2"
+ "@
+ fsub %2, %0
+ fsub %2, %1, %0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 14))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 25))
+ ])]
+)
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (mult:SF (match_operand:SF 1 "register_operand" "%0,f")
+ (match_operand:SF 2 "nonmemory_operand" "f,?fF")))
+ (clobber (reg:CC_FLOAT CC_REG))
+ ]
+ "TARGET_AM33_2"
+ "@
+ fmul %2, %0
+ fmul %2, %1, %0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 14))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 25))
+ ])]
+)
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "register_operand" "0,f")
+ (match_operand:SF 2 "nonmemory_operand" "f,?fF")))
+ (clobber (reg:CC_FLOAT CC_REG))]
+ "TARGET_AM33_2"
+ "@
+ fdiv %2, %0
+ fdiv %2, %1, %0"
+ [(set_attr_alternative "timings"
+ [(if_then_else (eq_attr "cpu" "am34")
+ (const_int 2531) (const_int 1216))
+ (if_then_else (eq_attr "cpu" "am34")
+ (const_int 2531) (const_int 1317))
+ ])]
+)
+
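+;; Fused multiply-add patterns.  Following the GCC fma optab conventions,
+;; fmasf4 computes a*b+c, fmssf4 a*b-c, fnmasf4 -(a*b)+c and fnmssf4
+;; -(a*b)-c, mapping onto FMADD, FMSUB, FNMADD and FNMSUB respectively.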
+(define_insn "fmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=c")
+ (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f")))
+ (clobber (reg:CC_FLOAT CC_REG))
+ ]
+ "TARGET_AM33_2"
+ "fmadd %1, %2, %3, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 24)))]
+)
+
+(define_insn "fmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=c")
+ (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (neg:SF (match_operand:SF 3 "register_operand" "f"))))
+ (clobber (reg:CC_FLOAT CC_REG))
+ ]
+ "TARGET_AM33_2"
+ "fmsub %1, %2, %3, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 24)))]
+)
+
+(define_insn "fnmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=c")
+ (fma:SF (neg:SF (match_operand:SF 1 "register_operand" "f"))
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f")))
+ (clobber (reg:CC_FLOAT CC_REG))
+ ]
+ "TARGET_AM33_2"
+ "fnmadd %1, %2, %3, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 24)))]
+)
+
+(define_insn "fnmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=c")
+ (fma:SF (neg:SF (match_operand:SF 1 "register_operand" "f"))
+ (match_operand:SF 2 "register_operand" "f")
+ (neg:SF (match_operand:SF 3 "register_operand" "f"))))
+ (clobber (reg:CC_FLOAT CC_REG))
+ ]
+ "TARGET_AM33_2"
+ "fnmsub %1, %2, %3, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 17) (const_int 24)))]
+)
+
+;; ----------------------------------------------------------------------
+;; PROLOGUE/EPILOGUE
+;; ----------------------------------------------------------------------
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ { mn10300_expand_prologue (); DONE; }
+)
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ { mn10300_expand_epilogue (); DONE; }
+)
+
+(define_insn "return"
+ [(return)]
+ "mn10300_can_use_rets_insn ()"
+{
+ /* The RETF insn is 4 cycles faster than RETS, though 1 byte larger. */
+ if (optimize_insn_for_speed_p () && mn10300_can_use_retf_insn ())
+ return "retf [],0";
+ else
+ return "rets";
+})
+
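+;; Return, restoring the listed callee-saved registers.  Operand 0 is the
+;; immediate stack adjustment printed after the register list.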
+(define_insn "return_ret"
+ [(return)
+ (use (match_operand:SI 0 "const_int_operand" ""))]
+ ""
+{
+ /* The RETF insn is up to 3 cycles faster than RET. */
+ fputs ((mn10300_can_use_retf_insn () ? "\tretf " : "\tret "), asm_out_file);
+ mn10300_print_reg_list (asm_out_file, mn10300_get_live_callee_saved_regs ());
+ fprintf (asm_out_file, ",%d\n", (int) INTVAL (operands[0]));
+ return "";
+})
+
+;; This instruction matches one generated by mn10300_gen_multiple_store()
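+;; The first element of the parallel adjusts the stack pointer; the
+;; remaining elements store the registers being saved.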
+(define_insn "store_movm"
+ [(match_parallel 0 "mn10300_store_multiple_operation"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_operand 1 "" "")))])]
+ ""
+{
+ fputs ("\tmovm ", asm_out_file);
+ mn10300_print_reg_list (asm_out_file,
+ mn10300_store_multiple_operation (operands[0],
+ VOIDmode));
+ fprintf (asm_out_file, ",(sp)\n");
+ return "";
+}
+ ;; Assume that no more than 8 registers will be pushed.
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 99) (const_int 88)))]
+)
+
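+;; Load the PIC register.  On AM33 the program counter can be copied
+;; directly; otherwise a CALLS to the local label pushes that label's
+;; address, which is then read back from the stack, either via a scratch
+;; slot (when the frame is empty) or re-using existing stack space.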
+(define_expand "load_pic"
+ [(const_int 0)]
+ "flag_pic"
+{
+ if (TARGET_AM33)
+ emit_insn (gen_am33_load_pic (pic_offset_table_rtx));
+ else if (mn10300_frame_size () == 0)
+ emit_insn (gen_mn10300_load_pic0 (pic_offset_table_rtx));
+ else
+ emit_insn (gen_mn10300_load_pic1 (pic_offset_table_rtx));
+ DONE;
+})
+
+(define_insn "am33_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(const_int 0)] UNSPEC_GOT))
+ (clobber (reg:CC CC_REG))]
+ "TARGET_AM33"
+{
+ operands[1] = gen_rtx_SYMBOL_REF (VOIDmode, GOT_SYMBOL_NAME);
+ return ".LPIC%=:\;mov pc,%0\;add %1-(.LPIC%=-.),%0";
+}
+ [(set_attr "timings" "33")]
+)
+
+;; Load pic register with push/pop of stack.
+(define_insn "mn10300_load_pic0"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(const_int 0)] UNSPEC_GOT))
+ (clobber (reg:SI MDR_REG))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ operands[1] = gen_rtx_SYMBOL_REF (VOIDmode, GOT_SYMBOL_NAME);
+ return ("add -4,sp\;"
+ "calls .LPIC%=\n"
+ ".LPIC%=:\;"
+ "movm (sp),[%0]\;"
+ "add %1-(.LPIC%=-.),%0");
+}
+ [(set_attr "timings" "88")]
+)
+
+;; Load pic register re-using existing stack space.
+(define_insn "mn10300_load_pic1"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(const_int 0)] UNSPEC_GOT))
+ (clobber (mem:SI (reg:SI SP_REG)))
+ (clobber (reg:SI MDR_REG))
+ (clobber (reg:CC CC_REG))]
+ ""
+{
+ operands[1] = gen_rtx_SYMBOL_REF (VOIDmode, GOT_SYMBOL_NAME);
+ return ("calls .LPIC%=\n"
+ ".LPIC%=:\;"
+ "mov (sp),%0\;"
+ "add %1-(.LPIC%=-.),%0");
+}
+ [(set_attr "timings" "66")]
+)
+
+;; The mode on operand 3 has been deliberately omitted because it
+;; can be either SI (for arithmetic operations) or QI (for shifts).
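+;; Operands 4 and 5 are constants identifying the two bundled operations;
+;; the %W output modifier prints the corresponding mnemonic.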
+(define_insn "liw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_dup 0)
+ (match_operand 2 "liw_operand" "rO")
+ (match_operand:SI 4 "const_int_operand" "")]
+ UNSPEC_LIW))
+ (set (match_operand:SI 1 "register_operand" "=r")
+ (unspec:SI [(match_dup 1)
+ (match_operand 3 "liw_operand" "rO")
+ (match_operand:SI 5 "const_int_operand" "")]
+ UNSPEC_LIW))]
+ "TARGET_ALLOW_LIW"
+ "%W4_%W5 %2, %0, %3, %1"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 12)))]
+)
+
+;; The mode on operand 1 has been deliberately omitted because it
+;; can be either SI (for arithmetic operations) or QI (for shifts).
+(define_insn "cmp_liw"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 2 "register_operand" "r")
+ (match_operand 3 "liw_operand" "rO")))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_dup 0)
+ (match_operand 1 "liw_operand" "rO")
+ (match_operand:SI 4 "const_int_operand" "")]
+ UNSPEC_LIW))]
+ "TARGET_ALLOW_LIW"
+ "cmp_%W4 %3, %2, %1, %0"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 12)))]
+)
+
+(define_insn "liw_cmp"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_dup 0)
+ (match_operand 1 "liw_operand" "rO")
+ (match_operand:SI 4 "const_int_operand" "")]
+ UNSPEC_LIW))
+ (set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 2 "register_operand" "r")
+ (match_operand 3 "liw_operand" "rO")))]
+ "TARGET_ALLOW_LIW"
+ "%W4_cmp %1, %0, %3, %2"
+ [(set (attr "timings") (if_then_else (eq_attr "cpu" "am34")
+ (const_int 13) (const_int 12)))]
+)