From 554fd8c5195424bdbcabf5de30fdc183aba391bd Mon Sep 17 00:00:00 2001 From: upstream source tree Date: Sun, 15 Mar 2015 20:14:05 -0400 Subject: obtained gcc-4.6.4.tar.bz2 from upstream website; verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository. --- gcc/config/m68k/m68k.md | 7808 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 7808 insertions(+) create mode 100644 gcc/config/m68k/m68k.md (limited to 'gcc/config/m68k/m68k.md') diff --git a/gcc/config/m68k/m68k.md b/gcc/config/m68k/m68k.md new file mode 100644 index 000000000..f89037f2e --- /dev/null +++ b/gcc/config/m68k/m68k.md @@ -0,0 +1,7808 @@ +;;- Machine description for GNU compiler, Motorola 68000 Version +;; Copyright (C) 1987, 1988, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2001, +;; 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 +;; Free Software Foundation, Inc. + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. + +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;;- Information about MCF5200 port. + +;;- The MCF5200 "ColdFire" architecture is a reduced version of the +;;- 68k ISA. Differences include reduced support for byte and word +;;- operands and the removal of BCD, bitfield, rotate, and integer +;;- divide instructions. The TARGET_COLDFIRE flag turns the use of the +;;- removed opcodes and addressing modes off. +;;- + + +;;- instruction definitions + +;;- @@The original PO technology requires these to be ordered by speed, +;;- @@ so that assigner will pick the fastest. + +;;- See file "rtl.def" for documentation on define_insn, match_*, et. al. + +;;- When naming insn's (operand 0 of define_insn) be careful about using +;;- names from other targets machine descriptions. + +;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code +;;- updates for most instructions. + +;;- Operand classes for the register allocator: +;;- 'a' one of the address registers can be used. +;;- 'd' one of the data registers can be used. +;;- 'f' one of the m68881/fpu registers can be used +;;- 'r' either a data or an address register can be used. + +;;- Immediate Floating point operator constraints +;;- 'G' a floating point constant that is *NOT* one of the standard +;; 68881 constant values (to force calling output_move_const_double +;; to get it from rom if it is a 68881 constant). +;; +;; See the functions standard_XXX_constant_p in output-m68k.c for more +;; info. + +;;- Immediate integer operand constraints: +;;- 'I' 1 .. 8 +;;- 'J' -32768 .. 32767 +;;- 'K' all integers EXCEPT -128 .. 127 +;;- 'L' -8 .. -1 +;;- 'M' all integers EXCEPT -256 .. 
255
+;;- 'N' 24 .. 31
+;;- 'O' 16
+;;- 'P' 8 .. 15
+
+;;- Assembler specs:
+;;- "%." size separator ("." or "") move%.l d0,d1
+;;- "%-" push operand "sp@-" move%.l d0,%-
+;;- "%+" pop operand "sp@+" move%.l d0,%+
+;;- "%@" top of stack "sp@" move%.l d0,%@
+;;- "%!" fpcr register
+;;- "%$" single-precision fp specifier ("s" or "") f%$add.x fp0,fp1
+;;- "%&" double-precision fp specifier ("d" or "") f%&add.x fp0,fp1
+
+;;- Information about 68040 port.
+
+;;- The 68040 executes all 68030 and 68881/2 instructions, but some must
+;;- be emulated in software by the OS. It is faster to avoid these
+;;- instructions and issue a library call rather than trapping into
+;;- the kernel. The affected instructions are fintrz and fscale. The
+;;- TUNE_68040 flag turns the use of the opcodes off.
+
+;;- The '040 also implements a set of new floating-point instructions
+;;- which specify the rounding precision in the opcode. This finally
+;;- permits the 68k series to be truly IEEE compliant, and solves all
+;;- issues of excess precision accumulating in the extended registers.
+;;- By default, GCC does not use these instructions, since such code will
+;;- not run on an '030. To use these instructions, use the -m68040-only
+;;- switch.
+
+;;- These new instructions aren't directly in the md. They are brought
+;;- into play by defining "%$" and "%&" to expand to "s" and "d" rather
+;;- than "".
+
+;;- Information about 68060 port.
+
+;;- The 68060 executes all 68030 and 68881/2 instructions, but some must
+;;- be emulated in software by the OS. It is faster to avoid these
+;;- instructions and issue a library call rather than trapping into
+;;- the kernel. The affected instructions are: divs.l <ea>,Dr:Dq;
+;;- divu.l <ea>,Dr:Dq; muls.l <ea>,Dr:Dq; mulu.l <ea>,Dr:Dq; and
+;;- fscale. The TUNE_68060 flag turns the use of the opcodes off.
+
+;;- Some of these insns are composites of several m68000 op codes.
+;;- The assembler (or final @@??) ensures that the appropriate one is
+;;- selected.
+
+;; UNSPEC usage:
+
+(define_constants
+ [(UNSPEC_SIN 1)
+ (UNSPEC_COS 2)
+ (UNSPEC_GOT 3)
+ (UNSPEC_IB 4)
+ (UNSPEC_TIE 5)
+ (UNSPEC_RELOC16 6)
+ (UNSPEC_RELOC32 7)
+ ])
+
+;; UNSPEC_VOLATILE usage:
+
+(define_constants
+ [(UNSPECV_BLOCKAGE 0)
+ ])
+
+;; Registers by name.
+(define_constants
+ [(D0_REG 0)
+ (A0_REG 8)
+ (A1_REG 9)
+ (PIC_REG 13)
+ (A6_REG 14)
+ (SP_REG 15)
+ (FP0_REG 16)
+ ])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ::::::::::::::::::::
+;; ::
+;; :: Attributes
+;; ::
+;; ::::::::::::::::::::
+
+;; Processor type.
+(define_attr "cpu" "cfv1, cfv2, cfv3, cfv4, unknown"
+ (const (symbol_ref "m68k_sched_cpu")))
+
+;; MAC type.
+(define_attr "mac" "no, cf_mac, cf_emac"
+ (const (symbol_ref "m68k_sched_mac")))
+
+;; Instruction type for use in scheduling description.
+;; _l and _w suffixes indicate size of the operands of instruction.
+;; alu - usual arithmetic or logic instruction.
+;; aluq - arithmetic or logic instruction which has a quick immediate (the one
+;; that is encoded in the instruction word) for its Y operand.
+;; alux - Arithmetic instruction that uses carry bit (e.g., addx and subx).
+;; bcc - conditional branch.
+;; bitr - bit operation that only updates flags.
+;; bitrw - bit operation that updates flags and output operand.
+;; bra, bsr, clr, cmp, div, ext - corresponding instruction.
+;; falu, fbcc, fcmp, fdiv, fmove, fmul, fneg, fsqrt, ftst - corresponding
+;; instruction.
+;; ib - fake instruction to subscribe slots in ColdFire V1,V2,V3 instruction
+;; buffer.
+;; ignore - fake instruction. +;; jmp, jsr, lea, link, mov3q, move, moveq, mul - corresponding instruction. +;; mvsz - mvs or mvz instruction. +;; neg, nop, pea, rts, scc - corresponding instruction. +;; shift - arithmetic or logical shift instruction. +;; trap, tst, unlk - corresponding instruction. +(define_attr "type" + "alu_l,aluq_l,alux_l,bcc,bitr,bitrw,bra,bsr,clr,clr_l,cmp,cmp_l, + div_w,div_l,ext, + falu,fbcc,fcmp,fdiv,fmove,fmul,fneg,fsqrt,ftst, + ib,ignore, + jmp,jsr,lea,link,mov3q_l,move,move_l,moveq_l,mul_w,mul_l,mvsz,neg_l,nop, + pea,rts,scc,shift, + trap,tst,tst_l,unlk, + unknown" + (const_string "unknown")) + +;; Index of the X or Y operand in recog_data.operand[]. +;; Should be used only within opx_type and opy_type. +(define_attr "opx" "" (const_int 0)) +(define_attr "opy" "" (const_int 1)) + +;; Type of the Y operand. +;; See m68k.c: enum attr_op_type. +(define_attr "opy_type" + "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l" + (cond [(eq_attr "type" "ext,fbcc,ftst,neg_l,bcc,bra,bsr,clr,clr_l,ib,ignore, + jmp,jsr,nop,rts,scc,trap,tst,tst_l, + unlk,unknown") (const_string "none") + (eq_attr "type" "lea,pea") + (symbol_ref "m68k_sched_attr_opy_type (insn, 1)")] + (symbol_ref "m68k_sched_attr_opy_type (insn, 0)"))) + +;; Type of the X operand. +;; See m68k.c: enum attr_op_type. +(define_attr "opx_type" + "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l" + (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk, + unknown") (const_string "none") + (eq_attr "type" "pea") (const_string "mem1") + (eq_attr "type" "jmp,jsr") + (symbol_ref "m68k_sched_attr_opx_type (insn, 1)")] + (symbol_ref "m68k_sched_attr_opx_type (insn, 0)"))) + +;; Access to the X operand: none, read, write, read/write, unknown. +;; Access to the Y operand is either none (if opy_type is none) +;; or read otherwise. +(define_attr "opx_access" "none, r, w, rw" + (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk, + unknown") (const_string "none") + (eq_attr "type" "bcc,bra,bsr,bitr,cmp,cmp_l,fbcc,fcmp,ftst, + jmp,jsr,tst,tst_l") (const_string "r") + (eq_attr "type" "clr,clr_l,fneg,fmove,lea, + mov3q_l,move,move_l,moveq_l,mvsz, + pea,scc") (const_string "w") + (eq_attr "type" "alu_l,aluq_l,alux_l,bitrw,div_w,div_l,ext, + falu,fdiv,fmul,fsqrt,link,mul_w,mul_l, + neg_l,shift") (const_string "rw")] + ;; Should never be used. + (symbol_ref "(gcc_unreachable (), OPX_ACCESS_NONE)"))) + +;; Memory accesses of the insn. +;; 00 - no memory references +;; 10 - memory is read +;; i0 - indexed memory is read +;; 01 - memory is written +;; 0i - indexed memory is written +;; 11 - memory is read, memory is written +;; i1 - indexed memory is read, memory is written +;; 1i - memory is read, indexed memory is written +(define_attr "op_mem" "00, 10, i0, 01, 0i, 11, i1, 1i" + (symbol_ref "m68k_sched_attr_op_mem (insn)")) + +;; Instruction size in words. +(define_attr "size" "1,2,3" + (symbol_ref "m68k_sched_attr_size (insn)")) + +;; Alternative is OK for ColdFire. +(define_attr "ok_for_coldfire" "yes,no" (const_string "yes")) + +;; Define 'enabled' attribute. +(define_attr "enabled" "" + (cond [(and (ne (symbol_ref "TARGET_COLDFIRE") (const_int 0)) + (eq_attr "ok_for_coldfire" "no")) + (const_int 0)] + (const_int 1))) + +;; Mode macros for floating point operations. 
+;; Valid floating point modes +(define_mode_iterator FP [SF DF (XF "TARGET_68881")]) +;; Mnemonic infix to round result +(define_mode_attr round [(SF "%$") (DF "%&") (XF "")]) +;; Mnemonic infix to round result for mul or div instruction +(define_mode_attr round_mul [(SF "sgl") (DF "%&") (XF "")]) +;; Suffix specifying source operand format +(define_mode_attr prec [(SF "s") (DF "d") (XF "x")]) +;; Allowable D registers +(define_mode_attr dreg [(SF "d") (DF "") (XF "")]) +;; Allowable 68881 constant constraints +(define_mode_attr const [(SF "F") (DF "G") (XF "")]) + + +(define_insn_and_split "*movdf_internal" + [(set (match_operand:DF 0 "push_operand" "=m, m") + (match_operand:DF 1 "general_operand" "f, ro<>E"))] + "" + "@ + fmove%.d %f1,%0 + #" + "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 1)" + [(const_int 0)] +{ + m68k_emit_move_double (operands); + DONE; +} + [(set_attr "type" "fmove,*")]) + +(define_insn_and_split "pushdi" + [(set (match_operand:DI 0 "push_operand" "=m") + (match_operand:DI 1 "general_operand" "ro<>Fi"))] + "" + "#" + "&& reload_completed" + [(const_int 0)] +{ + m68k_emit_move_double (operands); + DONE; +}) + +;; We don't want to allow a constant operand for test insns because +;; (set (cc0) (const_int foo)) has no mode information. Such insns will +;; be folded while optimizing anyway. + +(define_insn "tstdi" + [(set (cc0) + (compare (match_operand:DI 0 "nonimmediate_operand" "am,d") + (const_int 0))) + (clobber (match_scratch:SI 1 "=X,d")) + (clobber (match_scratch:DI 2 "=d,X"))] + "" +{ + if (which_alternative == 0) + { + rtx xoperands[2]; + + xoperands[0] = operands[2]; + xoperands[1] = operands[0]; + output_move_double (xoperands); + cc_status.flags |= CC_REVERSED; /*|*/ + return "neg%.l %R2\;negx%.l %2"; + } + if (find_reg_note (insn, REG_DEAD, operands[0])) + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "neg%.l %R0\;negx%.l %0"; + } + else + /* + 'sub' clears %1, and also clears the X cc bit + 'tst' sets the Z cc bit according to the low part of the DImode operand + 'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high part. + */ + return "sub%.l %1,%1\;tst%.l %R0\;subx%.l %1,%0"; +}) + +;; If you think that the 68020 does not support tstl a0, +;; reread page B-167 of the 68020 manual more carefully. +(define_insn "*tstsi_internal_68020_cf" + [(set (cc0) + (compare (match_operand:SI 0 "nonimmediate_operand" "rm") + (const_int 0)))] + "TARGET_68020 || TARGET_COLDFIRE" + "tst%.l %0" + [(set_attr "type" "tst_l")]) + +;; On an address reg, cmpw may replace cmpl. +(define_insn "*tstsi_internal" + [(set (cc0) + (compare (match_operand:SI 0 "nonimmediate_operand" "dm,r") + (const_int 0)))] + "!(TARGET_68020 || TARGET_COLDFIRE)" + "@ + tst%.l %0 + cmp%.w #0,%0" + [(set_attr "type" "tst_l,cmp")]) + +;; This can't use an address register, because comparisons +;; with address registers as second operand always test the whole word. 
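;; Editorial note (illustrative, not part of the upstream gcc-4.6.4 tree):
;; for an address register the SImode pattern above can fall back to
;; "cmp%.w #0,%0", because cmpa.w sign-extends its source operand and
;; compares it against the full 32-bit address register, which for a zero
;; operand is the same as testing the whole register.  No such trick gives a
;; 16-bit-only test of an address register, so the HImode and QImode
;; patterns that follow accept only "dm" operands.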
+(define_insn "*tsthi_internal" + [(set (cc0) + (compare (match_operand:HI 0 "nonimmediate_operand" "dm") + (const_int 0)))] + "" + "tst%.w %0" + [(set_attr "type" "tst")]) + +(define_insn "*tstqi_internal" + [(set (cc0) + (compare (match_operand:QI 0 "nonimmediate_operand" "dm") + (const_int 0)))] + "" + "tst%.b %0" + [(set_attr "type" "tst")]) + +(define_insn "tst_68881" + [(set (cc0) + (compare (match_operand:FP 0 "general_operand" "fm") + (match_operand:FP 1 "const0_operand" "H")))] + "TARGET_68881" +{ + cc_status.flags = CC_IN_68881; + if (FP_REG_P (operands[0])) + return "ftst%.x %0"; + return "ftst%. %0"; +} + [(set_attr "type" "ftst")]) + +(define_insn "tst_cf" + [(set (cc0) + (compare (match_operand:FP 0 "general_operand" "fU") + (match_operand:FP 1 "const0_operand" "H")))] + "TARGET_COLDFIRE_FPU" +{ + cc_status.flags = CC_IN_68881; + if (FP_REG_P (operands[0])) + return "ftst%.d %0"; + return "ftst%. %0"; +} + [(set_attr "type" "ftst")]) + + +;; compare instructions. + +(define_insn "*cmpdi_internal" + [(set (cc0) + (compare (match_operand:DI 1 "nonimmediate_operand" "0,d") + (match_operand:DI 2 "general_operand" "d,0"))) + (clobber (match_scratch:DI 0 "=d,d"))] + "" +{ + if (rtx_equal_p (operands[0], operands[1])) + return "sub%.l %R2,%R0\;subx%.l %2,%0"; + else + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "sub%.l %R1,%R0\;subx%.l %1,%0"; + } +}) + +(define_insn "cmpdi" + [(set (cc0) + (compare (match_operand:DI 0 "nonimmediate_operand") + (match_operand:DI 1 "general_operand"))) + (clobber (match_scratch:DI 2))] + "" + "") + + +(define_expand "cbranchdi4" + [(set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(match_operand:DI 1 "nonimmediate_operand") + (match_operand:DI 2 "general_operand")]) + (label_ref (match_operand 3 "")) + (pc)))] + "" +{ + if (operands[2] == const0_rtx) + emit_insn (gen_tstdi (operands[1])); + else + emit_insn (gen_cmpdi (operands[1], operands[2])); + operands[1] = cc0_rtx; + operands[2] = const0_rtx; +}) + +(define_expand "cstoredi4" + [(set (match_operand:QI 0 "register_operand") + (match_operator:QI 1 "ordered_comparison_operator" + [(match_operand:DI 2 "nonimmediate_operand") + (match_operand:DI 3 "general_operand")]))] + "" +{ + if (operands[3] == const0_rtx) + emit_insn (gen_tstdi (operands[2])); + else + emit_insn (gen_cmpdi (operands[2], operands[3])); + operands[2] = cc0_rtx; + operands[3] = const0_rtx; +}) + + +(define_expand "cbranchsi4" + [(set (cc0) + (compare (match_operand:SI 1 "nonimmediate_operand" "") + (match_operand:SI 2 "general_operand" ""))) + (set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 3 "")) + (pc)))] + "" + "") + +(define_expand "cstoresi4" + [(set (cc0) + (compare (match_operand:SI 2 "nonimmediate_operand" "") + (match_operand:SI 3 "general_operand" ""))) + (set (match_operand:QI 0 "register_operand") + (match_operator:QI 1 "ordered_comparison_operator" + [(cc0) (const_int 0)]))] + "" + "") + + +;; A composite of the cmp, cmpa, cmpi & cmpm m68000 op codes. 
+(define_insn "" + [(set (cc0) + (compare (match_operand:SI 0 "nonimmediate_operand" "rKT,rKs,mSr,mSa,>") + (match_operand:SI 1 "general_src_operand" "mSr,mSa,KTr,Ksr,>")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) + return "cmpm%.l %1,%0"; + if (REG_P (operands[1]) + || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM)) + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "cmp%.l %d0,%d1"; + } + if (ADDRESS_REG_P (operands[0]) + && GET_CODE (operands[1]) == CONST_INT + && INTVAL (operands[1]) < 0x8000 + && INTVAL (operands[1]) >= -0x8000) + return "cmp%.w %1,%0"; + return "cmp%.l %d1,%d0"; +}) + +(define_insn "*cmpsi_cf" + [(set (cc0) + (compare (match_operand:SI 0 "nonimmediate_operand" "mrKs,r") + (match_operand:SI 1 "general_operand" "r,mrKs")))] + "TARGET_COLDFIRE" +{ + if (REG_P (operands[1]) + || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM)) + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "cmp%.l %d0,%d1"; + } + return "cmp%.l %d1,%d0"; +} + [(set_attr "type" "cmp_l")]) + +(define_expand "cbranchhi4" + [(set (cc0) + (compare (match_operand:HI 1 "nonimmediate_src_operand" "") + (match_operand:HI 2 "m68k_subword_comparison_operand" ""))) + (set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 3 "")) + (pc)))] + "" + "") + +(define_expand "cstorehi4" + [(set (cc0) + (compare (match_operand:HI 2 "nonimmediate_operand" "") + (match_operand:HI 3 "m68k_subword_comparison_operand" ""))) + (set (match_operand:QI 0 "register_operand") + (match_operator:QI 1 "ordered_comparison_operator" + [(cc0) (const_int 0)]))] + "" + "") + +(define_insn "" + [(set (cc0) + (compare (match_operand:HI 0 "nonimmediate_src_operand" "rnmS,d,n,mS,>") + (match_operand:HI 1 "general_src_operand" "d,rnmS,mS,n,>")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) + return "cmpm%.w %1,%0"; + if ((REG_P (operands[1]) && !ADDRESS_REG_P (operands[1])) + || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM)) + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "cmp%.w %d0,%d1"; + } + return "cmp%.w %d1,%d0"; +}) + +(define_expand "cbranchqi4" + [(set (cc0) + (compare (match_operand:QI 1 "nonimmediate_src_operand" "") + (match_operand:QI 2 "m68k_subword_comparison_operand" ""))) + (set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 3 "")) + (pc)))] + "" + "") + +(define_expand "cstoreqi4" + [(set (cc0) + (compare (match_operand:QI 2 "nonimmediate_src_operand" "") + (match_operand:QI 3 "m68k_subword_comparison_operand" ""))) + (set (match_operand:QI 0 "register_operand") + (match_operator:QI 1 "ordered_comparison_operator" + [(cc0) (const_int 0)]))] + "" + "") + +(define_insn "" + [(set (cc0) + (compare (match_operand:QI 0 "nonimmediate_src_operand" "dn,dmS,>") + (match_operand:QI 1 "general_src_operand" "dmS,nd,>")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) + return "cmpm%.b %1,%0"; + if (REG_P (operands[1]) + || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM)) + { + cc_status.flags |= CC_REVERSED; /*|*/ + return "cmp%.b %d0,%d1"; + } + return "cmp%.b %d1,%d0"; +}) + +(define_expand "cbranch4" + [(set (cc0) + (compare (match_operand:FP 1 "register_operand" "") + (match_operand:FP 2 "fp_src_operand" ""))) + (set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(cc0) (const_int 
0)]) + (label_ref (match_operand 3 "")) + (pc)))] + "TARGET_HARD_FLOAT" + "") + +(define_expand "cstore4" + [(set (cc0) + (compare (match_operand:FP 2 "register_operand" "") + (match_operand:FP 3 "fp_src_operand" ""))) + (set (match_operand:QI 0 "register_operand") + (match_operator:QI 1 "m68k_cstore_comparison_operator" + [(cc0) (const_int 0)]))] + "TARGET_HARD_FLOAT && !(TUNE_68060 || TARGET_COLDFIRE_FPU)" + "if (TARGET_COLDFIRE && operands[2] != const0_rtx) + FAIL;") + +(define_insn "*cmp_68881" + [(set (cc0) + (compare (match_operand:FP 0 "fp_src_operand" "f,f,mF") + (match_operand:FP 1 "fp_src_operand" "f,mF,f")))] + "TARGET_68881 + && (register_operand (operands[0], mode) + || register_operand (operands[1], mode))" + "@ + fcmp%.x %1,%0 + fcmp%. %f1,%0 + fcmp%. %0,%f1" + [(set_attr "type" "fcmp")]) + +(define_insn "*cmp_cf" + [(set (cc0) + (compare (match_operand:FP 0 "fp_src_operand" "f,f,U") + (match_operand:FP 1 "fp_src_operand" "f,U,f")))] + "TARGET_COLDFIRE_FPU + && (register_operand (operands[0], mode) + || register_operand (operands[1], mode))" + "@ + fcmp%.d %1,%0 + fcmp%. %f1,%0 + fcmp%. %0,%f1" + [(set_attr "type" "fcmp")]) + +;; Recognizers for btst instructions. + +;; ColdFire/5200 only allows "" type addresses when the bit position is +;; specified as a constant, so we must disable all patterns that may extract +;; from a MEM at a constant bit position if we can't use this as a constraint. + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:QI 0 "memory_src_operand" "oS") + (const_int 1) + (minus:SI (const_int 7) + (match_operand:SI 1 "general_operand" "di"))) + (const_int 0)))] + "!TARGET_COLDFIRE" +{ + return output_btst (operands, operands[1], operands[0], insn, 7); +}) + +;; This is the same as the above pattern except for the constraints. The 'i' +;; has been deleted. + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o") + (const_int 1) + (minus:SI (const_int 7) + (match_operand:SI 1 "general_operand" "d"))) + (const_int 0)))] + "TARGET_COLDFIRE" +{ + return output_btst (operands, operands[1], operands[0], insn, 7); +}) + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d") + (const_int 1) + (minus:SI (const_int 31) + (match_operand:SI 1 "general_operand" "di"))) + (const_int 0)))] + "" +{ + return output_btst (operands, operands[1], operands[0], insn, 31); +}) + +;; The following two patterns are like the previous two +;; except that they use the fact that bit-number operands +;; are automatically masked to 3 or 5 bits. + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o") + (const_int 1) + (minus:SI (const_int 7) + (and:SI + (match_operand:SI 1 "register_operand" "d") + (const_int 7)))) + (const_int 0)))] + "" +{ + return output_btst (operands, operands[1], operands[0], insn, 7); +}) + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d") + (const_int 1) + (minus:SI (const_int 31) + (and:SI + (match_operand:SI 1 "register_operand" "d") + (const_int 31)))) + (const_int 0)))] + "" +{ + return output_btst (operands, operands[1], operands[0], insn, 31); +}) + +;; Nonoffsettable mem refs are ok in this one pattern +;; since we don't try to adjust them. 
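;; Editorial note (illustrative, not part of the upstream sources): the m68k
;; port has BITS_BIG_ENDIAN set, so bit position 0 of a zero_extract is the
;; most significant bit, while btst numbers bits from the least significant
;; end.  That is why these patterns emit "7 - pos" (or "31 - pos"): testing
;; zero_extract bit 2 of the byte at (a0) becomes "btst #5,(a0)".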
+(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "m") + (const_int 1) + (match_operand:SI 1 "const_int_operand" "n")) + (const_int 0)))] + "(unsigned) INTVAL (operands[1]) < 8 && !TARGET_COLDFIRE" +{ + operands[1] = GEN_INT (7 - INTVAL (operands[1])); + return output_btst (operands, operands[1], operands[0], insn, 7); +}) + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "do") + (const_int 1) + (match_operand:SI 1 "const_int_operand" "n")) + (const_int 0)))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[0]) == MEM) + { + operands[0] = adjust_address (operands[0], QImode, + INTVAL (operands[1]) / 8); + operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8); + return output_btst (operands, operands[1], operands[0], insn, 7); + } + operands[1] = GEN_INT (31 - INTVAL (operands[1])); + return output_btst (operands, operands[1], operands[0], insn, 31); +}) + +;; This is the same as the above pattern except for the constraints. +;; The 'o' has been replaced with 'Q'. + +(define_insn "" + [(set + (cc0) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "dQ") + (const_int 1) + (match_operand:SI 1 "const_int_operand" "n")) + (const_int 0)))] + "TARGET_COLDFIRE" +{ + if (GET_CODE (operands[0]) == MEM) + { + operands[0] = adjust_address (operands[0], QImode, + INTVAL (operands[1]) / 8); + operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8); + return output_btst (operands, operands[1], operands[0], insn, 7); + } + operands[1] = GEN_INT (31 - INTVAL (operands[1])); + return output_btst (operands, operands[1], operands[0], insn, 31); +}) + + +;; move instructions + +;; A special case in which it is not desirable +;; to reload the constant into a data register. +(define_insn "pushexthisi_const" + [(set (match_operand:SI 0 "push_operand" "=m,m,m") + (match_operand:SI 1 "const_int_operand" "C0,R,J"))] + "INTVAL (operands[1]) >= -0x8000 && INTVAL (operands[1]) < 0x8000" + "@ + clr%.l %0 + mov3q%.l %1,%- + pea %a1" + [(set_attr "type" "clr_l,mov3q_l,pea")]) + +;This is never used. +;(define_insn "swapsi" +; [(set (match_operand:SI 0 "nonimmediate_operand" "+r") +; (match_operand:SI 1 "general_operand" "+r")) +; (set (match_dup 1) (match_dup 0))] +; "" +; "exg %1,%0") + +;; Special case of fullword move when source is zero for 68000_10. +;; moveq is faster on the 68000. +(define_insn "*movsi_const0_68000_10" + [(set (match_operand:SI 0 "movsi_const0_operand" "=d,a,g") + (const_int 0))] + "TUNE_68000_10" + "@ + moveq #0,%0 + sub%.l %0,%0 + clr%.l %0" + [(set_attr "type" "moveq_l,alu_l,clr_l") + (set_attr "opy" "*,0,*")]) + +;; Special case of fullword move when source is zero for 68040_60. +;; On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 +(define_insn "*movsi_const0_68040_60" + [(set (match_operand:SI 0 "movsi_const0_operand" "=a,g") + (const_int 0))] + "TUNE_68040_60" +{ + if (which_alternative == 0) + return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0"; + else if (which_alternative == 1) + return "clr%.l %0"; + else + { + gcc_unreachable (); + return ""; + } +} + [(set_attr "type" "lea,clr_l")]) + +;; Special case of fullword move when source is zero. +(define_insn "*movsi_const0" + [(set (match_operand:SI 0 "movsi_const0_operand" "=a,g") + (const_int 0))] + "!(TUNE_68000_10 || TUNE_68040_60)" + "@ + sub%.l %0,%0 + clr%.l %0" + [(set_attr "type" "alu_l,clr_l") + (set_attr "opy" "0,*")]) + +;; General case of fullword move. +;; +;; This is the main "hook" for PIC code. 
When generating +;; PIC, movsi is responsible for determining when the source address +;; needs PIC relocation and appropriately calling legitimize_pic_address +;; to perform the actual relocation. +;; +;; In both the PIC and non-PIC cases the patterns generated will +;; matched by the next define_insn. +(define_expand "movsi" + [(set (match_operand:SI 0 "" "") + (match_operand:SI 1 "" ""))] + "" +{ + rtx tmp, base, offset; + + /* Recognize the case where operand[1] is a reference to thread-local + data and load its address to a register. */ + if (!TARGET_PCREL && m68k_tls_reference_p (operands[1], false)) + { + rtx tmp = operands[1]; + rtx addend = NULL; + + if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS) + { + addend = XEXP (XEXP (tmp, 0), 1); + tmp = XEXP (XEXP (tmp, 0), 0); + } + + gcc_assert (GET_CODE (tmp) == SYMBOL_REF); + gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0); + + tmp = m68k_legitimize_tls_address (tmp); + + if (addend) + { + if (!REG_P (tmp)) + { + rtx reg; + + reg = gen_reg_rtx (Pmode); + emit_move_insn (reg, tmp); + tmp = reg; + } + + tmp = gen_rtx_PLUS (SImode, tmp, addend); + } + + operands[1] = tmp; + } + else if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode)) + { + /* The source is an address which requires PIC relocation. + Call legitimize_pic_address with the source, mode, and a relocation + register (a new pseudo, or the final destination if reload_in_progress + is set). Then fall through normally */ + rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode); + operands[1] = legitimize_pic_address (operands[1], SImode, temp); + } + else if (flag_pic && TARGET_PCREL && ! reload_in_progress) + { + /* Don't allow writes to memory except via a register; + the m68k doesn't consider PC-relative addresses to be writable. */ + if (symbolic_operand (operands[0], SImode)) + operands[0] = force_reg (SImode, XEXP (operands[0], 0)); + else if (GET_CODE (operands[0]) == MEM + && symbolic_operand (XEXP (operands[0], 0), SImode)) + operands[0] = gen_rtx_MEM (SImode, + force_reg (SImode, XEXP (operands[0], 0))); + } + if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) + { + split_const (operands[1], &base, &offset); + if (GET_CODE (base) == SYMBOL_REF + && !offset_within_block_p (base, INTVAL (offset))) + { + tmp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (SImode); + emit_move_insn (tmp, base); + emit_insn (gen_addsi3 (operands[0], tmp, offset)); + DONE; + } + } +}) + +;; General case of fullword move. +(define_insn "*movsi_m68k" + ;; Notes: make sure no alternative allows g vs g. + ;; We don't allow f-regs since fixed point cannot go in them. + [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d,a<") + (match_operand:SI 1 "general_src_operand" "damSnT,n,i"))] + "!TARGET_COLDFIRE && reload_completed" +{ + return output_move_simode (operands); +}) + +;; Before reload is completed the register constraints +;; force integer constants in range for a moveq to be reloaded +;; if they are headed for memory. +(define_insn "*movsi_m68k2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d,a<") + (match_operand:SI 1 "general_src_operand" "damSKT,n,i"))] + + "!TARGET_COLDFIRE" +{ + return output_move_simode (operands); +}) + +;; ColdFire move instructions can have at most one operand of mode >= 6. 
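;; Editorial summary (illustrative, not part of the upstream sources) of the
;; constant-loading alternatives in the ColdFire pattern below:
;;   mov3q.l #5,d0             3-bit quick immediate (-1 and 1..7)
;;   moveq #-100,d0            8-bit signed immediate
;;   moveq #65,d0 then swap d0 halfword-swapped constant (here 0x00410000)
;;   mvz.w or mvs.w            16-bit zero- or sign-extendable constant
;;   move.l                    any other constant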
+(define_insn "*movsi_cf" + [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d, d, d, d, d, a,Ap, a, r,g, U") + (match_operand:SI 1 "general_operand" " R,CQ,CW,CZ,CS,Ci,J,J Cs,Cs, g, Rr,U"))] + "TARGET_COLDFIRE" +{ + switch (which_alternative) + { + case 0: + return "mov3q%.l %1,%0"; + + case 1: + return "moveq %1,%0"; + + case 2: + { + unsigned u = INTVAL (operands[1]); + + operands[1] = GEN_INT ((u << 16) | (u >> 16)); /*|*/ + return "moveq %1,%0\n\tswap %0"; + } + + case 3: + return "mvz%.w %1,%0"; + + case 4: + return "mvs%.w %1,%0"; + + case 5: + return "move%.l %1,%0"; + + case 6: + return "move%.w %1,%0"; + + case 7: + return "pea %a1"; + + case 8: + return "lea %a1,%0"; + + case 9: + case 10: + case 11: + return "move%.l %1,%0"; + + default: + gcc_unreachable (); + return ""; + } +} + [(set_attr "type" "mov3q_l,moveq_l,*,mvsz,mvsz,move_l,move,pea,lea,move_l,move_l,move_l")]) + +;; Special case of fullword move, where we need to get a non-GOT PIC +;; reference into an address register. +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=a<") + (match_operand:SI 1 "pcrel_address" ""))] + "TARGET_PCREL" +{ + if (push_operand (operands[0], SImode)) + return "pea %a1"; + return "lea %a1,%0"; +}) + +(define_expand "movhi" + [(set (match_operand:HI 0 "nonimmediate_operand" "") + (match_operand:HI 1 "general_operand" ""))] + "" + "") + +(define_insn "" + [(set (match_operand:HI 0 "nonimmediate_operand" "=g") + (match_operand:HI 1 "general_src_operand" "gS"))] + "!TARGET_COLDFIRE" + "* return output_move_himode (operands);") + +(define_insn "" + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,g,U") + (match_operand:HI 1 "general_operand" "g,r,U"))] + "TARGET_COLDFIRE" + "* return output_move_himode (operands);") + +(define_expand "movstricthi" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "")) + (match_operand:HI 1 "general_src_operand" ""))] + "" + "") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm")) + (match_operand:HI 1 "general_src_operand" "rmSn"))] + "!TARGET_COLDFIRE" + "* return output_move_stricthi (operands);") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+d,m")) + (match_operand:HI 1 "general_src_operand" "rmn,r"))] + "TARGET_COLDFIRE" + "* return output_move_stricthi (operands);") + +(define_expand "movqi" + [(set (match_operand:QI 0 "nonimmediate_operand" "") + (match_operand:QI 1 "general_src_operand" ""))] + "" + "") + +(define_insn "" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d,*a,m") + (match_operand:QI 1 "general_src_operand" "dmSi*a,di*a,dmSi"))] + "!TARGET_COLDFIRE" + "* return output_move_qimode (operands);") + +(define_insn "" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d,dm,U,d*a") + (match_operand:QI 1 "general_src_operand" "dmi,d,U,di*a"))] + "TARGET_COLDFIRE" + "* return output_move_qimode (operands);") + +(define_expand "movstrictqi" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "")) + (match_operand:QI 1 "general_src_operand" ""))] + "" + "") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm")) + (match_operand:QI 1 "general_src_operand" "dmSn"))] + "!TARGET_COLDFIRE" + "* return output_move_strictqi (operands);") + +(define_insn "*movstrictqi_cf" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+d, Ac, d,m")) + (match_operand:QI 1 "general_src_operand" "C0,C0, dmn,d"))] + "TARGET_COLDFIRE" + "@ + clr%.b %0 + clr%.b %0 + 
move%.b %1,%0 + move%.b %1,%0" + [(set_attr "type" "clr,clr,move,move")]) + +(define_expand "pushqi1" + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2))) + (set (mem:QI (plus:SI (reg:SI SP_REG) (const_int 1))) + (match_operand:QI 0 "general_operand" ""))] + "!TARGET_COLDFIRE" + "") + +(define_expand "reload_insf" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f") + (match_operand:SF 1 "general_operand" "mf")) + (clobber (match_operand:SI 2 "register_operand" "=&a"))] + "TARGET_COLDFIRE_FPU" +{ + if (emit_move_sequence (operands, SFmode, operands[2])) + DONE; + + /* We don't want the clobber emitted, so handle this ourselves. */ + emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1])); + DONE; +}) + +(define_expand "reload_outsf" + [(set (match_operand:SF 0 "general_operand" "") + (match_operand:SF 1 "register_operand" "f")) + (clobber (match_operand:SI 2 "register_operand" "=&a"))] + "TARGET_COLDFIRE_FPU" +{ + if (emit_move_sequence (operands, SFmode, operands[2])) + DONE; + + /* We don't want the clobber emitted, so handle this ourselves. */ + emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1])); + DONE; +}) + +(define_expand "movsf" + [(set (match_operand:SF 0 "nonimmediate_operand" "") + (match_operand:SF 1 "general_operand" ""))] + "" + "") + +(define_insn "" + [(set (match_operand:SF 0 "nonimmediate_operand" "=rmf") + (match_operand:SF 1 "general_operand" "rmfF"))] + "!TARGET_COLDFIRE" +{ + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "f%$move%.x %1,%0"; + else if (ADDRESS_REG_P (operands[1])) + return "move%.l %1,%-\;f%$move%.s %+,%0"; + else if (GET_CODE (operands[1]) == CONST_DOUBLE) + return output_move_const_single (operands); + return "f%$move%.s %f1,%0"; + } + if (FP_REG_P (operands[1])) + { + if (ADDRESS_REG_P (operands[0])) + return "fmove%.s %1,%-\;move%.l %+,%0"; + return "fmove%.s %f1,%0"; + } + if (operands[1] == CONST0_RTX (SFmode) + /* clr insns on 68000 read before writing. */ + && ((TARGET_68010 || TARGET_COLDFIRE) + || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) + { + if (ADDRESS_REG_P (operands[0])) + { + /* On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 */ + if (TUNE_68040_60) + return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0"; + else + return "sub%.l %0,%0"; + } + /* moveq is faster on the 68000. */ + if (DATA_REG_P (operands[0]) && TUNE_68000_10) + return "moveq #0,%0"; + return "clr%.l %0"; + } + return "move%.l %1,%0"; +}) + +(define_insn "movsf_cf_soft" + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,g,U") + (match_operand:SF 1 "general_operand" "g,r,U"))] + "TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU" + "move%.l %1,%0" + [(set_attr "type" "move_l")]) + +;; SFmode MEMs are restricted to modes 2-4 if TARGET_COLDFIRE_FPU. +;; The move instructions can handle all combinations. 
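;; Editorial note (illustrative, not part of the upstream sources): the
;; "modes 2-4" above are the (An), (An)+ and -(An) addressing modes.  The
;; ColdFire FPU cannot use the more complex displaced or indexed forms for
;; its float operands, and the 'U' memory constraint in the patterns below
;; is what restricts memory operands accordingly.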
+(define_insn "movsf_cf_hard" + [(set (match_operand:SF 0 "nonimmediate_operand" "=rU, f, f,mr,f,r,f +,m") + (match_operand:SF 1 "general_operand" " f, rU,f,rm,F,F, m +,f"))] + "TARGET_COLDFIRE_FPU" +{ + if (which_alternative == 4 || which_alternative == 5) { + rtx xoperands[2]; + REAL_VALUE_TYPE r; + long l; + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + REAL_VALUE_TO_TARGET_SINGLE (r, l); + xoperands[0] = operands[0]; + xoperands[1] = GEN_INT (l); + if (which_alternative == 5) { + if (l == 0) { + if (ADDRESS_REG_P (xoperands[0])) + output_asm_insn ("sub%.l %0,%0", xoperands); + else + output_asm_insn ("clr%.l %0", xoperands); + } else + if (GET_CODE (operands[0]) == MEM + && symbolic_operand (XEXP (operands[0], 0), SImode)) + output_asm_insn ("move%.l %1,%-;move%.l %+,%0", xoperands); + else + output_asm_insn ("move%.l %1,%0", xoperands); + return ""; + } + if (l != 0) + output_asm_insn ("move%.l %1,%-;fsmove%.s %+,%0", xoperands); + else + output_asm_insn ("clr%.l %-;fsmove%.s %+,%0", xoperands); + return ""; + } + if (FP_REG_P (operands[0])) + { + if (ADDRESS_REG_P (operands[1])) + return "move%.l %1,%-;fsmove%.s %+,%0"; + if (FP_REG_P (operands[1])) + return "fsmove%.d %1,%0"; + return "fsmove%.s %f1,%0"; + } + if (FP_REG_P (operands[1])) + { + if (ADDRESS_REG_P (operands[0])) + return "fmove%.s %1,%-;move%.l %+,%0"; + return "fmove%.s %f1,%0"; + } + if (operands[1] == CONST0_RTX (SFmode)) + { + if (ADDRESS_REG_P (operands[0])) + return "sub%.l %0,%0"; + return "clr%.l %0"; + } + return "move%.l %1,%0"; +}) + +(define_expand "reload_indf" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f") + (match_operand:DF 1 "general_operand" "mf")) + (clobber (match_operand:SI 2 "register_operand" "=&a"))] + "TARGET_COLDFIRE_FPU" +{ + if (emit_move_sequence (operands, DFmode, operands[2])) + DONE; + + /* We don't want the clobber emitted, so handle this ourselves. */ + emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1])); + DONE; +}) + +(define_expand "reload_outdf" + [(set (match_operand:DF 0 "general_operand" "") + (match_operand:DF 1 "register_operand" "f")) + (clobber (match_operand:SI 2 "register_operand" "=&a"))] + "TARGET_COLDFIRE_FPU" +{ + if (emit_move_sequence (operands, DFmode, operands[2])) + DONE; + + /* We don't want the clobber emitted, so handle this ourselves. 
*/ + emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1])); + DONE; +}) + +(define_expand "movdf" + [(set (match_operand:DF 0 "nonimmediate_operand" "") + (match_operand:DF 1 "general_operand" ""))] + "" +{ + if (TARGET_COLDFIRE_FPU) + if (emit_move_sequence (operands, DFmode, 0)) + DONE; +}) + +(define_insn "" + [(set (match_operand:DF 0 "nonimmediate_operand" "=rm,rf,rf,&rof<>") + (match_operand:DF 1 "general_operand" "*rf,m,0,*rofE<>"))] +; [(set (match_operand:DF 0 "nonimmediate_operand" "=rm,&rf,&rof<>") +; (match_operand:DF 1 "general_operand" "rf,m,rofF<>"))] + "!TARGET_COLDFIRE" +{ + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "f%&move%.x %1,%0"; + if (REG_P (operands[1])) + { + rtx xoperands[2]; + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + output_asm_insn ("move%.l %1,%-", xoperands); + output_asm_insn ("move%.l %1,%-", operands); + return "f%&move%.d %+,%0"; + } + if (GET_CODE (operands[1]) == CONST_DOUBLE) + return output_move_const_double (operands); + return "f%&move%.d %f1,%0"; + } + else if (FP_REG_P (operands[1])) + { + if (REG_P (operands[0])) + { + output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + else + return "fmove%.d %f1,%0"; + } + return output_move_double (operands); +}) + +(define_insn_and_split "movdf_cf_soft" + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,g") + (match_operand:DF 1 "general_operand" "g,r"))] + "TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU" + "#" + "&& reload_completed" + [(const_int 0)] +{ + m68k_emit_move_double (operands); + DONE; +}) + +(define_insn "movdf_cf_hard" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f, U,r,f,r,r,m,f") + (match_operand:DF 1 "general_operand" " fU,f, f,r,r,m,r,E"))] + "TARGET_COLDFIRE_FPU" +{ + rtx xoperands[3]; + REAL_VALUE_TYPE r; + long l[2]; + + switch (which_alternative) + { + default: + return "fdmove%.d %1,%0"; + case 1: + return "fmove%.d %1,%0"; + case 2: + return "fmove%.d %1,%-;move%.l %+,%0;move%.l %+,%R0"; + case 3: + return "move%.l %R1,%-;move%.l %1,%-;fdmove%.d %+,%0"; + case 4: case 5: case 6: + return output_move_double (operands); + case 7: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + REAL_VALUE_TO_TARGET_DOUBLE (r, l); + xoperands[0] = operands[0]; + xoperands[1] = GEN_INT (l[0]); + xoperands[2] = GEN_INT (l[1]); + if (operands[1] == CONST0_RTX (DFmode)) + output_asm_insn ("clr%.l %-;clr%.l %-;fdmove%.d %+,%0", + xoperands); + else + if (l[1] == 0) + output_asm_insn ("clr%.l %-;move%.l %1,%-;fdmove%.d %+,%0", + xoperands); + else + output_asm_insn ("move%.l %2,%-;move%.l %1,%-;fdmove%.d %+,%0", + xoperands); + return ""; + } +}) + +;; ??? The XFmode patterns are schizophrenic about whether constants are +;; allowed. Most but not all have predicates and constraint that disallow +;; constants. Most but not all have output templates that handle constants. +;; See also LEGITIMATE_CONSTANT_P. + +(define_expand "movxf" + [(set (match_operand:XF 0 "nonimmediate_operand" "") + (match_operand:XF 1 "general_operand" ""))] + "" +{ + /* We can't rewrite operands during reload. */ + if (! reload_in_progress) + { + if (CONSTANT_P (operands[1])) + { + operands[1] = force_const_mem (XFmode, operands[1]); + if (! 
memory_address_p (XFmode, XEXP (operands[1], 0))) + operands[1] = adjust_address (operands[1], XFmode, 0); + } + if (flag_pic && TARGET_PCREL) + { + /* Don't allow writes to memory except via a register; the + m68k doesn't consider PC-relative addresses to be writable. */ + if (GET_CODE (operands[0]) == MEM + && symbolic_operand (XEXP (operands[0], 0), SImode)) + operands[0] = gen_rtx_MEM (XFmode, + force_reg (SImode, XEXP (operands[0], 0))); + } + } +}) + +(define_insn "" + [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,!r,!f,!r,m,!r") + (match_operand:XF 1 "nonimmediate_operand" "m,f,f,f,r,!r,!r,m"))] + "TARGET_68881" +{ + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "fmove%.x %1,%0"; + if (REG_P (operands[1])) + { + rtx xoperands[2]; + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); + output_asm_insn ("move%.l %1,%-", xoperands); + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + output_asm_insn ("move%.l %1,%-", xoperands); + output_asm_insn ("move%.l %1,%-", operands); + return "fmove%.x %+,%0"; + } + if (GET_CODE (operands[1]) == CONST_DOUBLE) + return "fmove%.x %1,%0"; + return "fmove%.x %f1,%0"; + } + if (FP_REG_P (operands[1])) + { + if (REG_P (operands[0])) + { + output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + output_asm_insn ("move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + /* Must be memory destination. */ + return "fmove%.x %f1,%0"; + } + return output_move_double (operands); +}) + +(define_insn "" + [(set (match_operand:XF 0 "nonimmediate_operand" "=rm,rf,&rof<>") + (match_operand:XF 1 "nonimmediate_operand" "rf,m,rof<>"))] + "! TARGET_68881 && ! TARGET_COLDFIRE" +{ + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "fmove%.x %1,%0"; + if (REG_P (operands[1])) + { + rtx xoperands[2]; + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); + output_asm_insn ("move%.l %1,%-", xoperands); + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + output_asm_insn ("move%.l %1,%-", xoperands); + output_asm_insn ("move%.l %1,%-", operands); + return "fmove%.x %+,%0"; + } + if (GET_CODE (operands[1]) == CONST_DOUBLE) + return "fmove%.x %1,%0"; + return "fmove%.x %f1,%0"; + } + if (FP_REG_P (operands[1])) + { + if (REG_P (operands[0])) + { + output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + output_asm_insn ("move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + else + return "fmove%.x %f1,%0"; + } + return output_move_double (operands); +}) + +(define_insn "" + [(set (match_operand:XF 0 "nonimmediate_operand" "=r,g") + (match_operand:XF 1 "nonimmediate_operand" "g,r"))] + "! TARGET_68881 && TARGET_COLDFIRE" + "* return output_move_double (operands);") + +(define_expand "movdi" + ;; Let's see if it really still needs to handle fp regs, and, if so, why. + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "" + "") + +;; movdi can apply to fp regs in some cases +(define_insn "" + ;; Let's see if it really still needs to handle fp regs, and, if so, why. 
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r,&ro<>") + (match_operand:DI 1 "general_operand" "rF,m,roi<>F"))] +; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&r,&ro<>,!&rm,!&f") +; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF"))] +; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&rf,&ro<>,!&rm,!&f") +; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF,rfF"))] + "!TARGET_COLDFIRE" +{ + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "fmove%.x %1,%0"; + if (REG_P (operands[1])) + { + rtx xoperands[2]; + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + output_asm_insn ("move%.l %1,%-", xoperands); + output_asm_insn ("move%.l %1,%-", operands); + return "fmove%.d %+,%0"; + } + if (GET_CODE (operands[1]) == CONST_DOUBLE) + return output_move_const_double (operands); + return "fmove%.d %f1,%0"; + } + else if (FP_REG_P (operands[1])) + { + if (REG_P (operands[0])) + { + output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + else + return "fmove%.d %f1,%0"; + } + return output_move_double (operands); +}) + +(define_insn "" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,g") + (match_operand:DI 1 "general_operand" "g,r"))] + "TARGET_COLDFIRE" + "* return output_move_double (operands);") + +;; Thus goes after the move instructions +;; because the move instructions are better (require no spilling) +;; when they can apply. It goes before the add/sub insns +;; so we will prefer it to them. + +(define_insn "pushasi" + [(set (match_operand:SI 0 "push_operand" "=m") + (match_operand:SI 1 "address_operand" "p"))] + "" + "pea %a1" + [(set_attr "type" "pea")]) + +;; truncation instructions +(define_insn "truncsiqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d") + (truncate:QI + (match_operand:SI 1 "general_src_operand" "doJS,i")))] + "" +{ + if (GET_CODE (operands[0]) == REG) + { + /* Must clear condition codes, since the move.l bases them on + the entire 32 bits, not just the desired 8 bits. */ + CC_STATUS_INIT; + return "move%.l %1,%0"; + } + if (GET_CODE (operands[1]) == MEM) + operands[1] = adjust_address (operands[1], QImode, 3); + return "move%.b %1,%0"; +}) + +(define_insn "trunchiqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d") + (truncate:QI + (match_operand:HI 1 "general_src_operand" "doJS,i")))] + "" +{ + if (GET_CODE (operands[0]) == REG + && (GET_CODE (operands[1]) == MEM + || GET_CODE (operands[1]) == CONST_INT)) + { + /* Must clear condition codes, since the move.w bases them on + the entire 16 bits, not just the desired 8 bits. */ + CC_STATUS_INIT; + return "move%.w %1,%0"; + } + if (GET_CODE (operands[0]) == REG) + { + /* Must clear condition codes, since the move.l bases them on + the entire 32 bits, not just the desired 8 bits. */ + CC_STATUS_INIT; + return "move%.l %1,%0"; + } + if (GET_CODE (operands[1]) == MEM) + operands[1] = adjust_address (operands[1], QImode, 1); + return "move%.b %1,%0"; +}) + +(define_insn "truncsihi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm,d") + (truncate:HI + (match_operand:SI 1 "general_src_operand" "roJS,i")))] + "" +{ + if (GET_CODE (operands[0]) == REG) + { + /* Must clear condition codes, since the move.l bases them on + the entire 32 bits, not just the desired 8 bits. 
*/ + CC_STATUS_INIT; + return "move%.l %1,%0"; + } + if (GET_CODE (operands[1]) == MEM) + operands[1] = adjust_address (operands[1], QImode, 2); + return "move%.w %1,%0"; +}) + +;; zero extension instructions + +;; two special patterns to match various post_inc/pre_dec patterns +(define_insn_and_split "*zero_extend_inc" + [(set (match_operand 0 "post_inc_operand" "") + (zero_extend (match_operand 1 "register_operand" "")))] + "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT && + GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT && + GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2" + "#" + "" + [(set (match_dup 0) + (const_int 0)) + (set (match_dup 0) + (match_dup 1))] +{ + operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0); +}) + +(define_insn_and_split "*zero_extend_dec" + [(set (match_operand 0 "pre_dec_operand" "") + (zero_extend (match_operand 1 "register_operand" "")))] + "(GET_MODE (operands[0]) != HImode || XEXP (XEXP (operands[0], 0), 0) != stack_pointer_rtx) && + GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT && + GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT && + GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2" + "#" + "" + [(set (match_dup 0) + (match_dup 1)) + (set (match_dup 0) + (const_int 0))] +{ + operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0); +}) + +(define_insn_and_split "zero_extendqidi2" + [(set (match_operand:DI 0 "register_operand" "") + (zero_extend:DI (match_operand:QI 1 "nonimmediate_src_operand" "")))] + "" + "#" + "" + [(set (match_dup 2) + (zero_extend:SI (match_dup 1))) + (set (match_dup 3) + (const_int 0))] +{ + operands[2] = gen_lowpart (SImode, operands[0]); + operands[3] = gen_highpart (SImode, operands[0]); +}) + +(define_insn_and_split "zero_extendhidi2" + [(set (match_operand:DI 0 "register_operand" "") + (zero_extend:DI (match_operand:HI 1 "nonimmediate_src_operand" "")))] + "" + "#" + "" + [(set (match_dup 2) + (zero_extend:SI (match_dup 1))) + (set (match_dup 3) + (const_int 0))] +{ + operands[2] = gen_lowpart (SImode, operands[0]); + operands[3] = gen_highpart (SImode, operands[0]); +}) + +(define_expand "zero_extendsidi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))] + "" +{ + if (GET_CODE (operands[0]) == MEM + && GET_CODE (operands[1]) == MEM) + operands[1] = force_reg (SImode, operands[1]); +}) + +(define_insn_and_split "*zero_extendsidi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))] + "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM" + "#" + "" + [(set (match_dup 2) + (match_dup 1)) + (set (match_dup 3) + (const_int 0))] +{ + operands[2] = gen_lowpart (SImode, operands[0]); + operands[3] = gen_highpart (SImode, operands[0]); +}) + +(define_insn "*zero_extendhisi2_cf" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))] + "ISA_HAS_MVS_MVZ" + "mvz%.w %1,%0" + [(set_attr "type" "mvsz")]) + +(define_insn "zero_extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))] + "" + "#") + +(define_expand "zero_extendqihi2" + [(set (match_operand:HI 0 "register_operand" "") + (zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "")))] + "!TARGET_COLDFIRE" + "") + +(define_insn 
"*zero_extendqihi2" + [(set (match_operand:HI 0 "register_operand" "=d") + (zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))] + "!TARGET_COLDFIRE" + "#") + +(define_insn "*zero_extendqisi2_cfv4" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))] + "ISA_HAS_MVS_MVZ" + "mvz%.b %1,%0" + [(set_attr "type" "mvsz")]) + +(define_insn "zero_extendqisi2" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))] + "" + "#") + +;; these two pattern split everything else which isn't matched by +;; something else above +(define_split + [(set (match_operand 0 "register_operand" "") + (zero_extend (match_operand 1 "nonimmediate_src_operand" "")))] + "!ISA_HAS_MVS_MVZ + && reload_completed + && reg_mentioned_p (operands[0], operands[1])" + [(set (strict_low_part (match_dup 2)) + (match_dup 1)) + (set (match_dup 0) + (match_op_dup 4 [(match_dup 0) (match_dup 3)]))] +{ + operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]); + operands[3] = GEN_INT (GET_MODE_MASK (GET_MODE (operands[1]))); + operands[4] = gen_rtx_AND (GET_MODE (operands[0]), operands[0], operands[3]); +}) + +(define_split + [(set (match_operand 0 "register_operand" "") + (zero_extend (match_operand 1 "nonimmediate_src_operand" "")))] + "!ISA_HAS_MVS_MVZ && reload_completed" + [(set (match_dup 0) + (const_int 0)) + (set (strict_low_part (match_dup 2)) + (match_dup 1))] +{ + operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]); +}) + +;; sign extension instructions + +(define_insn "extendqidi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d") + (sign_extend:DI (match_operand:QI 1 "general_src_operand" "rmS")))] + "" +{ + CC_STATUS_INIT; + operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + if (ISA_HAS_MVS_MVZ) + return "mvs%.b %1,%2\;smi %0\;extb%.l %0"; + if (TARGET_68020 || TARGET_COLDFIRE) + { + if (ADDRESS_REG_P (operands[1])) + return "move%.w %1,%2\;extb%.l %2\;smi %0\;extb%.l %0"; + else + return "move%.b %1,%2\;extb%.l %2\;smi %0\;extb%.l %0"; + } + else + { + if (ADDRESS_REG_P (operands[1])) + return "move%.w %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0"; + else + return "move%.b %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0"; + } +}) + +(define_insn "extendhidi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d") + (sign_extend:DI + (match_operand:HI 1 "general_src_operand" "rmS")))] + "" +{ + CC_STATUS_INIT; + operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + if (ISA_HAS_MVS_MVZ) + return "mvs%.w %1,%2\;smi %0\;extb%.l %0"; + if (TARGET_68020 || TARGET_COLDFIRE) + return "move%.w %1,%2\;ext%.l %2\;smi %0\;extb%.l %0"; + else + return "move%.w %1,%2\;ext%.l %2\;smi %0\;ext%.w %0\;ext%.l %0"; +}) + +(define_insn "extendsidi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d,o,o,<") + (sign_extend:DI + (match_operand:SI 1 "nonimmediate_src_operand" "rm,rm,r,rm"))) + (clobber (match_scratch:SI 2 "=X,d,d,d"))] + "" +{ + CC_STATUS_INIT; + + if (which_alternative == 0) + /* Handle alternative 0. */ + { + if (TARGET_68020 || TARGET_COLDFIRE) + return "move%.l %1,%R0\;smi %0\;extb%.l %0"; + else + return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0"; + } + + /* Handle alternatives 1, 2 and 3. We don't need to adjust address by 4 + in alternative 3 because autodecrement will do that for us. */ + operands[3] = adjust_address (operands[0], SImode, + which_alternative == 3 ? 
0 : 4); + operands[0] = adjust_address (operands[0], SImode, 0); + + if (TARGET_68020 || TARGET_COLDFIRE) + return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0"; + else + return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0"; +} + [(set_attr "ok_for_coldfire" "yes,no,yes,yes")]) + +;; Special case when one can avoid register clobbering, copy and test +;; Maybe there is a way to make that the general case, by forcing the +;; result of the SI tree to be in the lower register of the DI target + +(define_insn "extendplussidi" + [(set (match_operand:DI 0 "register_operand" "=d") + (sign_extend:DI (plus:SI (match_operand:SI 1 "general_operand" "%rmn") + (match_operand:SI 2 "general_operand" "rmn"))))] + "" +{ + CC_STATUS_INIT; + operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + if (GET_CODE (operands[1]) == CONST_INT + && (unsigned) INTVAL (operands[1]) > 8) + { + rtx tmp = operands[1]; + + operands[1] = operands[2]; + operands[2] = tmp; + } + if (GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[3])) + output_asm_insn ("add%.l %2,%3", operands); + else + output_asm_insn ("move%.l %2,%3\;add%.l %1,%3", operands); + if (TARGET_68020 || TARGET_COLDFIRE) + return "smi %0\;extb%.l %0"; + else + return "smi %0\;ext%.w %0\;ext%.l %0"; +}) + +(define_expand "extendhisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (sign_extend:SI + (match_operand:HI 1 "nonimmediate_src_operand" "")))] + "" + "") + +(define_insn "*cfv4_extendhisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extend:SI + (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))] + "ISA_HAS_MVS_MVZ" + "mvs%.w %1,%0" + [(set_attr "type" "mvsz")]) + +(define_insn "*68k_extendhisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=*d,a") + (sign_extend:SI + (match_operand:HI 1 "nonimmediate_src_operand" "0,rmS")))] + "!ISA_HAS_MVS_MVZ" + "@ + ext%.l %0 + move%.w %1,%0" + [(set_attr "type" "ext,move")]) + +(define_insn "extendqihi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d") + (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0")))] + "" + "ext%.w %0" + [(set_attr "type" "ext")]) + +(define_expand "extendqisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))] + "TARGET_68020 || TARGET_COLDFIRE" + "") + +(define_insn "*cfv4_extendqisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "rms")))] + "ISA_HAS_MVS_MVZ" + "mvs%.b %1,%0" + [(set_attr "type" "mvsz")]) + +(define_insn "*68k_extendqisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0")))] + "TARGET_68020 || (TARGET_COLDFIRE && !ISA_HAS_MVS_MVZ)" + "extb%.l %0" + [(set_attr "type" "ext")]) + +;; Conversions between float and double. + +(define_expand "extendsfdf2" + [(set (match_operand:DF 0 "nonimmediate_operand" "") + (float_extend:DF + (match_operand:SF 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "" + [(set (match_operand:DF 0 "nonimmediate_operand" "=*fdm,f") + (float_extend:DF + (match_operand:SF 1 "general_operand" "f,dmF")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[0]) && FP_REG_P (operands[1])) + { + if (REGNO (operands[0]) == REGNO (operands[1])) + { + /* Extending float to double in an fp-reg is a no-op. + NOTICE_UPDATE_CC has already assumed that the + cc will be set. So cancel what it did. 
*/ + cc_status = cc_prev_status; + return ""; + } + return "f%&move%.x %1,%0"; + } + if (FP_REG_P (operands[0])) + return "f%&move%.s %f1,%0"; + if (DATA_REG_P (operands[0]) && FP_REG_P (operands[1])) + { + output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + return "fmove%.d %f1,%0"; +}) + +(define_insn "extendsfdf2_cf" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f") + (float_extend:DF + (match_operand:SF 1 "general_operand" "f,U")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[0]) && FP_REG_P (operands[1])) + { + if (REGNO (operands[0]) == REGNO (operands[1])) + { + /* Extending float to double in an fp-reg is a no-op. + NOTICE_UPDATE_CC has already assumed that the + cc will be set. So cancel what it did. */ + cc_status = cc_prev_status; + return ""; + } + return "fdmove%.d %1,%0"; + } + return "fdmove%.s %f1,%0"; +}) + +;; This cannot output into an f-reg because there is no way to be +;; sure of truncating in that case. +(define_expand "truncdfsf2" + [(set (match_operand:SF 0 "nonimmediate_operand" "") + (float_truncate:SF + (match_operand:DF 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +;; On the '040 we can truncate in a register accurately and easily. +(define_insn "" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f") + (float_truncate:SF + (match_operand:DF 1 "general_operand" "fmG")))] + "TARGET_68881 && TARGET_68040" +{ + if (FP_REG_P (operands[1])) + return "f%$move%.x %1,%0"; + return "f%$move%.d %f1,%0"; +}) + +(define_insn "truncdfsf2_cf" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,dU") + (float_truncate:SF + (match_operand:DF 1 "general_operand" "U,f")))] + "TARGET_COLDFIRE_FPU" + "@ + fsmove%.d %1,%0 + fmove%.s %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "*truncdfsf2_68881" + [(set (match_operand:SF 0 "nonimmediate_operand" "=dm") + (float_truncate:SF + (match_operand:DF 1 "general_operand" "f")))] + "TARGET_68881" + "fmove%.s %f1,%0" + [(set_attr "type" "fmove")]) + +;; Conversion between fixed point and floating point. +;; Note that among the fix-to-float insns +;; the ones that start with SImode come first. +;; That is so that an operand that is a CONST_INT +;; (and therefore lacks a specific machine mode). +;; will be recognized as SImode (which is always valid) +;; rather than as QImode or HImode. 
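+
+;; Purely as an illustration (these helper names are invented for this
+;; note and are not part of the port): the floatsi/floathi/floatqi
+;; expanders below implement the plain C conversions from an integer
+;; type to a floating type, which on a 68881 come out as a single fmove
+;; of the matching integer width.
+;;
+;;   double si_to_df (int i)         { return (double) i; }  /* fmove.l */
+;;   double hi_to_df (short h)       { return (double) h; }  /* fmove.w */
+;;   double qi_to_df (signed char q) { return (double) q; }  /* fmove.b */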
+ +(define_expand "floatsi2" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (float:FP (match_operand:SI 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "floatsi2_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:SI 1 "general_operand" "dmi")))] + "TARGET_68881" + "fmove%.l %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "floatsi2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:SI 1 "general_operand" "dU")))] + "TARGET_COLDFIRE_FPU" + "fmove%.l %1,%0" + [(set_attr "type" "fmove")]) + + +(define_expand "floathi2" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (float:FP (match_operand:HI 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "floathi2_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:HI 1 "general_operand" "dmn")))] + "TARGET_68881" + "fmove%.w %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "floathi2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:HI 1 "general_operand" "dU")))] + "TARGET_COLDFIRE_FPU" + "fmove%.w %1,%0" + [(set_attr "type" "fmove")]) + + +(define_expand "floatqi2" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (float:FP (match_operand:QI 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "floatqi2_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:QI 1 "general_operand" "dmn")))] + "TARGET_68881" + "fmove%.b %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "floatqi2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:QI 1 "general_operand" "dU")))] + "TARGET_COLDFIRE_FPU" + "fmove%.b %1,%0" + [(set_attr "type" "fmove")]) + + +;; New routines to convert floating-point values to integers +;; to be used on the '040. These should be faster than trapping +;; into the kernel to emulate fintrz. They should also be faster +;; than calling the subroutines fixsfsi or fixdfsi. + +(define_insn "fix_truncdfsi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") + (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f")))) + (clobber (match_scratch:SI 2 "=d")) + (clobber (match_scratch:SI 3 "=d"))] + "TARGET_68881 && TUNE_68040" +{ + CC_STATUS_INIT; + return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.l %1,%0\;fmovem%.l %2,%!"; +}) + +(define_insn "fix_truncdfhi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (fix:HI (fix:DF (match_operand:DF 1 "register_operand" "f")))) + (clobber (match_scratch:SI 2 "=d")) + (clobber (match_scratch:SI 3 "=d"))] + "TARGET_68881 && TUNE_68040" +{ + CC_STATUS_INIT; + return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.w %1,%0\;fmovem%.l %2,%!"; +}) + +(define_insn "fix_truncdfqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (fix:QI (fix:DF (match_operand:DF 1 "register_operand" "f")))) + (clobber (match_scratch:SI 2 "=d")) + (clobber (match_scratch:SI 3 "=d"))] + "TARGET_68881 && TUNE_68040" +{ + CC_STATUS_INIT; + return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.b %1,%0\;fmovem%.l %2,%!"; +}) + +;; Convert a float to a float whose value is an integer. +;; This is the first stage of converting it to an integer type. 
+ +(define_expand "ftrunc2" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (fix:FP (match_operand:FP 1 "general_operand" "")))] + "TARGET_HARD_FLOAT && !TUNE_68040" + "") + +(define_insn "ftrunc2_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (fix:FP (match_operand:FP 1 "general_operand" "fm")))] + "TARGET_68881 && !TUNE_68040" +{ + if (FP_REG_P (operands[1])) + return "fintrz%.x %f1,%0"; + return "fintrz%. %f1,%0"; +} + [(set_attr "type" "falu")]) + +(define_insn "ftrunc2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (fix:FP (match_operand:FP 1 "general_operand" "fU")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[1])) + return "fintrz%.d %f1,%0"; + return "fintrz%. %f1,%0"; +} + [(set_attr "type" "falu")]) + +;; Convert a float whose value is an integer +;; to an actual integer. Second stage of converting float to integer type. +(define_expand "fixqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "") + (fix:QI (match_operand:FP 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "fixqi2_68881" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (fix:QI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" + "fmove%.b %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "fixqi2_cf" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dU") + (fix:QI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_COLDFIRE_FPU" + "fmove%.b %1,%0" + [(set_attr "type" "fmove")]) + +(define_expand "fixhi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "") + (fix:HI (match_operand:FP 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "fixhi2_68881" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (fix:HI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" + "fmove%.w %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "fixhi2_cf" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dU") + (fix:HI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_COLDFIRE_FPU" + "fmove%.w %1,%0" + [(set_attr "type" "fmove")]) + +(define_expand "fixsi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (fix:SI (match_operand:FP 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "fixsi2_68881" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") + (fix:SI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" + "fmove%.l %1,%0" + [(set_attr "type" "fmove")]) + +(define_insn "fixsi2_cf" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dU") + (fix:SI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_COLDFIRE_FPU" + "fmove%.l %1,%0" + [(set_attr "type" "fmove")]) + + +;; add instructions + +(define_insn "adddi_lshrdi_63" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d") + (plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "rm") + (const_int 63)) + (match_dup 1))) + (clobber (match_scratch:SI 2 "=d"))] + "" +{ + operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + if (REG_P (operands[1]) && REGNO (operands[1]) == REGNO (operands[0])) + return + "move%.l %1,%2\;add%.l %2,%2\;subx%.l %2,%2\;sub%.l %2,%3\;subx%.l %2,%0"; + if (GET_CODE (operands[1]) == REG) + operands[4] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC + || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC) + operands[4] = operands[1]; + else + operands[4] = adjust_address (operands[1], SImode, 4); + if (GET_CODE (operands[1]) == MEM + && GET_CODE (XEXP (operands[1], 0)) == PRE_DEC) + 
output_asm_insn ("move%.l %4,%3", operands); + output_asm_insn ("move%.l %1,%0\;smi %2", operands); + if (TARGET_68020 || TARGET_COLDFIRE) + output_asm_insn ("extb%.l %2", operands); + else + output_asm_insn ("ext%.w %2\;ext%.l %2", operands); + if (GET_CODE (operands[1]) != MEM + || GET_CODE (XEXP (operands[1], 0)) != PRE_DEC) + output_asm_insn ("move%.l %4,%3", operands); + return "sub%.l %2,%3\;subx%.l %2,%0"; +}) + +(define_insn "adddi_sexthishl32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d") + (plus:DI (ashift:DI (sign_extend:DI + (match_operand:HI 1 "general_operand" "rm,rm,rm,rm")) + (const_int 32)) + (match_operand:DI 2 "general_operand" "0,0,0,0"))) + (clobber (match_scratch:SI 3 "=&d,X,a,?d"))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (ADDRESS_REG_P (operands[0])) + return "add%.w %1,%0"; + else if (ADDRESS_REG_P (operands[3])) + return "move%.w %1,%3\;add%.l %3,%0"; + else + return "move%.w %1,%3\;ext%.l %3\;add%.l %3,%0"; +}) + +(define_insn "*adddi_dilshr32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d,o") + (plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro,d") + (const_int 32)) + (match_operand:DI 2 "general_operand" "0,0")))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == REG) + operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[2] = adjust_address (operands[0], SImode, 4); + return "add%.l %1,%2\;negx%.l %0\;neg%.l %0"; +}) + +(define_insn "*adddi_dilshr32_cf" + [(set (match_operand:DI 0 "register_operand" "=d") + (plus:DI (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "ro") + (const_int 32)) + (match_operand:DI 2 "register_operand" "0")))] + "TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + return "add%.l %1,%R0\;negx%.l %0\;neg%.l %0"; +}) + +(define_insn "adddi_dishl32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o") +;; (plus:DI (match_operand:DI 2 "general_operand" "%0") +;; (ashift:DI (match_operand:DI 1 "general_operand" "ro") +;; (const_int 32))))] + (plus:DI (ashift:DI (match_operand:DI 1 "general_operand" "ro,d") + (const_int 32)) + (match_operand:DI 2 "general_operand" "0,0")))] + "" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[1]) == REG) + operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + else + operands[1] = adjust_address (operands[1], SImode, 4); + return "add%.l %1,%0"; +} + [(set_attr "type" "alu_l")]) + +(define_insn "adddi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d") + (plus:DI (match_operand:DI 1 "general_operand" "%0,0,0,0") + (match_operand:DI 2 "general_operand" "d,no>,d,a"))) + (clobber (match_scratch:SI 3 "=&d,&d,X,&d"))] + "" +{ + if (DATA_REG_P (operands[0])) + { + if (DATA_REG_P (operands[2])) + return "add%.l %R2,%R0\;addx%.l %2,%0"; + else if (GET_CODE (operands[2]) == MEM + && GET_CODE (XEXP (operands[2], 0)) == POST_INC) + return "move%.l %2,%3\;add%.l %2,%R0\;addx%.l %3,%0"; + else + { + rtx high, low; + rtx xoperands[2]; + + if (GET_CODE (operands[2]) == REG) + { + low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1); + high = operands[2]; + } + else if (CONSTANT_P (operands[2])) + split_double (operands[2], &high, &low); + else + { + low = adjust_address (operands[2], SImode, 4); + high = operands[2]; + } + + operands[1] = low, operands[2] = high; + xoperands[0] = operands[3]; + if (GET_CODE (operands[1]) == CONST_INT + && INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0) + xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1); + else + xoperands[1] = operands[2]; + + output_asm_insn 
(output_move_simode (xoperands), xoperands); + if (GET_CODE (operands[1]) == CONST_INT) + { + if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8) + return "addq%.l %1,%R0\;addx%.l %3,%0"; + else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0) + { + operands[1] = GEN_INT (-INTVAL (operands[1])); + return "subq%.l %1,%R0\;subx%.l %3,%0"; + } + } + return "add%.l %1,%R0\;addx%.l %3,%0"; + } + } + else + { + gcc_assert (GET_CODE (operands[0]) == MEM); + CC_STATUS_INIT; + if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) + { + operands[1] = gen_rtx_MEM (SImode, + plus_constant (XEXP(operands[0], 0), -8)); + return "move%.l %0,%3\;add%.l %R2,%0\;addx%.l %2,%3\;move%.l %3,%1"; + } + else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) + { + operands[1] = XEXP(operands[0], 0); + return "add%.l %R2,%0\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%1"; + } + else + { + operands[1] = adjust_address (operands[0], SImode, 4); + return "add%.l %R2,%1\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%0"; + } + } +}) + +(define_insn "addsi_lshrsi_31" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,dm,d") + (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "rm,r,rm") + (const_int 31)) + (match_dup 1)))] + "" +{ + operands[2] = operands[0]; + operands[3] = gen_label_rtx(); + if (GET_CODE (operands[0]) == MEM) + { + if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) + operands[0] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0)); + else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) + operands[2] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0)); + } + output_asm_insn ("move%.l %1,%0", operands); + output_asm_insn ("jpl %l3", operands); + output_asm_insn ("addq%.l #1,%2", operands); + (*targetm.asm_out.internal_label) (asm_out_file, "L", + CODE_LABEL_NUMBER (operands[3])); + return ""; +} + [(set_attr "ok_for_coldfire" "no,yes,yes")]) + +(define_expand "addsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (plus:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_src_operand" "")))] + "" + "") + +;; Note that the middle two alternatives are near-duplicates +;; in order to handle insns generated by reload. +;; This is needed since they are not themselves reloaded, +;; so commutativity won't apply to them. +(define_insn "*addsi3_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=m,?a,?a,d,a") + (plus:SI (match_operand:SI 1 "general_operand" "%0,a,rJK,0,0") + (match_operand:SI 2 "general_src_operand" "dIKLT,rJK,a,mSrIKLT,mSrIKLs")))] + + + "! TARGET_COLDFIRE" + "* return output_addsi3 (operands);") + +(define_insn_and_split "*addsi3_5200" + [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,a, m,r, ?a, ?a,?a,?a") + (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0, 0,0, a, a, r, a") + (match_operand:SI 2 "general_src_operand" " I, L, JCu,d,mrKi,Cj, r, a, JCu")))] + "TARGET_COLDFIRE" +{ + switch (which_alternative) + { + case 0: + return "addq%.l %2,%0"; + + case 1: + operands[2] = GEN_INT (- INTVAL (operands[2])); + return "subq%.l %2,%0"; + + case 3: + case 4: + return "add%.l %2,%0"; + + case 5: + /* move%.l %2,%0\n\tadd%.l %1,%0 */ + return "#"; + + case 6: + return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0"; + + case 7: + return MOTOROLA ? "lea (%2,%1.l),%0" : "lea %2@(0,%1:l),%0"; + + case 2: + case 8: + return MOTOROLA ? 
"lea (%c2,%1),%0" : "lea %1@(%c2),%0"; + + default: + gcc_unreachable (); + return ""; + } +} + "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 5) && !operands_match_p (operands[0], operands[1])" + [(set (match_dup 0) + (match_dup 2)) + (set (match_dup 0) + (plus:SI (match_dup 0) + (match_dup 1)))] + "" + [(set_attr "type" "aluq_l,aluq_l,lea, alu_l,alu_l,*,lea, lea, lea") + (set_attr "opy" "2, 2, *, 2, 2, *,*, *, *") + (set_attr "opy_type" "*, *, mem5,*, *, *,mem6,mem6,mem5")]) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=a") + (plus:SI (match_operand:SI 1 "general_operand" "0") + (sign_extend:SI + (match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))] + "!TARGET_COLDFIRE" + "add%.w %2,%0") + +(define_insn "addhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r") + (plus:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_src_operand" "dn,rmSn")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT) + { + /* If the constant would be a negative number when interpreted as + HImode, make it negative. This is usually, but not always, done + elsewhere in the compiler. First check for constants out of range, + which could confuse us. */ + + if (INTVAL (operands[2]) >= 32768) + operands[2] = GEN_INT (INTVAL (operands[2]) - 65536); + + if (INTVAL (operands[2]) > 0 + && INTVAL (operands[2]) <= 8) + return "addq%.w %2,%0"; + if (INTVAL (operands[2]) < 0 + && INTVAL (operands[2]) >= -8) + { + operands[2] = GEN_INT (- INTVAL (operands[2])); + return "subq%.w %2,%0"; + } + /* On the CPU32 it is faster to use two addqw instructions to + add a small integer (8 < N <= 16) to a register. + Likewise for subqw. */ + if (TUNE_CPU32 && REG_P (operands[0])) + { + if (INTVAL (operands[2]) > 8 + && INTVAL (operands[2]) <= 16) + { + operands[2] = GEN_INT (INTVAL (operands[2]) - 8); + return "addq%.w #8,%0\;addq%.w %2,%0"; + } + if (INTVAL (operands[2]) < -8 + && INTVAL (operands[2]) >= -16) + { + operands[2] = GEN_INT (- INTVAL (operands[2]) - 8); + return "subq%.w #8,%0\;subq%.w %2,%0"; + } + } + if (ADDRESS_REG_P (operands[0]) && !TUNE_68040) + return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0"; + } + return "add%.w %2,%0"; +}) + +;; These insns must use MATCH_DUP instead of the more expected +;; use of a matching constraint because the "output" here is also +;; an input, so you can't use the matching constraint. That also means +;; that you can't use the "%", so you need patterns with the matched +;; operand in both positions. + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (plus:HI (match_dup 0) + (match_operand:HI 1 "general_src_operand" "dn,rmSn")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + /* If the constant would be a negative number when interpreted as + HImode, make it negative. This is usually, but not always, done + elsewhere in the compiler. First check for constants out of range, + which could confuse us. */ + + if (INTVAL (operands[1]) >= 32768) + operands[1] = GEN_INT (INTVAL (operands[1]) - 65536); + + if (INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) <= 8) + return "addq%.w %1,%0"; + if (INTVAL (operands[1]) < 0 + && INTVAL (operands[1]) >= -8) + { + operands[1] = GEN_INT (- INTVAL (operands[1])); + return "subq%.w %1,%0"; + } + /* On the CPU32 it is faster to use two addqw instructions to + add a small integer (8 < N <= 16) to a register. + Likewise for subqw. 
*/ + if (TUNE_CPU32 && REG_P (operands[0])) + { + if (INTVAL (operands[1]) > 8 + && INTVAL (operands[1]) <= 16) + { + operands[1] = GEN_INT (INTVAL (operands[1]) - 8); + return "addq%.w #8,%0\;addq%.w %1,%0"; + } + if (INTVAL (operands[1]) < -8 + && INTVAL (operands[1]) >= -16) + { + operands[1] = GEN_INT (- INTVAL (operands[1]) - 8); + return "subq%.w #8,%0\;subq%.w %1,%0"; + } + } + if (ADDRESS_REG_P (operands[0]) && !TUNE_68040) + return MOTOROLA ? "lea (%c1,%0),%0" : "lea %0@(%c1),%0"; + } + return "add%.w %1,%0"; +}) + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (plus:HI (match_operand:HI 1 "general_src_operand" "dn,rmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + /* If the constant would be a negative number when interpreted as + HImode, make it negative. This is usually, but not always, done + elsewhere in the compiler. First check for constants out of range, + which could confuse us. */ + + if (INTVAL (operands[1]) >= 32768) + operands[1] = GEN_INT (INTVAL (operands[1]) - 65536); + + if (INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) <= 8) + return "addq%.w %1,%0"; + if (INTVAL (operands[1]) < 0 + && INTVAL (operands[1]) >= -8) + { + operands[1] = GEN_INT (- INTVAL (operands[1])); + return "subq%.w %1,%0"; + } + /* On the CPU32 it is faster to use two addqw instructions to + add a small integer (8 < N <= 16) to a register. + Likewise for subqw. */ + if (TUNE_CPU32 && REG_P (operands[0])) + { + if (INTVAL (operands[1]) > 8 + && INTVAL (operands[1]) <= 16) + { + operands[1] = GEN_INT (INTVAL (operands[1]) - 8); + return "addq%.w #8,%0\;addq%.w %1,%0"; + } + if (INTVAL (operands[1]) < -8 + && INTVAL (operands[1]) >= -16) + { + operands[1] = GEN_INT (- INTVAL (operands[1]) - 8); + return "subq%.w #8,%0\;subq%.w %1,%0"; + } + } + if (ADDRESS_REG_P (operands[0]) && !TUNE_68040) + return MOTOROLA ? 
"lea (%c1,%0),%0" : "lea %0@(%c1),%0"; + } + return "add%.w %1,%0"; +}) + +(define_insn "addqi3" + [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d") + (plus:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT) + { + if (INTVAL (operands[2]) >= 128) + operands[2] = GEN_INT (INTVAL (operands[2]) - 256); + + if (INTVAL (operands[2]) > 0 + && INTVAL (operands[2]) <= 8) + return "addq%.b %2,%0"; + if (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) >= -8) + { + operands[2] = GEN_INT (- INTVAL (operands[2])); + return "subq%.b %2,%0"; + } + } + return "add%.b %2,%0"; +}) + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (plus:QI (match_dup 0) + (match_operand:QI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + if (INTVAL (operands[1]) >= 128) + operands[1] = GEN_INT (INTVAL (operands[1]) - 256); + + if (INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) <= 8) + return "addq%.b %1,%0"; + if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8) + { + operands[1] = GEN_INT (- INTVAL (operands[1])); + return "subq%.b %1,%0"; + } + } + return "add%.b %1,%0"; +}) + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (plus:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + if (INTVAL (operands[1]) >= 128) + operands[1] = GEN_INT (INTVAL (operands[1]) - 256); + + if (INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) <= 8) + return "addq%.b %1,%0"; + if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8) + { + operands[1] = GEN_INT (- INTVAL (operands[1])); + return "subq%.b %1,%0"; + } + } + return "add%.b %1,%0"; +}) + +(define_expand "add3" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (plus:FP (match_operand:FP 1 "general_operand" "") + (match_operand:FP 2 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "add3_floatsi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (float:FP (match_operand:SI 2 "general_operand" "dmi")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" + "fadd%.l %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "add3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (float:FP (match_operand:HI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" + "fadd%.w %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "add3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (float:FP (match_operand:QI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" + "fadd%.b %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "add3_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (match_operand:FP 1 "general_operand" "%0") + (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[2])) + return "fadd%.x %2,%0"; + return "fadd%. 
%f2,%0"; +} + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "add3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (match_operand:FP 1 "general_operand" "%0") + (match_operand:FP 2 "general_operand" "fU")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[2])) + return "fadd%.d %2,%0"; + return "fadd%. %2,%0"; +} + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +;; subtract instructions + +(define_insn "subdi_sexthishl32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d") + (minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0") + (ashift:DI (sign_extend:DI (match_operand:HI 2 "general_operand" "rm,rm,rm,rm")) + (const_int 32)))) + (clobber (match_scratch:SI 3 "=&d,X,a,?d"))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (ADDRESS_REG_P (operands[0])) + return "sub%.w %2,%0"; + else if (ADDRESS_REG_P (operands[3])) + return "move%.w %2,%3\;sub%.l %3,%0"; + else + return "move%.w %2,%3\;ext%.l %3\;sub%.l %3,%0"; +}) + +(define_insn "subdi_dishl32" + [(set (match_operand:DI 0 "nonimmediate_operand" "+ro") + (minus:DI (match_dup 0) + (ashift:DI (match_operand:DI 1 "general_operand" "ro") + (const_int 32))))] + "" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[1]) == REG) + operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + else + operands[1] = adjust_address (operands[1], SImode, 4); + return "sub%.l %1,%0"; +} + [(set_attr "type" "alu_l")]) + +(define_insn "subdi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d") + (minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0") + (match_operand:DI 2 "general_operand" "d,no>,d,a"))) + (clobber (match_scratch:SI 3 "=&d,&d,X,&d"))] + "" +{ + if (DATA_REG_P (operands[0])) + { + if (DATA_REG_P (operands[2])) + return "sub%.l %R2,%R0\;subx%.l %2,%0"; + else if (GET_CODE (operands[2]) == MEM + && GET_CODE (XEXP (operands[2], 0)) == POST_INC) + { + return "move%.l %2,%3\;sub%.l %2,%R0\;subx%.l %3,%0"; + } + else + { + rtx high, low; + rtx xoperands[2]; + + if (GET_CODE (operands[2]) == REG) + { + low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1); + high = operands[2]; + } + else if (CONSTANT_P (operands[2])) + split_double (operands[2], &high, &low); + else + { + low = adjust_address (operands[2], SImode, 4); + high = operands[2]; + } + + operands[1] = low, operands[2] = high; + xoperands[0] = operands[3]; + if (GET_CODE (operands[1]) == CONST_INT + && INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0) + xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1); + else + xoperands[1] = operands[2]; + + output_asm_insn (output_move_simode (xoperands), xoperands); + if (GET_CODE (operands[1]) == CONST_INT) + { + if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8) + return "subq%.l %1,%R0\;subx%.l %3,%0"; + else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0) + { + operands[1] = GEN_INT (-INTVAL (operands[1])); + return "addq%.l %1,%R0\;addx%.l %3,%0"; + } + } + return "sub%.l %1,%R0\;subx%.l %3,%0"; + } + } + else + { + gcc_assert (GET_CODE (operands[0]) == MEM); + CC_STATUS_INIT; + if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) + { + operands[1] + = gen_rtx_MEM (SImode, plus_constant (XEXP (operands[0], 0), -8)); + return "move%.l %0,%3\;sub%.l %R2,%0\;subx%.l %2,%3\;move%.l %3,%1"; + } + else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) + { + operands[1] = XEXP(operands[0], 0); + return "sub%.l %R2,%0\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%1"; + } + else + { + operands[1] = adjust_address (operands[0], SImode, 4); + return 
"sub%.l %R2,%1\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%0"; + } + } +}) + +(define_insn "subsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "=mda,m,d,a") + (minus:SI (match_operand:SI 1 "general_operand" "0,0,0,0") + (match_operand:SI 2 "general_src_operand" "I,dT,mSrT,mSrs")))] + "" + "@ + subq%.l %2, %0 + sub%.l %2,%0 + sub%.l %2,%0 + sub%.l %2,%0" + [(set_attr "type" "aluq_l,alu_l,alu_l,alu_l") + (set_attr "opy" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=a") + (minus:SI (match_operand:SI 1 "general_operand" "0") + (sign_extend:SI + (match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))] + "!TARGET_COLDFIRE" + "sub%.w %2,%0") + +(define_insn "subhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r") + (minus:HI (match_operand:HI 1 "general_operand" "0,0") + (match_operand:HI 2 "general_src_operand" "dn,rmSn")))] + "!TARGET_COLDFIRE" + "sub%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (minus:HI (match_dup 0) + (match_operand:HI 1 "general_src_operand" "dn,rmSn")))] + "!TARGET_COLDFIRE" + "sub%.w %1,%0") + +(define_insn "subqi3" + [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d") + (minus:QI (match_operand:QI 1 "general_operand" "0,0") + (match_operand:QI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "sub%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (minus:QI (match_dup 0) + (match_operand:QI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "sub%.b %1,%0") + +(define_expand "sub3" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (minus:FP (match_operand:FP 1 "general_operand" "") + (match_operand:FP 2 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "sub3_floatsi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:SI 2 "general_operand" "dmi"))))] + "TARGET_68881" + "fsub%.l %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "sub3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:HI 2 "general_operand" "dmn"))))] + "TARGET_68881" + "fsub%.w %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "sub3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:QI 2 "general_operand" "dmn"))))] + "TARGET_68881" + "fsub%.b %2,%0" + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "sub3_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[2])) + return "fsub%.x %2,%0"; + return "fsub%. %f2,%0"; +} + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +(define_insn "sub3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (match_operand:FP 2 "general_operand" "fU")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[2])) + return "fsub%.d %2,%0"; + return "fsub%. 
%2,%0"; +} + [(set_attr "type" "falu") + (set_attr "opy" "2")]) + +;; multiply instructions + +(define_insn "mulhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d") + (mult:HI (match_operand:HI 1 "general_operand" "%0") + (match_operand:HI 2 "general_src_operand" "dmSn")))] + "" +{ + return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0"; +} + [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + +(define_insn "mulhisi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (sign_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "%0")) + (sign_extend:SI + (match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))] + "" +{ + return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0"; +} + [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + +(define_insn "*mulhisisi3_s" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (sign_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "%0")) + (match_operand:SI 2 "const_int_operand" "n")))] + "INTVAL (operands[2]) >= -0x8000 && INTVAL (operands[2]) <= 0x7fff" +{ + return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0"; +} + [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + +(define_expand "mulsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (mult:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_operand" "")))] + "TARGET_68020 || TARGET_COLDFIRE" + "") + +(define_insn "*mulsi3_68020" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (match_operand:SI 1 "general_operand" "%0") + (match_operand:SI 2 "general_src_operand" "dmSTK")))] + + "TARGET_68020" + "muls%.l %2,%0" + [(set_attr "type" "mul_l") + (set_attr "opy" "2")]) + +(define_insn "*mulsi3_cf" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (match_operand:SI 1 "general_operand" "%0") + (match_operand:SI 2 "general_operand" "d")))] + "TARGET_COLDFIRE" + "muls%.l %2,%0" + [(set_attr "type" "mul_l") + (set_attr "opy" "2")]) + +(define_insn "umulhisi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (zero_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "%0")) + (zero_extend:SI + (match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))] + "" +{ + return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0"; +} + [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + +(define_insn "*mulhisisi3_z" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (mult:SI (zero_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "%0")) + (match_operand:SI 2 "const_int_operand" "n")))] + "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 0xffff" +{ + return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0"; +} + [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + +;; We need a separate DEFINE_EXPAND for u?mulsidi3 to be able to use the +;; proper matching constraint. This is because the matching is between +;; the high-numbered word of the DImode operand[0] and operand[1]. 
+(define_expand "umulsidi3" + [(parallel + [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4) + (mult:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "register_operand" ""))) + (set (subreg:SI (match_dup 0) 0) + (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1)) + (zero_extend:DI (match_dup 2))) + (const_int 32))))])] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (mult:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "nonimmediate_operand" "dm"))) + (set (match_operand:SI 3 "register_operand" "=d") + (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1)) + (zero_extend:DI (match_dup 2))) + (const_int 32))))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "mulu%.l %2,%3:%0") + +; Match immediate case. For 2.4 only match things < 2^31. +; It's tricky with larger values in these patterns since we need to match +; values between the two parallel multiplies, between a CONST_DOUBLE and +; a CONST_INT. +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (mult:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "const_int_operand" "n"))) + (set (match_operand:SI 3 "register_operand" "=d") + (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1)) + (match_dup 2)) + (const_int 32))))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE + && (unsigned) INTVAL (operands[2]) <= 0x7fffffff" + "mulu%.l %2,%3:%0") + +(define_expand "mulsidi3" + [(parallel + [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4) + (mult:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "register_operand" ""))) + (set (subreg:SI (match_dup 0) 0) + (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1)) + (sign_extend:DI (match_dup 2))) + (const_int 32))))])] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (mult:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "nonimmediate_operand" "dm"))) + (set (match_operand:SI 3 "register_operand" "=d") + (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1)) + (sign_extend:DI (match_dup 2))) + (const_int 32))))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "muls%.l %2,%3:%0") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (mult:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "const_int_operand" "n"))) + (set (match_operand:SI 3 "register_operand" "=d") + (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1)) + (match_dup 2)) + (const_int 32))))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "muls%.l %2,%3:%0") + +(define_expand "umulsi3_highpart" + [(parallel + [(set (match_operand:SI 0 "register_operand" "") + (truncate:SI + (lshiftrt:DI + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "")) + (zero_extend:DI (match_operand:SI 2 "general_operand" ""))) + (const_int 32)))) + (clobber (match_dup 3))])] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" +{ + operands[3] = gen_reg_rtx (SImode); + + if (GET_CODE (operands[2]) == CONST_INT) + { + operands[2] = immed_double_const (INTVAL (operands[2]) & 0xffffffff, + 0, DImode); + + /* We have to adjust the operand order for the matching constraints. 
*/ + emit_insn (gen_const_umulsi3_highpart (operands[0], operands[3], + operands[1], operands[2])); + DONE; + } +}) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (truncate:SI + (lshiftrt:DI + (mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "%1")) + (zero_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm"))) + (const_int 32)))) + (clobber (match_operand:SI 1 "register_operand" "=d"))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "mulu%.l %3,%0:%1") + +(define_insn "const_umulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=d") + (truncate:SI + (lshiftrt:DI + (mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "1")) + (match_operand:DI 3 "const_uint32_operand" "n")) + (const_int 32)))) + (clobber (match_operand:SI 1 "register_operand" "=d"))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "mulu%.l %3,%0:%1") + +(define_expand "smulsi3_highpart" + [(parallel + [(set (match_operand:SI 0 "register_operand" "") + (truncate:SI + (lshiftrt:DI + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "")) + (sign_extend:DI (match_operand:SI 2 "general_operand" ""))) + (const_int 32)))) + (clobber (match_dup 3))])] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" +{ + operands[3] = gen_reg_rtx (SImode); + if (GET_CODE (operands[2]) == CONST_INT) + { + /* We have to adjust the operand order for the matching constraints. */ + emit_insn (gen_const_smulsi3_highpart (operands[0], operands[3], + operands[1], operands[2])); + DONE; + } +}) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (truncate:SI + (lshiftrt:DI + (mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "%1")) + (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm"))) + (const_int 32)))) + (clobber (match_operand:SI 1 "register_operand" "=d"))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "muls%.l %3,%0:%1") + +(define_insn "const_smulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=d") + (truncate:SI + (lshiftrt:DI + (mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "1")) + (match_operand:DI 3 "const_sint32_operand" "n")) + (const_int 32)))) + (clobber (match_operand:SI 1 "register_operand" "=d"))] + "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE" + "muls%.l %3,%0:%1") + +(define_expand "mul3" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (mult:FP (match_operand:FP 1 "general_operand" "") + (match_operand:FP 2 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "mul3_floatsi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (mult:FP (float:FP (match_operand:SI 2 "general_operand" "dmi")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +{ + return TARGET_68040 + ? "fmul%.l %2,%0" + : "fmul%.l %2,%0"; +} + [(set_attr "type" "fmul") + (set_attr "opy" "2")]) + +(define_insn "mul3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (mult:FP (float:FP (match_operand:HI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +{ + return TARGET_68040 + ? "fmul%.w %2,%0" + : "fmul%.w %2,%0"; +} + [(set_attr "type" "fmul") + (set_attr "opy" "2")]) + +(define_insn "mul3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (mult:FP (float:FP (match_operand:QI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +{ + return TARGET_68040 + ? 
"fmul%.b %2,%0" + : "fmul%.b %2,%0"; +} + [(set_attr "type" "fmul") + (set_attr "opy" "2")]) + +(define_insn "muldf_68881" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f") + (mult:DF (match_operand:DF 1 "general_operand" "%0") + (match_operand:DF 2 "general_operand" "fmG")))] + "TARGET_68881" +{ + if (GET_CODE (operands[2]) == CONST_DOUBLE + && floating_exact_log2 (operands[2]) && !TUNE_68040_60) + { + int i = floating_exact_log2 (operands[2]); + operands[2] = GEN_INT (i); + return "fscale%.l %2,%0"; + } + if (REG_P (operands[2])) + return "f%&mul%.x %2,%0"; + return "f%&mul%.d %f2,%0"; +}) + +(define_insn "mulsf_68881" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f") + (mult:SF (match_operand:SF 1 "general_operand" "%0") + (match_operand:SF 2 "general_operand" "fdmF")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[2])) + return (TARGET_68040 + ? "fsmul%.x %2,%0" + : "fsglmul%.x %2,%0"); + return (TARGET_68040 + ? "fsmul%.s %f2,%0" + : "fsglmul%.s %f2,%0"); +}) + +(define_insn "mulxf3_68881" + [(set (match_operand:XF 0 "nonimmediate_operand" "=f") + (mult:XF (match_operand:XF 1 "nonimmediate_operand" "%0") + (match_operand:XF 2 "nonimmediate_operand" "fm")))] + "TARGET_68881" +{ + return "fmul%.x %f2,%0"; +}) + +(define_insn "fmul3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (mult:FP (match_operand:FP 1 "general_operand" "%0") + (match_operand:FP 2 "general_operand" "fU")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[2])) + return "fmul%.d %2,%0"; + return "fmul%. %2,%0"; +} + [(set_attr "type" "fmul") + (set_attr "opy" "2")]) + +;; divide instructions + +(define_expand "div3" + [(set (match_operand:FP 0 "nonimmediate_operand" "") + (div:FP (match_operand:FP 1 "general_operand" "") + (match_operand:FP 2 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "div3_floatsi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:SI 2 "general_operand" "dmi"))))] + "TARGET_68881" +{ + return TARGET_68040 + ? "fdiv%.l %2,%0" + : "fdiv%.l %2,%0"; +}) + +(define_insn "div3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:HI 2 "general_operand" "dmn"))))] + "TARGET_68881" +{ + return TARGET_68040 + ? "fdiv%.w %2,%0" + : "fdiv%.w %2,%0"; +}) + +(define_insn "div3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:QI 2 "general_operand" "dmn"))))] + "TARGET_68881" +{ + return TARGET_68040 + ? "fdiv%.b %2,%0" + : "fdiv%.b %2,%0"; +}) + +(define_insn "div3_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") + (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[2])) + return (TARGET_68040 + ? "fdiv%.x %2,%0" + : "fdiv%.x %2,%0"); + return (TARGET_68040 + ? "fdiv%. %f2,%0" + : "fdiv%. %f2,%0"); +}) + +(define_insn "div3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") + (match_operand:FP 2 "general_operand" "fU")))] + "TARGET_COLDFIRE_FPU" +{ + if (FP_REG_P (operands[2])) + return "fdiv%.d %2,%0"; + return "fdiv%. %2,%0"; +} + [(set_attr "type" "fdiv") + (set_attr "opy" "2")]) + +;; Remainder instructions. 
+ +(define_expand "divmodsi4" + [(parallel + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (div:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_src_operand" ""))) + (set (match_operand:SI 3 "nonimmediate_operand" "") + (mod:SI (match_dup 1) (match_dup 2)))])] + "TARGET_68020 || TARGET_CF_HWDIV" + "") + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (div:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_src_operand" "dU"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=&d") + (mod:SI (match_dup 1) (match_dup 2)))] + "TARGET_CF_HWDIV" +{ + if (find_reg_note (insn, REG_UNUSED, operands[3])) + return "divs%.l %2,%0"; + else if (find_reg_note (insn, REG_UNUSED, operands[0])) + return "rems%.l %2,%3:%0"; + else + return "rems%.l %2,%3:%0\;divs%.l %2,%0"; +} + [(set_attr "type" "div_l") + (set_attr "opy" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (div:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_src_operand" "dmSTK"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=d") + (mod:SI (match_dup 1) (match_dup 2)))] + "TARGET_68020" +{ + if (find_reg_note (insn, REG_UNUSED, operands[3])) + return "divs%.l %2,%0"; + else + return "divsl%.l %2,%3:%0"; +}) + +(define_expand "udivmodsi4" + [(parallel + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (udiv:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_src_operand" "dmSTK"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=d") + (umod:SI (match_dup 1) (match_dup 2)))])] + "TARGET_68020 || TARGET_CF_HWDIV" + "") + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (udiv:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_src_operand" "dU"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=&d") + (umod:SI (match_dup 1) (match_dup 2)))] + "TARGET_CF_HWDIV" +{ + if (find_reg_note (insn, REG_UNUSED, operands[3])) + return "divu%.l %2,%0"; + else if (find_reg_note (insn, REG_UNUSED, operands[0])) + return "remu%.l %2,%3:%0"; + else + return "remu%.l %2,%3:%0\;divu%.l %2,%0"; +} + [(set_attr "type" "div_l") + (set_attr "opy" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (udiv:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_src_operand" "dmSTK"))) + (set (match_operand:SI 3 "nonimmediate_operand" "=d") + (umod:SI (match_dup 1) (match_dup 2)))] + "TARGET_68020 && !TARGET_COLDFIRE" +{ + if (find_reg_note (insn, REG_UNUSED, operands[3])) + return "divu%.l %2,%0"; + else + return "divul%.l %2,%3:%0"; +}) + +(define_insn "divmodhi4" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d") + (div:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_src_operand" "dmSKT"))) + (set (match_operand:HI 3 "nonimmediate_operand" "=d") + (mod:HI (match_dup 1) (match_dup 2)))] + "!TARGET_COLDFIRE || TARGET_CF_HWDIV" +{ + output_asm_insn (MOTOROLA ? 
+ "ext%.l %0\;divs%.w %2,%0" : + "extl %0\;divs %2,%0", + operands); + if (!find_reg_note(insn, REG_UNUSED, operands[3])) + { + CC_STATUS_INIT; + return "move%.l %0,%3\;swap %3"; + } + else + return ""; +}) + +(define_insn "udivmodhi4" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d") + (udiv:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_src_operand" "dmSKT"))) + (set (match_operand:HI 3 "nonimmediate_operand" "=d") + (umod:HI (match_dup 1) (match_dup 2)))] + "!TARGET_COLDFIRE || TARGET_CF_HWDIV" +{ + if (ISA_HAS_MVS_MVZ) + output_asm_insn (MOTOROLA ? + "mvz%.w %0,%0\;divu%.w %2,%0" : + "mvz%.w %0,%0\;divu %2,%0", + operands); + else + output_asm_insn (MOTOROLA ? + "and%.l #0xFFFF,%0\;divu%.w %2,%0" : + "and%.l #0xFFFF,%0\;divu %2,%0", + operands); + + if (!find_reg_note(insn, REG_UNUSED, operands[3])) + { + CC_STATUS_INIT; + return "move%.l %0,%3\;swap %3"; + } + else + return ""; +}) + +;; logical-and instructions + +;; "anddi3" is mainly here to help combine(). +(define_insn "anddi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d") + (and:DI (match_operand:DI 1 "general_operand" "%0,0") + (match_operand:DI 2 "general_operand" "dn,don")))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + /* We can get CONST_DOUBLE, but also const1_rtx etc. */ + if (CONSTANT_P (operands[2])) + { + rtx hi, lo; + + split_double (operands[2], &hi, &lo); + + switch (INTVAL (hi)) + { + case 0 : + output_asm_insn ("clr%.l %0", operands); + break; + case -1 : + break; + default : + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = hi; + output_asm_insn (output_andsi3 (xoperands), xoperands); + } + } + if (GET_CODE (operands[0]) == REG) + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[0] = adjust_address (operands[0], SImode, 4); + switch (INTVAL (lo)) + { + case 0 : + output_asm_insn ("clr%.l %0", operands); + break; + case -1 : + break; + default : + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = lo; + output_asm_insn (output_andsi3 (xoperands), xoperands); + } + } + return ""; + } + if (GET_CODE (operands[0]) != REG) + { + operands[1] = adjust_address (operands[0], SImode, 4); + return "and%.l %2,%0\;and%.l %R2,%1"; + } + if (GET_CODE (operands[2]) != REG) + { + operands[1] = adjust_address (operands[2], SImode, 4); + return "and%.l %2,%0\;and%.l %1,%R0"; + } + return "and%.l %2,%0\;and%.l %R2,%R0"; +}) + +;; Prevent AND from being made with sp. This doesn't exist in the machine +;; and reload will cause inefficient code. Since sp is a FIXED_REG, we +;; can't allocate pseudos into it. 
+ +(define_expand "andsi3" + [(set (match_operand:SI 0 "not_sp_operand" "") + (and:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_src_operand" "")))] + "" + "") + +;; produced by split operations after reload finished +(define_insn "*andsi3_split" + [(set (match_operand:SI 0 "register_operand" "=d") + (and:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "i")))] + "reload_completed && !TARGET_COLDFIRE" +{ + return output_andsi3 (operands); +}) + +(define_insn "andsi3_internal" + [(set (match_operand:SI 0 "not_sp_operand" "=m,d") + (and:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_src_operand" "dKT,dmSM")))] + "!TARGET_COLDFIRE" +{ + return output_andsi3 (operands); +}) + +(define_insn "andsi3_5200" + [(set (match_operand:SI 0 "not_sp_operand" "=m,d") + (and:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_src_operand" "d,dmsK")))] + "TARGET_COLDFIRE" +{ + if (ISA_HAS_MVS_MVZ + && DATA_REG_P (operands[0]) + && GET_CODE (operands[2]) == CONST_INT) + { + if (INTVAL (operands[2]) == 0x000000ff) + return "mvz%.b %0,%0"; + else if (INTVAL (operands[2]) == 0x0000ffff) + return "mvz%.w %0,%0"; + } + return output_andsi3 (operands); +}) + +(define_insn "andhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d") + (and:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "and%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (and:HI (match_dup 0) + (match_operand:HI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "and%.w %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (and:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "and%.w %1,%0") + +(define_insn "andqi3" + [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d") + (and:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "and%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (and:QI (match_dup 0) + (match_operand:QI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "and%.b %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (and:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "and%.b %1,%0") + +;; inclusive-or instructions + +(define_insn "iordi_zext" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d") + (ior:DI (zero_extend:DI (match_operand 1 "general_operand" "dn,dmn")) + (match_operand:DI 2 "general_operand" "0,0")))] + "!TARGET_COLDFIRE" +{ + int byte_mode; + + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == REG) + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[0] = adjust_address (operands[0], SImode, 4); + if (GET_MODE (operands[1]) == SImode) + return "or%.l %1,%0"; + byte_mode = (GET_MODE (operands[1]) == QImode); + if (GET_CODE (operands[0]) == MEM) + operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode, + byte_mode ? 3 : 2); + if (byte_mode) + return "or%.b %1,%0"; + else + return "or%.w %1,%0"; +}) + +;; "iordi3" is mainly here to help combine(). 
+(define_insn "iordi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d") + (ior:DI (match_operand:DI 1 "general_operand" "%0,0") + (match_operand:DI 2 "general_operand" "dn,don")))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + /* We can get CONST_DOUBLE, but also const1_rtx etc. */ + if (CONSTANT_P (operands[2])) + { + rtx hi, lo; + + split_double (operands[2], &hi, &lo); + + switch (INTVAL (hi)) + { + case 0 : + break; + case -1 : + /* FIXME : a scratch register would be welcome here if operand[0] + is not a register */ + output_asm_insn ("move%.l #-1,%0", operands); + break; + default : + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = hi; + output_asm_insn (output_iorsi3 (xoperands), xoperands); + } + } + if (GET_CODE (operands[0]) == REG) + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[0] = adjust_address (operands[0], SImode, 4); + switch (INTVAL (lo)) + { + case 0 : + break; + case -1 : + /* FIXME : a scratch register would be welcome here if operand[0] + is not a register */ + output_asm_insn ("move%.l #-1,%0", operands); + break; + default : + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = lo; + output_asm_insn (output_iorsi3 (xoperands), xoperands); + } + } + return ""; + } + if (GET_CODE (operands[0]) != REG) + { + operands[1] = adjust_address (operands[0], SImode, 4); + return "or%.l %2,%0\;or%.l %R2,%1"; + } + if (GET_CODE (operands[2]) != REG) + { + operands[1] = adjust_address (operands[2], SImode, 4); + return "or%.l %2,%0\;or%.l %1,%R0"; + } + return "or%.l %2,%0\;or%.l %R2,%R0"; +}) + +(define_expand "iorsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (ior:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_src_operand" "")))] + "" + "") + +(define_insn "iorsi3_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d") + (ior:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_src_operand" "dKT,dmSMT")))] + "! 
TARGET_COLDFIRE" +{ + return output_iorsi3 (operands); +}) + +(define_insn "iorsi3_5200" + [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d") + (ior:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_src_operand" "d,dmsK")))] + "TARGET_COLDFIRE" +{ + return output_iorsi3 (operands); +}) + +(define_insn "iorhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d") + (ior:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "or%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (ior:HI (match_dup 0) + (match_operand:HI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "or%.w %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d")) + (ior:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "or%.w %1,%0") + +(define_insn "iorqi3" + [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d") + (ior:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "or%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (ior:QI (match_dup 0) + (match_operand:QI 1 "general_src_operand" "dn,dmSn")))] + "!TARGET_COLDFIRE" + "or%.b %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d")) + (ior:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "or%.b %1,%0") + +;; On all 68k models, this makes faster code in a special case. +;; See also ashlsi_16, ashrsi_16 and lshrsi_16. + +(define_insn "iorsi_zexthi_ashl16" + [(set (match_operand:SI 0 "nonimmediate_operand" "=&d") + (ior:SI (zero_extend:SI (match_operand:HI 1 "general_operand" "rmn")) + (ashift:SI (match_operand:SI 2 "general_operand" "or") + (const_int 16))))] + "" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[2]) != REG) + operands[2] = adjust_address (operands[2], HImode, 2); + if (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != REGNO (operands[0])) + output_asm_insn ("move%.w %2,%0", operands); + return "swap %0\;mov%.w %1,%0"; +}) + +(define_insn "iorsi_zext" + [(set (match_operand:SI 0 "nonimmediate_operand" "=o,d") + (ior:SI (zero_extend:SI (match_operand 1 "general_operand" "dn,dmn")) + (match_operand:SI 2 "general_operand" "0,0")))] + "!TARGET_COLDFIRE" +{ + int byte_mode; + + CC_STATUS_INIT; + byte_mode = (GET_MODE (operands[1]) == QImode); + if (GET_CODE (operands[0]) == MEM) + operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode, + byte_mode ? 3 : 2); + if (byte_mode) + return "or%.b %1,%0"; + else + return "or%.w %1,%0"; +}) + +;; xor instructions + +;; "xordi3" is mainly here to help combine(). +(define_insn "xordi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=od") + (xor:DI (match_operand:DI 1 "general_operand" "%0") + (match_operand:DI 2 "general_operand" "dn")))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + /* We can get CONST_DOUBLE, but also const1_rtx etc. 
*/ + + if (CONSTANT_P (operands[2])) + { + rtx hi, lo; + + split_double (operands[2], &hi, &lo); + + switch (INTVAL (hi)) + { + case 0 : + break; + case -1 : + output_asm_insn ("not%.l %0", operands); + break; + default : + /* FIXME : a scratch register would be welcome here if + -128 <= INTVAL (hi) < -1 */ + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = hi; + output_asm_insn (output_xorsi3 (xoperands), xoperands); + } + } + if (GET_CODE (operands[0]) == REG) + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[0] = adjust_address (operands[0], SImode, 4); + switch (INTVAL (lo)) + { + case 0 : + break; + case -1 : + output_asm_insn ("not%.l %0", operands); + break; + default : + /* FIXME : a scratch register would be welcome here if + -128 <= INTVAL (lo) < -1 */ + operands[2] = lo; + /* FIXME : this should be merged with xorsi3 */ + { + rtx xoperands[3]; + + xoperands[0] = operands[0]; + xoperands[2] = lo; + output_asm_insn (output_xorsi3 (xoperands), xoperands); + } + } + return ""; + } + if (GET_CODE (operands[0]) != REG) + { + operands[1] = adjust_address (operands[0], SImode, 4); + return "eor%.l %2,%0\;eor%.l %R2,%1"; + } + if (GET_CODE (operands[2]) != REG) + { + operands[1] = adjust_address (operands[2], SImode, 4); + return "eor%.l %2,%0\;eor%.l %1,%R0"; + } + return "eor%.l %2,%0\;eor%.l %R2,%R0"; +}) + +(define_expand "xorsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (xor:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_operand" "")))] + "" + "") + +(define_insn "xorsi3_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=do,m") + (xor:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_operand" "di,dKT")))] + + "!TARGET_COLDFIRE" +{ + return output_xorsi3 (operands); +}) + +(define_insn "xorsi3_5200" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,d") + (xor:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,Ks")))] + "TARGET_COLDFIRE" +{ + return output_xorsi3 (operands); +}) + +(define_insn "xorhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (xor:HI (match_operand:HI 1 "general_operand" "%0") + (match_operand:HI 2 "general_operand" "dn")))] + "!TARGET_COLDFIRE" + "eor%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm")) + (xor:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dn")))] + "!TARGET_COLDFIRE" + "eor%.w %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm")) + (xor:HI (match_operand:HI 1 "general_operand" "dn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "eor%.w %1,%0") + +(define_insn "xorqi3" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (xor:QI (match_operand:QI 1 "general_operand" "%0") + (match_operand:QI 2 "general_operand" "dn")))] + "!TARGET_COLDFIRE" + "eor%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm")) + (xor:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dn")))] + "!TARGET_COLDFIRE" + "eor%.b %1,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm")) + (xor:QI (match_operand:QI 1 "general_operand" "dn") + (match_dup 0)))] + "!TARGET_COLDFIRE" + "eor%.b %1,%0") + +;; negation instructions + +(define_expand "negdi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (neg:DI (match_operand:DI 1 "general_operand" "")))] + "" +{ 
+ if (TARGET_COLDFIRE) + emit_insn (gen_negdi2_5200 (operands[0], operands[1])); + else + emit_insn (gen_negdi2_internal (operands[0], operands[1])); + DONE; +}) + +(define_insn "negdi2_internal" + [(set (match_operand:DI 0 "nonimmediate_operand" "=<,do,!*a") + (neg:DI (match_operand:DI 1 "general_operand" "0,0,0")))] + "!TARGET_COLDFIRE" +{ + if (which_alternative == 0) + return "neg%.l %0\;negx%.l %0"; + if (GET_CODE (operands[0]) == REG) + operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[1] = adjust_address (operands[0], SImode, 4); + if (ADDRESS_REG_P (operands[0])) + return "exg %/d0,%1\;neg%.l %/d0\;exg %/d0,%1\;exg %/d0,%0\;negx%.l %/d0\;exg %/d0,%0"; + else + return "neg%.l %1\;negx%.l %0"; +}) + +(define_insn "negdi2_5200" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d") + (neg:DI (match_operand:DI 1 "general_operand" "0")))] + "TARGET_COLDFIRE" +{ + operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "neg%.l %1\;negx%.l %0"; +}) + +(define_expand "negsi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (neg:SI (match_operand:SI 1 "general_operand" "")))] + "" +{ + if (TARGET_COLDFIRE) + emit_insn (gen_negsi2_5200 (operands[0], operands[1])); + else + emit_insn (gen_negsi2_internal (operands[0], operands[1])); + DONE; +}) + +(define_insn "negsi2_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") + (neg:SI (match_operand:SI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "neg%.l %0" + [(set_attr "type" "neg_l")]) + +(define_insn "negsi2_5200" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (neg:SI (match_operand:SI 1 "general_operand" "0")))] + "TARGET_COLDFIRE" + "neg%.l %0" + [(set_attr "type" "neg_l")]) + +(define_insn "neghi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (neg:HI (match_operand:HI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "neg%.w %0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm")) + (neg:HI (match_dup 0)))] + "!TARGET_COLDFIRE" + "neg%.w %0") + +(define_insn "negqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (neg:QI (match_operand:QI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "neg%.b %0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm")) + (neg:QI (match_dup 0)))] + "!TARGET_COLDFIRE" + "neg%.b %0") + +;; If using software floating point, just flip the sign bit. + +(define_expand "negsf2" + [(set (match_operand:SF 0 "nonimmediate_operand" "") + (neg:SF (match_operand:SF 1 "general_operand" "")))] + "" +{ + if (!TARGET_HARD_FLOAT) + { + rtx result; + rtx target; + + target = operand_subword_force (operands[0], 0, SFmode); + result = expand_binop (SImode, xor_optab, + operand_subword_force (operands[1], 0, SFmode), + GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN); + gcc_assert (result); + + if (result != target) + emit_move_insn (result, target); + + /* Make a place for REG_EQUAL. 
*/
+      emit_move_insn (operands[0], operands[0]);
+      DONE;
+    }
+})
+
+(define_expand "negdf2"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "")
+        (neg:DF (match_operand:DF 1 "general_operand" "")))]
+  ""
+{
+  if (!TARGET_HARD_FLOAT)
+    {
+      rtx result;
+      rtx target;
+      rtx insns;
+
+      start_sequence ();
+      target = operand_subword (operands[0], 0, 1, DFmode);
+      result = expand_binop (SImode, xor_optab,
+                             operand_subword_force (operands[1], 0, DFmode),
+                             GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
+      gcc_assert (result);
+
+      if (result != target)
+        emit_move_insn (result, target);
+
+      emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
+                      operand_subword_force (operands[1], 1, DFmode));
+
+      insns = get_insns ();
+      end_sequence ();
+
+      emit_insn (insns);
+      DONE;
+    }
+})
+
+(define_expand "negxf2"
+  [(set (match_operand:XF 0 "nonimmediate_operand" "")
+        (neg:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+  ""
+{
+  if (!TARGET_68881)
+    {
+      rtx result;
+      rtx target;
+      rtx insns;
+
+      start_sequence ();
+      target = operand_subword (operands[0], 0, 1, XFmode);
+      result = expand_binop (SImode, xor_optab,
+                             operand_subword_force (operands[1], 0, XFmode),
+                             GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
+      gcc_assert (result);
+
+      if (result != target)
+        emit_move_insn (result, target);
+
+      emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
+                      operand_subword_force (operands[1], 1, XFmode));
+      emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
+                      operand_subword_force (operands[1], 2, XFmode));
+
+      insns = get_insns ();
+      end_sequence ();
+
+      emit_insn (insns);
+      DONE;
+    }
+})
+
+(define_insn "neg<mode>2_68881"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+        (neg:FP (match_operand:FP 1 "general_operand" "fm,0")))]
+  "TARGET_68881"
+{
+  if (DATA_REG_P (operands[0]))
+    {
+      operands[1] = GEN_INT (31);
+      return "bchg %1,%0";
+    }
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>neg%.x %1,%0";
+  return "f<FP:round>neg%.<FP:prec> %f1,%0";
+})
+
+(define_insn "neg<mode>2_cf"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+        (neg:FP (match_operand:FP 1 "general_operand" "fU,0")))]
+  "TARGET_COLDFIRE_FPU"
+{
+  if (DATA_REG_P (operands[0]))
+    {
+      operands[1] = GEN_INT (31);
+      return "bchg %1,%0";
+    }
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>neg%.d %1,%0";
+  return "f<FP:round>neg%.<FP:prec> %1,%0";
+})
+
+;; Sqrt instruction for the 68881
+
+(define_expand "sqrt<mode>2"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "")
+        (sqrt:FP (match_operand:FP 1 "general_operand" "")))]
+  "TARGET_HARD_FLOAT"
+  "")
+
+(define_insn "sqrt<mode>2_68881"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+        (sqrt:FP (match_operand:FP 1 "general_operand" "fm")))]
+  "TARGET_68881"
+{
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>sqrt%.x %1,%0";
+  return "f<FP:round>sqrt%.<FP:prec> %1,%0";
+}
+  [(set_attr "type" "fsqrt")])
+
+(define_insn "sqrt<mode>2_cf"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+        (sqrt:FP (match_operand:FP 1 "general_operand" "fU")))]
+  "TARGET_COLDFIRE_FPU"
+{
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>sqrt%.d %1,%0";
+  return "f<FP:round>sqrt%.<FP:prec> %1,%0";
+}
+  [(set_attr "type" "fsqrt")])
+;; Absolute value instructions
+;; If using software floating point, just zero the sign bit.
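+;;
+;; As a rough illustration (not part of the expanders themselves), the
+;; SFmode case below behaves like the following C sketch, which masks off
+;; the IEEE sign bit (bit 31) of the most significant word:
+;;
+;;   float soft_abs_sf (float x)          /* illustrative name only */
+;;   {
+;;     union { float f; unsigned int w; } u;
+;;     u.f = x;
+;;     u.w &= 0x7fffffff;                 /* clear the sign bit */
+;;     return u.f;
+;;   }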
+
+(define_expand "abssf2"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "")
+        (abs:SF (match_operand:SF 1 "general_operand" "")))]
+  ""
+{
+  if (!TARGET_HARD_FLOAT)
+    {
+      rtx result;
+      rtx target;
+
+      target = operand_subword_force (operands[0], 0, SFmode);
+      result = expand_binop (SImode, and_optab,
+                             operand_subword_force (operands[1], 0, SFmode),
+                             GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+      gcc_assert (result);
+
+      if (result != target)
+        emit_move_insn (result, target);
+
+      /* Make a place for REG_EQUAL.  */
+      emit_move_insn (operands[0], operands[0]);
+      DONE;
+    }
+})
+
+(define_expand "absdf2"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "")
+        (abs:DF (match_operand:DF 1 "general_operand" "")))]
+  ""
+{
+  if (!TARGET_HARD_FLOAT)
+    {
+      rtx result;
+      rtx target;
+      rtx insns;
+
+      start_sequence ();
+      target = operand_subword (operands[0], 0, 1, DFmode);
+      result = expand_binop (SImode, and_optab,
+                             operand_subword_force (operands[1], 0, DFmode),
+                             GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+      gcc_assert (result);
+
+      if (result != target)
+        emit_move_insn (result, target);
+
+      emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
+                      operand_subword_force (operands[1], 1, DFmode));
+
+      insns = get_insns ();
+      end_sequence ();
+
+      emit_insn (insns);
+      DONE;
+    }
+})
+
+(define_expand "absxf2"
+  [(set (match_operand:XF 0 "nonimmediate_operand" "")
+        (abs:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+  ""
+{
+  if (!TARGET_68881)
+    {
+      rtx result;
+      rtx target;
+      rtx insns;
+
+      start_sequence ();
+      target = operand_subword (operands[0], 0, 1, XFmode);
+      result = expand_binop (SImode, and_optab,
+                             operand_subword_force (operands[1], 0, XFmode),
+                             GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+      gcc_assert (result);
+
+      if (result != target)
+        emit_move_insn (result, target);
+
+      emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
+                      operand_subword_force (operands[1], 1, XFmode));
+      emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
+                      operand_subword_force (operands[1], 2, XFmode));
+
+      insns = get_insns ();
+      end_sequence ();
+
+      emit_insn (insns);
+      DONE;
+    }
+})
+
+(define_insn "abs<mode>2_68881"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+        (abs:FP (match_operand:FP 1 "general_operand" "fm,0")))]
+  "TARGET_68881"
+{
+  if (DATA_REG_P (operands[0]))
+    {
+      operands[1] = GEN_INT (31);
+      return "bclr %1,%0";
+    }
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>abs%.x %1,%0";
+  return "f<FP:round>abs%.<FP:prec> %f1,%0";
+})
+
+(define_insn "abs<mode>2_cf"
+  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+        (abs:FP (match_operand:FP 1 "general_operand" "fU,0")))]
+  "TARGET_COLDFIRE_FPU"
+{
+  if (DATA_REG_P (operands[0]))
+    {
+      operands[1] = GEN_INT (31);
+      return "bclr %1,%0";
+    }
+  if (FP_REG_P (operands[1]))
+    return "f<FP:round>abs%.d %1,%0";
+  return "f<FP:round>abs%.<FP:prec> %1,%0";
+}
+  [(set_attr "type" "bitrw,fneg")])
+
+;; bit indexing instructions
+
+;; ColdFire ff1 instruction implements clz.
+(define_insn "clzsi2"
+  [(set (match_operand:SI 0 "register_operand" "=d")
+        (clz:SI (match_operand:SI 1 "register_operand" "0")))]
+  "ISA_HAS_FF1"
+  "ff1 %0"
+  [(set_attr "type" "ext")])
+
+;; one complement instructions
+
+;; "one_cmpldi2" is mainly here to help combine().
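+;;
+;; Roughly speaking, for a DImode value kept in a data-register pair the
+;; insn below just complements each 32-bit half; with the high/low words
+;; assumed to live in d0/d1 it comes out as something like:
+;;
+;;   not.l %d1   | complement the low word
+;;   not.l %d0   | complement the high word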
+(define_insn "one_cmpldi2" + [(set (match_operand:DI 0 "nonimmediate_operand" "=dm") + (not:DI (match_operand:DI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == REG) + operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC + || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) + operands[1] = operands[0]; + else + operands[1] = adjust_address (operands[0], SImode, 4); + return "not%.l %1\;not%.l %0"; +}) + +(define_expand "one_cmplsi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (not:SI (match_operand:SI 1 "general_operand" "")))] + "" +{ + if (TARGET_COLDFIRE) + emit_insn (gen_one_cmplsi2_5200 (operands[0], operands[1])); + else + emit_insn (gen_one_cmplsi2_internal (operands[0], operands[1])); + DONE; +}) + +(define_insn "one_cmplsi2_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") + (not:SI (match_operand:SI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "not%.l %0") + +(define_insn "one_cmplsi2_5200" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (not:SI (match_operand:SI 1 "general_operand" "0")))] + "TARGET_COLDFIRE" + "not%.l %0" + [(set_attr "type" "neg_l")]) + +(define_insn "one_cmplhi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (not:HI (match_operand:HI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "not%.w %0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm")) + (not:HI (match_dup 0)))] + "!TARGET_COLDFIRE" + "not%.w %0") + +(define_insn "one_cmplqi2" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (not:QI (match_operand:QI 1 "general_operand" "0")))] + "!TARGET_COLDFIRE" + "not%.b %0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm")) + (not:QI (match_dup 0)))] + "!TARGET_COLDFIRE" + "not%.b %0") + +;; arithmetic shift instructions +;; We don't need the shift memory by 1 bit instruction + +(define_insn "ashldi_extsi" + [(set (match_operand:DI 0 "nonimmediate_operand" "=ro") + (ashift:DI + (match_operator:DI 2 "extend_operator" + [(match_operand:SI 1 "general_operand" "rm")]) + (const_int 32)))] + "" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == REG) + operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[2] = adjust_address (operands[0], SImode, 4); + if (ADDRESS_REG_P (operands[0])) + return "move%.l %1,%0\;sub%.l %2,%2"; + else + return "move%.l %1,%0\;clr%.l %2"; +}) + +(define_insn "ashldi_sexthi" + [(set (match_operand:DI 0 "nonimmediate_operand" "=m,a*d") + (ashift:DI (sign_extend:DI (match_operand:HI 1 "general_operand" "rm,rm")) + (const_int 32))) + (clobber (match_scratch:SI 2 "=a,X"))] + "" +{ + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == MEM) + { + if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) + return "clr%.l %0\;move%.w %1,%2\;move%.l %2,%0"; + else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) + return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %0"; + else + { + operands[3] = adjust_address (operands[0], SImode, 4); + return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %3"; + } + } + else if (DATA_REG_P (operands[0])) + return "move%.w %1,%0\;ext%.l %0\;clr%.l %R0"; + else + return "move%.w %1,%0\;sub%.l %R0,%R0"; +}) + +(define_insn "*ashldi3_const1" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (match_operand:DI 1 "register_operand" "0") + (const_int 1)))] + "!TARGET_COLDFIRE" + "add%.l %R0,%R0\;addx%.l %0,%0") + +(define_split + 
[(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (const_int 2)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (ashift:DI (match_dup 1) (const_int 1))) + (set (match_dup 0) + (ashift:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (const_int 3)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (ashift:DI (match_dup 1) (const_int 2))) + (set (match_dup 0) + (ashift:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (const_int 8)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 2) + (rotate:SI (match_dup 2) (const_int 8))) + (set (match_dup 3) + (rotate:SI (match_dup 3) (const_int 8))) + (set (strict_low_part (subreg:QI (match_dup 0) 3)) + (subreg:QI (match_dup 0) 7)) + (set (strict_low_part (subreg:QI (match_dup 0) 7)) + (const_int 0))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (const_int 16)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 2) + (rotate:SI (match_dup 2) (const_int 16))) + (set (match_dup 3) + (rotate:SI (match_dup 3) (const_int 16))) + (set (strict_low_part (subreg:HI (match_dup 0) 2)) + (subreg:HI (match_dup 0) 6)) + (set (strict_low_part (subreg:HI (match_dup 0) 6)) + (const_int 0))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "pre_dec_operand" "") + (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "") + (const_int 32)))] + "reload_completed" + [(set (match_dup 0) (const_int 0)) + (set (match_dup 0) (match_dup 1))] +{ + operands[0] = adjust_address(operands[0], SImode, 0); + operands[1] = gen_lowpart(SImode, operands[1]); +}) + +(define_split + [(set (match_operand:DI 0 "post_inc_operand" "") + (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "") + (const_int 32)))] + "reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 0) (const_int 0))] +{ + operands[0] = adjust_address(operands[0], SImode, 0); + operands[1] = gen_lowpart(SImode, operands[1]); +}) + +(define_insn_and_split "*ashldi3_const32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=ro<>") + (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "ro") + (const_int 32)))] + "" + "#" + "&& reload_completed" + [(set (match_dup 4) (match_dup 3)) + (set (match_dup 2) (const_int 0))] + "split_di(operands, 2, operands + 2, operands + 4);") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "reload_completed && !TARGET_COLDFIRE + && INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 40" + [(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 2))) + (set (match_dup 3) (match_dup 4)) + (set (match_dup 4) (const_int 0))] +{ + operands[2] = GEN_INT (INTVAL (operands[2]) - 32); + operands[3] = gen_highpart (SImode, operands[0]); + operands[4] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (const_int 
48)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 2) (match_dup 3)) + (set (match_dup 2) + (rotate:SI (match_dup 2) (const_int 16))) + (set (match_dup 3) (const_int 0)) + (set (strict_low_part (subreg:HI (match_dup 0) 2)) + (const_int 0))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "reload_completed && !TARGET_COLDFIRE + && INTVAL (operands[2]) > 40 && INTVAL (operands[2]) <= 63" + [(set (match_dup 3) (match_dup 2)) + (set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3))) + (set (match_dup 3) (match_dup 4)) + (set (match_dup 4) (const_int 0))] +{ + operands[2] = GEN_INT (INTVAL (operands[2]) - 32); + operands[3] = gen_highpart (SImode, operands[0]); + operands[4] = gen_lowpart (SImode, operands[0]); +}) + +(define_insn "*ashldi3" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (match_operand:DI 1 "register_operand" "0") + (match_operand 2 "const_int_operand" "n")))] + "!TARGET_COLDFIRE + && ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3) + || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16 + || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63))" + "#") + +(define_expand "ashldi3" + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "!TARGET_COLDFIRE" +{ + /* ??? This is a named pattern like this is not allowed to FAIL based + on its operands. */ + if (GET_CODE (operands[2]) != CONST_INT + || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3) + && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16 + && (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63))) + FAIL; +}) + +;; On most 68k models, this makes faster code in a special case. + +(define_insn "ashlsi_16" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashift:SI (match_operand:SI 1 "register_operand" "0") + (const_int 16)))] + "!TUNE_68060" +{ + CC_STATUS_INIT; + return "swap %0\;clr%.w %0"; +}) + +;; ashift patterns : use lsl instead of asl, because lsl always clears the +;; overflow bit, so we must not set CC_NO_OVERFLOW. + +;; On the 68000, this makes faster code in a special case. 
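+;;
+;; For instance, with an assumed constant shift count of 20 the pattern
+;; below would emit something along the lines of:
+;;
+;;   lsl.w #4,%d0   | shift by 20-16 within the low word
+;;   swap %d0       | move those bits into the high word
+;;   clr.w %d0      | zero the low word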
+ +(define_insn "ashlsi_17_24" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashift:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "n")))] + "TUNE_68000_10 + && INTVAL (operands[2]) > 16 + && INTVAL (operands[2]) <= 24" +{ + CC_STATUS_INIT; + + operands[2] = GEN_INT (INTVAL (operands[2]) - 16); + return "lsl%.w %2,%0\;swap %0\;clr%.w %0"; +}) + +(define_insn "ashlsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashift:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "general_operand" "dI")))] + "" +{ + if (operands[2] == const1_rtx) + { + cc_status.flags = CC_NO_OVERFLOW; + return "add%.l %0,%0"; + } + return "lsl%.l %2,%0"; +}) + +(define_insn "ashlhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (ashift:HI (match_operand:HI 1 "register_operand" "0") + (match_operand:HI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsl%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (ashift:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsl%.w %1,%0") + +(define_insn "ashlqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (ashift:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsl%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (ashift:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsl%.b %1,%0") + +;; On most 68k models, this makes faster code in a special case. + +(define_insn "ashrsi_16" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0") + (const_int 16)))] + "!TUNE_68060" + "swap %0\;ext%.l %0") + +;; On the 68000, this makes faster code in a special case. 
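+;;
+;; For instance, an arithmetic right shift by an assumed count of 20 would
+;; come out of the pattern below roughly as:
+;;
+;;   swap %d0       | bring the high word down
+;;   asr.w #4,%d0   | shift by 20-16 within that word
+;;   ext.l %d0      | sign-extend the 16-bit result to 32 bits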
+ +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "n")))] + "TUNE_68000_10 + && INTVAL (operands[2]) > 16 + && INTVAL (operands[2]) <= 24" +{ + operands[2] = GEN_INT (INTVAL (operands[2]) - 16); + return "swap %0\;asr%.w %2,%0\;ext%.l %0"; +}) + +(define_insn "subreghi1ashrdi_const32" + [(set (match_operand:HI 0 "nonimmediate_operand" "=rm") + (subreg:HI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro") + (const_int 32)) 6))] + "" +{ + if (GET_CODE (operands[1]) != REG) + operands[1] = adjust_address (operands[1], HImode, 2); + return "move%.w %1,%0"; +} + [(set_attr "type" "move")]) + +(define_insn "subregsi1ashrdi_const32" + [(set (match_operand:SI 0 "nonimmediate_operand" "=rm") + (subreg:SI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro") + (const_int 32)) 4))] + "" +{ + return "move%.l %1,%0"; +} + [(set_attr "type" "move_l")]) + +(define_insn "*ashrdi3_const1" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "0") + (const_int 1)))] + "!TARGET_COLDFIRE" +{ + operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "asr%.l #1,%0\;roxr%.l #1,%1"; +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 2)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (ashiftrt:DI (match_dup 1) (const_int 1))) + (set (match_dup 0) + (ashiftrt:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 3)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (ashiftrt:DI (match_dup 1) (const_int 2))) + (set (match_dup 0) + (ashiftrt:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 8)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (strict_low_part (subreg:QI (match_dup 0) 7)) + (subreg:QI (match_dup 0) 3)) + (set (match_dup 2) + (ashiftrt:SI (match_dup 2) (const_int 8))) + (set (match_dup 3) + (rotatert:SI (match_dup 3) (const_int 8)))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 16)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (strict_low_part (subreg:HI (match_dup 0) 6)) + (subreg:HI (match_dup 0) 2)) + (set (match_dup 2) + (rotate:SI (match_dup 2) (const_int 16))) + (set (match_dup 3) + (rotate:SI (match_dup 3) (const_int 16))) + (set (match_dup 2) + (sign_extend:SI (subreg:HI (match_dup 2) 2)))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_insn "*ashrdi_const32" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashiftrt:DI (match_operand:DI 1 "nonimmediate_src_operand" "ro") + (const_int 32)))] + "" +{ + CC_STATUS_INIT; + if (TARGET_68020) + return "move%.l %1,%R0\;smi %0\;extb%.l %0"; + else + return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0"; +}) + +(define_insn "*ashrdi_const32_mem" + [(set (match_operand:DI 0 "memory_operand" "=o,<") + (ashiftrt:DI (match_operand:DI 1 "nonimmediate_src_operand" "ro,ro") + (const_int 32))) + (clobber 
(match_scratch:SI 2 "=d,d"))] + "" +{ + CC_STATUS_INIT; + operands[3] = adjust_address (operands[0], SImode, + which_alternative == 0 ? 4 : 0); + operands[0] = adjust_address (operands[0], SImode, 0); + if (TARGET_68020 || TARGET_COLDFIRE) + return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0"; + else + return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0"; +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 63)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 3) + (ashiftrt:SI (match_dup 3) (const_int 31))) + (set (match_dup 2) + (match_dup 3))] + "split_di(operands, 1, operands + 2, operands + 3);") + +;; The predicate below must be general_operand, because ashrdi3 allows that +(define_insn "ashrdi_const" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "0") + (match_operand 2 "const_int_operand" "n")))] + "!TARGET_COLDFIRE + && ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3) + || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16 + || INTVAL (operands[2]) == 31 + || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63))" +{ + operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + CC_STATUS_INIT; + if (INTVAL (operands[2]) == 48) + return "swap %0\;ext%.l %0\;move%.l %0,%1\;smi %0\;ext%.w %0"; + if (INTVAL (operands[2]) == 31) + return "add%.l %1,%1\;addx%.l %0,%0\;move%.l %0,%1\;subx%.l %0,%0"; + if (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63) + { + operands[2] = GEN_INT (INTVAL (operands[2]) - 32); + output_asm_insn (INTVAL (operands[2]) <= 8 ? "asr%.l %2,%0" : + "moveq %2,%1\;asr%.l %1,%0", operands); + output_asm_insn ("mov%.l %0,%1\;smi %0", operands); + return INTVAL (operands[2]) >= 15 ? "ext%.w %d0" : + TARGET_68020 ? "extb%.l %0" : "ext%.w %0\;ext%.l %0"; + } + return "#"; +}) + +(define_expand "ashrdi3" + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "!TARGET_COLDFIRE" +{ + /* ??? This is a named pattern like this is not allowed to FAIL based + on its operands. */ + if (GET_CODE (operands[2]) != CONST_INT + || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3) + && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16 + && (INTVAL (operands[2]) < 31 || INTVAL (operands[2]) > 63))) + FAIL; +}) + +;; On all 68k models, this makes faster code in a special case. 
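+;;
+;; The >>31 case below boils down to "replicate the sign bit": add.l moves
+;; the sign bit into X, and subx.l then yields 0 or -1 (value assumed to be
+;; in d0):
+;;
+;;   add.l %d0,%d0     | X := old sign bit
+;;   subx.l %d0,%d0    | d0 := d0 - d0 - X, i.e. 0 or -1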
+ +(define_insn "ashrsi_31" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0") + (const_int 31)))] + "" +{ + return "add%.l %0,%0\;subx%.l %0,%0"; +}) + +(define_insn "ashrsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "general_operand" "dI")))] + "" + "asr%.l %2,%0" + [(set_attr "type" "shift") + (set_attr "opy" "2")]) + +(define_insn "ashrhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (ashiftrt:HI (match_operand:HI 1 "register_operand" "0") + (match_operand:HI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "asr%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (ashiftrt:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "asr%.w %1,%0") + +(define_insn "ashrqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (ashiftrt:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "asr%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (ashiftrt:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "asr%.b %1,%0") + +;; logical shift instructions + +;; commented out because of reload problems in 950612-1.c +;;(define_insn "" +;; [(set (cc0) +;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro") +;; (const_int 32)) 4)) +;; (set (match_operand:SI 1 "nonimmediate_operand" "=dm") +;; (subreg:SI (lshiftrt:DI (match_dup 0) +;; (const_int 32)) 4))] +;; "" +;;{ +;; return "move%.l %0,%1"; +;;}) +;; +;;(define_insn "" +;; [(set (cc0) +;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro") +;; (const_int 32)) 0)) +;; (set (match_operand:DI 1 "nonimmediate_operand" "=do") +;; (lshiftrt:DI (match_dup 0) +;; (const_int 32)))] +;; "" +;;{ +;; if (GET_CODE (operands[1]) == REG) +;; operands[2] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); +;; else +;; operands[2] = adjust_address (operands[1], SImode, 4); +;; return "move%.l %0,%2\;clr%.l %1"; +;;}) + +(define_insn "subreg1lshrdi_const32" + [(set (match_operand:SI 0 "nonimmediate_operand" "=rm") + (subreg:SI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro") + (const_int 32)) 4))] + "" + "move%.l %1,%0" + [(set_attr "type" "move_l")]) + +(define_insn "*lshrdi3_const1" + [(set (match_operand:DI 0 "register_operand" "=d") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") + (const_int 1)))] + "!TARGET_COLDFIRE" + "lsr%.l #1,%0\;roxr%.l #1,%R0") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 2)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (lshiftrt:DI (match_dup 1) (const_int 1))) + (set (match_dup 0) + (lshiftrt:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 3)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (match_dup 0) + (lshiftrt:DI (match_dup 1) (const_int 2))) + (set (match_dup 0) + (lshiftrt:DI (match_dup 0) (const_int 1)))] + "") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 8)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (strict_low_part (subreg:QI 
(match_dup 0) 7)) + (subreg:QI (match_dup 0) 3)) + (set (match_dup 2) + (lshiftrt:SI (match_dup 2) (const_int 8))) + (set (match_dup 3) + (rotatert:SI (match_dup 3) (const_int 8)))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 16)))] + "reload_completed && !TARGET_COLDFIRE" + [(set (strict_low_part (subreg:HI (match_dup 0) 6)) + (subreg:HI (match_dup 0) 2)) + (set (strict_low_part (subreg:HI (match_dup 0) 2)) + (const_int 0)) + (set (match_dup 3) + (rotate:SI (match_dup 3) (const_int 16))) + (set (match_dup 2) + (rotate:SI (match_dup 2) (const_int 16)))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "pre_dec_operand" "") + (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "") + (const_int 32)))] + "reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 0) (const_int 0))] +{ + operands[0] = adjust_address(operands[0], SImode, 0); + operands[1] = gen_highpart(SImode, operands[1]); +}) + +(define_split + [(set (match_operand:DI 0 "post_inc_operand" "") + (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "") + (const_int 32)))] + "reload_completed" + [(set (match_dup 0) (const_int 0)) + (set (match_dup 0) (match_dup 1))] +{ + operands[0] = adjust_address(operands[0], SImode, 0); + operands[1] = gen_highpart(SImode, operands[1]); +}) + +(define_split + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "") + (const_int 32)))] + "reload_completed" + [(set (match_dup 2) (match_dup 5)) + (set (match_dup 4) (const_int 0))] + "split_di(operands, 2, operands + 2, operands + 4);") + +(define_insn "*lshrdi_const32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=ro<>") + (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro") + (const_int 32)))] + "" + "#") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "reload_completed && !TARGET_COLDFIRE + && INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 40" + [(set (match_dup 3) (lshiftrt:SI (match_dup 3) (match_dup 2))) + (set (match_dup 4) (match_dup 3)) + (set (match_dup 3) (const_int 0))] +{ + operands[2] = GEN_INT (INTVAL (operands[2]) - 32); + operands[3] = gen_highpart (SImode, operands[0]); + operands[4] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 48)))] + "reload_completed" + [(set (match_dup 3) (match_dup 2)) + (set (strict_low_part (subreg:HI (match_dup 0) 6)) + (const_int 0)) + (set (match_dup 2) (const_int 0)) + (set (match_dup 3) + (rotate:SI (match_dup 3) (const_int 16)))] +{ + operands[2] = gen_highpart (SImode, operands[0]); + operands[3] = gen_lowpart (SImode, operands[0]); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "reload_completed && !TARGET_COLDFIRE + && INTVAL (operands[2]) > 40 && INTVAL (operands[2]) <= 62" + [(set (match_dup 4) (match_dup 2)) + (set (match_dup 3) (lshiftrt:SI (match_dup 3) (match_dup 4))) + (set (match_dup 4) (match_dup 3)) 
+ (set (match_dup 3) (const_int 0))] +{ + operands[2] = GEN_INT (INTVAL (operands[2]) - 32); + operands[3] = gen_highpart (SImode, operands[0]); + operands[4] = gen_lowpart (SImode, operands[0]); +}) + +(define_insn "*lshrdi_const63" + [(set (match_operand:DI 0 "register_operand" "=d") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") + (const_int 63)))] + "" + "add%.l %0,%0\;clr%.l %0\;clr%.l %R1\;addx%.l %R1,%R1") + +(define_insn "*lshrdi3_const" + [(set (match_operand:DI 0 "register_operand" "=d") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") + (match_operand 2 "const_int_operand" "n")))] + "(!TARGET_COLDFIRE + && ((INTVAL (operands[2]) >= 2 && INTVAL (operands[2]) <= 3) + || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16 + || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)))" + "#") + +(define_expand "lshrdi3" + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")))] + "!TARGET_COLDFIRE" +{ + /* ??? This is a named pattern like this is not allowed to FAIL based + on its operands. */ + if (GET_CODE (operands[2]) != CONST_INT + || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3) + && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16 + && (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63))) + FAIL; +}) + +;; On all 68k models, this makes faster code in a special case. + +(define_insn "lshrsi_31" + [(set (match_operand:SI 0 "register_operand" "=d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0") + (const_int 31)))] + "" +{ + return "add%.l %0,%0\;subx%.l %0,%0\;neg%.l %0"; +}) + +;; On most 68k models, this makes faster code in a special case. + +(define_insn "lshrsi_16" + [(set (match_operand:SI 0 "register_operand" "=d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0") + (const_int 16)))] + "!TUNE_68060" +{ + CC_STATUS_INIT; + return "clr%.w %0\;swap %0"; +}) + +;; On the 68000, this makes faster code in a special case. + +(define_insn "lshrsi_17_24" + [(set (match_operand:SI 0 "register_operand" "=d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "n")))] + "TUNE_68000_10 + && INTVAL (operands[2]) > 16 + && INTVAL (operands[2]) <= 24" +{ + /* I think lsr%.w sets the CC properly. 
*/ + operands[2] = GEN_INT (INTVAL (operands[2]) - 16); + return "clr%.w %0\;swap %0\;lsr%.w %2,%0"; +}) + +(define_insn "lshrsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "general_operand" "dI")))] + "" + "lsr%.l %2,%0" + [(set_attr "type" "shift") + (set_attr "opy" "2")]) + +(define_insn "lshrhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (lshiftrt:HI (match_operand:HI 1 "register_operand" "0") + (match_operand:HI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsr%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (lshiftrt:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsr%.w %1,%0") + +(define_insn "lshrqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (lshiftrt:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsr%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (lshiftrt:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "lsr%.b %1,%0") + +;; rotate instructions + +(define_insn "rotlsi_16" + [(set (match_operand:SI 0 "register_operand" "=d") + (rotate:SI (match_operand:SI 1 "register_operand" "0") + (const_int 16)))] + "" + "swap %0" + [(set_attr "type" "shift")]) + +(define_insn "rotlsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (rotate:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "general_operand" "dINO")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 16) + return "swap %0"; + else if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 16) + { + operands[2] = GEN_INT (32 - INTVAL (operands[2])); + return "ror%.l %2,%0"; + } + else + return "rol%.l %2,%0"; +}) + +(define_insn "rotlhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (rotate:HI (match_operand:HI 1 "register_operand" "0") + (match_operand:HI 2 "general_operand" "dIP")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 8) + { + operands[2] = GEN_INT (16 - INTVAL (operands[2])); + return "ror%.w %2,%0"; + } + else + return "rol%.w %2,%0"; +}) + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (rotate:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dIP")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 8) + { + operands[2] = GEN_INT (16 - INTVAL (operands[2])); + return "ror%.w %2,%0"; + } + else + return "rol%.w %2,%0"; +}) + +(define_insn "rotlqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (rotate:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 4) + { + operands[2] = GEN_INT (8 - INTVAL (operands[2])); + return "ror%.b %2,%0"; + } + else + return "rol%.b %2,%0"; +}) + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (rotate:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" +{ + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 4) + { + operands[2] = GEN_INT (8 - INTVAL (operands[2])); + return "ror%.b %2,%0"; + } + else + return "rol%.b %2,%0"; +}) + 
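+;;
+;; Note that the rotate-left patterns above turn a constant rotate by more
+;; than half the operand width into the complementary right rotate; e.g. a
+;; source-level rotate such as (illustrative only, assuming the combiner
+;; recognizes the idiom as a rotate):
+;;
+;;   unsigned int rotl24 (unsigned int x)
+;;   {
+;;     return (x << 24) | (x >> 8);
+;;   }
+;;
+;; is expected to assemble to a single "ror.l #8,%d0" rather than a
+;; rotate-left by 24.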
+(define_insn "rotrsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (rotatert:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "ror%.l %2,%0") + +(define_insn "rotrhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (rotatert:HI (match_operand:HI 1 "register_operand" "0") + (match_operand:HI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "ror%.w %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (rotatert:HI (match_dup 0) + (match_operand:HI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "ror%.w %1,%0") + +(define_insn "rotrqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (rotatert:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "ror%.b %2,%0") + +(define_insn "" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (rotatert:QI (match_dup 0) + (match_operand:QI 1 "general_operand" "dI")))] + "!TARGET_COLDFIRE" + "ror%.b %1,%0") + + +;; Bit set/clear in memory byte. + +;; set bit, bit number is int +(define_insn "bsetmemqi" + [(set (match_operand:QI 0 "memory_operand" "+m") + (ior:QI (subreg:QI (ashift:SI (const_int 1) + (match_operand:SI 1 "general_operand" "d")) 3) + (match_dup 0)))] + "" +{ + CC_STATUS_INIT; + return "bset %1,%0"; +} + [(set_attr "type" "bitrw")]) + +;; set bit, bit number is (sign/zero)_extended from HImode/QImode +(define_insn "*bsetmemqi_ext" + [(set (match_operand:QI 0 "memory_operand" "+m") + (ior:QI (subreg:QI (ashift:SI (const_int 1) + (match_operator:SI 2 "extend_operator" + [(match_operand 1 "general_operand" "d")])) 3) + (match_dup 0)))] + "" +{ + CC_STATUS_INIT; + return "bset %1,%0"; +} + [(set_attr "type" "bitrw")]) + +;; clear bit, bit number is int +(define_insn "bclrmemqi" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m") + (const_int 1) + (minus:SI (const_int 7) + (match_operand:SI 1 "general_operand" "d"))) + (const_int 0))] + "" +{ + CC_STATUS_INIT; + return "bclr %1,%0"; +} + [(set_attr "type" "bitrw")]) + +;; clear bit, bit number is (sign/zero)_extended from HImode/QImode +(define_insn "*bclrmemqi_ext" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m") + (const_int 1) + (minus:SI (const_int 7) + (match_operator:SI 2 "extend_operator" + [(match_operand 1 "general_operand" "d")]))) + (const_int 0))] + "" +{ + CC_STATUS_INIT; + return "bclr %1,%0"; +} + [(set_attr "type" "bitrw")]) + +;; Special cases of bit-field insns which we should +;; recognize in preference to the general case. +;; These handle aligned 8-bit and 16-bit fields, +;; which can usually be done with move instructions. + +; +; Special case for 32-bit field in memory. This only occurs when 32-bit +; alignment of structure members is specified. +; +; The move is allowed to be odd byte aligned, because that's still faster +; than an odd byte aligned bit-field instruction. +; +(define_insn "" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o") + (const_int 32) + (match_operand:SI 1 "const_int_operand" "n")) + (match_operand:SI 2 "general_src_operand" "rmSi"))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[1]) % 8) == 0 + && ! 
mode_dependent_address_p (XEXP (operands[0], 0))" +{ + operands[0] + = adjust_address (operands[0], SImode, INTVAL (operands[1]) / 8); + + return "move%.l %2,%0"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+do") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (match_operand:SI 3 "register_operand" "d"))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16) + && INTVAL (operands[2]) % INTVAL (operands[1]) == 0 + && (GET_CODE (operands[0]) == REG + || ! mode_dependent_address_p (XEXP (operands[0], 0)))" +{ + if (REG_P (operands[0])) + { + if (INTVAL (operands[1]) + INTVAL (operands[2]) != 32) + return "bfins %3,%0{%b2:%b1}"; + } + else + operands[0] = adjust_address (operands[0], + INTVAL (operands[1]) == 8 ? QImode : HImode, + INTVAL (operands[2]) / 8); + + if (GET_CODE (operands[3]) == MEM) + operands[3] = adjust_address (operands[3], + INTVAL (operands[1]) == 8 ? QImode : HImode, + (32 - INTVAL (operands[1])) / 8); + + if (INTVAL (operands[1]) == 8) + return "move%.b %3,%0"; + return "move%.w %3,%0"; +}) + + +; +; Special case for 32-bit field in memory. This only occurs when 32-bit +; alignment of structure members is specified. +; +; The move is allowed to be odd byte aligned, because that's still faster +; than an odd byte aligned bit-field instruction. +; +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=rm") + (zero_extract:SI (match_operand:QI 1 "memory_src_operand" "oS") + (const_int 32) + (match_operand:SI 2 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[2]) % 8) == 0 + && ! mode_dependent_address_p (XEXP (operands[1], 0))" +{ + operands[1] + = adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8); + + return "move%.l %1,%0"; +}) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=&d") + (zero_extract:SI (match_operand:SI 1 "register_operand" "do") + (match_operand:SI 2 "const_int_operand" "n") + (match_operand:SI 3 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16) + && INTVAL (operands[3]) % INTVAL (operands[2]) == 0 + && (GET_CODE (operands[1]) == REG + || ! mode_dependent_address_p (XEXP (operands[1], 0)))" +{ + cc_status.flags |= CC_NOT_NEGATIVE; + if (REG_P (operands[1])) + { + if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32) + return "bfextu %1{%b3:%b2},%0"; + } + else + operands[1] + = adjust_address (operands[1], SImode, INTVAL (operands[3]) / 8); + + output_asm_insn ("clr%.l %0", operands); + if (GET_CODE (operands[0]) == MEM) + operands[0] = adjust_address (operands[0], + INTVAL (operands[2]) == 8 ? QImode : HImode, + (32 - INTVAL (operands[1])) / 8); + + if (INTVAL (operands[2]) == 8) + return "move%.b %1,%0"; + return "move%.w %1,%0"; +}) + +; +; Special case for 32-bit field in memory. This only occurs when 32-bit +; alignment of structure members is specified. +; +; The move is allowed to be odd byte aligned, because that's still faster +; than an odd byte aligned bit-field instruction. +; +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=rm") + (sign_extract:SI (match_operand:QI 1 "memory_src_operand" "oS") + (const_int 32) + (match_operand:SI 2 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[2]) % 8) == 0 + && ! 
mode_dependent_address_p (XEXP (operands[1], 0))" +{ + operands[1] + = adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8); + + return "move%.l %1,%0"; +}) + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extract:SI (match_operand:SI 1 "register_operand" "do") + (match_operand:SI 2 "const_int_operand" "n") + (match_operand:SI 3 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16) + && INTVAL (operands[3]) % INTVAL (operands[2]) == 0 + && (GET_CODE (operands[1]) == REG + || ! mode_dependent_address_p (XEXP (operands[1], 0)))" +{ + if (REG_P (operands[1])) + { + if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32) + return "bfexts %1{%b3:%b2},%0"; + } + else + operands[1] + = adjust_address (operands[1], + INTVAL (operands[2]) == 8 ? QImode : HImode, + INTVAL (operands[3]) / 8); + + if (INTVAL (operands[2]) == 8) + return "move%.b %1,%0\;extb%.l %0"; + return "move%.w %1,%0\;ext%.l %0"; +}) + +;; Bit-field instructions, general cases. +;; "o,d" constraint causes a nonoffsettable memref to match the "o" +;; so that its address is reloaded. + +(define_expand "extv" + [(set (match_operand:SI 0 "register_operand" "") + (sign_extract:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "const_int_operand" "") + (match_operand:SI 3 "const_int_operand" "")))] + "TARGET_68020 && TARGET_BITFIELD" + "") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (sign_extract:SI (match_operand:QI 1 "memory_operand" "o") + (match_operand:SI 2 "nonmemory_operand" "dn") + (match_operand:SI 3 "nonmemory_operand" "dn")))] + "TARGET_68020 && TARGET_BITFIELD" + "bfexts %1{%b3:%b2},%0") + +(define_expand "extzv" + [(set (match_operand:SI 0 "register_operand" "") + (zero_extract:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "const_int_operand" "") + (match_operand:SI 3 "const_int_operand" "")))] + "TARGET_68020 && TARGET_BITFIELD" + "") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extract:SI (match_operand:QI 1 "memory_operand" "o") + (match_operand:SI 2 "nonmemory_operand" "dn") + (match_operand:SI 3 "nonmemory_operand" "dn")))] + "TARGET_68020 && TARGET_BITFIELD" +{ + if (GET_CODE (operands[2]) == CONST_INT) + { + if (INTVAL (operands[2]) != 32) + cc_status.flags |= CC_NOT_NEGATIVE; + } + else + { + CC_STATUS_INIT; + } + return "bfextu %1{%b3:%b2},%0"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o") + (match_operand:SI 1 "nonmemory_operand" "dn") + (match_operand:SI 2 "nonmemory_operand" "dn")) + (xor:SI (zero_extract:SI (match_dup 0) (match_dup 1) (match_dup 2)) + (match_operand 3 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD + && (INTVAL (operands[3]) == -1 + || (GET_CODE (operands[1]) == CONST_INT + && (~ INTVAL (operands[3]) & ((1 << INTVAL (operands[1]))- 1)) == 0))" +{ + CC_STATUS_INIT; + return "bfchg %0{%b2:%b1}"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o") + (match_operand:SI 1 "nonmemory_operand" "dn") + (match_operand:SI 2 "nonmemory_operand" "dn")) + (const_int 0))] + "TARGET_68020 && TARGET_BITFIELD" +{ + CC_STATUS_INIT; + return "bfclr %0{%b2:%b1}"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o") + (match_operand:SI 1 "general_operand" "dn") + (match_operand:SI 2 "general_operand" "dn")) + (const_int -1))] + "TARGET_68020 && TARGET_BITFIELD" +{ + 
CC_STATUS_INIT; + return "bfset %0{%b2:%b1}"; +}) + +(define_expand "insv" + [(set (zero_extract:SI (match_operand:SI 0 "nonimmediate_operand" "") + (match_operand:SI 1 "const_int_operand" "") + (match_operand:SI 2 "const_int_operand" "")) + (match_operand:SI 3 "register_operand" ""))] + "TARGET_68020 && TARGET_BITFIELD" + "") + +(define_insn "" + [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o") + (match_operand:SI 1 "nonmemory_operand" "dn") + (match_operand:SI 2 "nonmemory_operand" "dn")) + (match_operand:SI 3 "register_operand" "d"))] + "TARGET_68020 && TARGET_BITFIELD" + "bfins %3,%0{%b2:%b1}") + +;; Now recognize bit-field insns that operate on registers +;; (or at least were intended to do so). + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extract:SI (match_operand:SI 1 "register_operand" "d") + (match_operand:SI 2 "const_int_operand" "n") + (match_operand:SI 3 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD" + "bfexts %1{%b3:%b2},%0") + +(define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (zero_extract:SI (match_operand:SI 1 "register_operand" "d") + (match_operand:SI 2 "const_int_operand" "n") + (match_operand:SI 3 "const_int_operand" "n")))] + "TARGET_68020 && TARGET_BITFIELD" +{ + if (GET_CODE (operands[2]) == CONST_INT) + { + if (INTVAL (operands[2]) != 32) + cc_status.flags |= CC_NOT_NEGATIVE; + } + else + { + CC_STATUS_INIT; + } + return "bfextu %1{%b3:%b2},%0"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))] + "TARGET_68020 && TARGET_BITFIELD" +{ + CC_STATUS_INIT; + return "bfclr %0{%b2:%b1}"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (const_int -1))] + "TARGET_68020 && TARGET_BITFIELD" +{ + CC_STATUS_INIT; + return "bfset %0{%b2:%b1}"; +}) + +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (match_operand:SI 3 "register_operand" "d"))] + "TARGET_68020 && TARGET_BITFIELD" +{ +#if 0 + /* These special cases are now recognized by a specific pattern. */ + if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT + && INTVAL (operands[1]) == 16 && INTVAL (operands[2]) == 16) + return "move%.w %3,%0"; + if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT + && INTVAL (operands[1]) == 24 && INTVAL (operands[2]) == 8) + return "move%.b %3,%0"; +#endif + return "bfins %3,%0{%b2:%b1}"; +}) + +;; Special patterns for optimizing bit-field instructions. + +(define_insn "" + [(set (cc0) + (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "general_operand" "dn")) + (const_int 0)))] + "TARGET_68020 && TARGET_BITFIELD" +{ + if (operands[1] == const1_rtx + && GET_CODE (operands[2]) == CONST_INT) + { + int width = GET_CODE (operands[0]) == REG ? 31 : 7; + return output_btst (operands, + GEN_INT (width - INTVAL (operands[2])), + operands[0], insn, 1000); + /* Pass 1000 as SIGNPOS argument so that btst will + not think we are testing the sign bit for an `and' + and assume that nonzero implies a negative result. 
*/ + } + if (INTVAL (operands[1]) != 32) + cc_status.flags = CC_NOT_NEGATIVE; + return "bftst %0{%b2:%b1}"; +}) + + +;;; now handle the register cases +(define_insn "" + [(set (cc0) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "general_operand" "dn")) + (const_int 0)))] + "TARGET_68020 && TARGET_BITFIELD" +{ + if (operands[1] == const1_rtx + && GET_CODE (operands[2]) == CONST_INT) + { + int width = GET_CODE (operands[0]) == REG ? 31 : 7; + return output_btst (operands, GEN_INT (width - INTVAL (operands[2])), + operands[0], insn, 1000); + /* Pass 1000 as SIGNPOS argument so that btst will + not think we are testing the sign bit for an `and' + and assume that nonzero implies a negative result. */ + } + if (INTVAL (operands[1]) != 32) + cc_status.flags = CC_NOT_NEGATIVE; + return "bftst %0{%b2:%b1}"; +}) + +(define_insn "scc0_di" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (match_operator 1 "ordered_comparison_operator" + [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))] + "! TARGET_COLDFIRE" +{ + return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]); +}) + +(define_insn "scc0_di_5200" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d") + (match_operator 1 "ordered_comparison_operator" + [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))] + "TARGET_COLDFIRE" +{ + return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]); +}) + +(define_insn "scc_di" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,dm") + (match_operator 1 "ordered_comparison_operator" + [(match_operand:DI 2 "general_operand" "ro,r") + (match_operand:DI 3 "general_operand" "r,ro")]))] + "! TARGET_COLDFIRE" +{ + return output_scc_di (operands[1], operands[2], operands[3], operands[0]); +}) + +(define_insn "scc_di_5200" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d") + (match_operator 1 "ordered_comparison_operator" + [(match_operand:DI 2 "general_operand" "ro,r") + (match_operand:DI 3 "general_operand" "r,ro")]))] + "TARGET_COLDFIRE" +{ + return output_scc_di (operands[1], operands[2], operands[3], operands[0]); +}) + +;; Note that operand 0 of an SCC insn is supported in the hardware as +;; memory, but we cannot allow it to be in memory in case the address +;; needs to be reloaded. 
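+;; (Illustrative note: an Scc instruction sets its byte destination to
+;; all ones when the condition holds and to zero otherwise, so a test
+;; such as d0 == d1 typically comes out roughly as
+;;	cmp.l %d1,%d0
+;;	seq %d0
+;; followed by a neg.b when a 0/1 value is wanted.  The register names
+;; are placeholders only.  The patterns below do not redo the comparison;
+;; each one reuses the flags left by the previous insn, which is why it
+;; restores cc_status from cc_prev_status.)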
+ +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (eq:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("seq %0", "fseq %0", "seq %0"); +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (ne:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("sne %0", "fsne %0", "sne %0"); +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (gt:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("sgt %0", "fsgt %0", 0); +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (gtu:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + return "shi %0"; +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (lt:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("slt %0", "fslt %0", "smi %0"); +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (ltu:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + return "scs %0"; +}) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (ge:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("sge %0", "fsge %0", "spl %0"); +}) + +(define_insn "*scc" + [(set (match_operand:QI 0 "register_operand" "=d") + (geu:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + return "scc %0"; +} + [(set_attr "type" "scc")]) + +(define_insn "" + [(set (match_operand:QI 0 "register_operand" "=d") + (le:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + OUTPUT_JUMP ("sle %0", "fsle %0", 0); +}) + +(define_insn "*sls" + [(set (match_operand:QI 0 "register_operand" "=d") + (leu:QI (cc0) (const_int 0)))] + "" +{ + cc_status = cc_prev_status; + return "sls %0"; +} + [(set_attr "type" "scc")]) + +(define_insn "*sordered_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (ordered:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsor %0"; +}) + +(define_insn "*sunordered_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (unordered:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsun %0"; +}) + +(define_insn "*suneq_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (uneq:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsueq %0"; +}) + +(define_insn "*sunge_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (unge:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsuge %0"; +}) + +(define_insn "*sungt_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (ungt:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsugt %0"; +}) + +(define_insn "*sunle_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (unle:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsule %0"; +}) + +(define_insn "*sunlt_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (unlt:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsult %0"; +}) + +(define_insn "*sltgt_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (ltgt:QI (cc0) (const_int 0)))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsogl %0"; +}) + +(define_insn "*fsogt_1" + [(set 
(match_operand:QI 0 "register_operand" "=d") + (not:QI (unle:QI (cc0) (const_int 0))))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsogt %0"; +}) + +(define_insn "*fsoge_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (not:QI (unlt:QI (cc0) (const_int 0))))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsoge %0"; +}) + +(define_insn "*fsolt_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (not:QI (unge:QI (cc0) (const_int 0))))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsolt %0"; +}) + +(define_insn "*fsole_1" + [(set (match_operand:QI 0 "register_operand" "=d") + (not:QI (ungt:QI (cc0) (const_int 0))))] + "TARGET_68881 && !TUNE_68060" +{ + cc_status = cc_prev_status; + return "fsole %0"; +}) + +;; Basic conditional jump instructions. + +(define_insn "beq0_di" + [(set (pc) + (if_then_else (eq (match_operand:DI 0 "general_operand" "d*ao,<>") + (const_int 0)) + (label_ref (match_operand 1 "" ",")) + (pc))) + (clobber (match_scratch:SI 2 "=d,d"))] + "" +{ + CC_STATUS_INIT; + if (which_alternative == 1) + return "move%.l %0,%2\;or%.l %0,%2\;jeq %l1"; + if ((cc_prev_status.value1 + && rtx_equal_p (cc_prev_status.value1, operands[0])) + || (cc_prev_status.value2 + && rtx_equal_p (cc_prev_status.value2, operands[0]))) + { + cc_status = cc_prev_status; + return "jeq %l1"; + } + if (GET_CODE (operands[0]) == REG) + operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[3] = adjust_address (operands[0], SImode, 4); + if (! ADDRESS_REG_P (operands[0])) + { + if (reg_overlap_mentioned_p (operands[2], operands[0])) + { + if (reg_overlap_mentioned_p (operands[2], operands[3])) + return "or%.l %0,%2\;jeq %l1"; + else + return "or%.l %3,%2\;jeq %l1"; + } + return "move%.l %0,%2\;or%.l %3,%2\;jeq %l1"; + } + operands[4] = gen_label_rtx(); + if (TARGET_68020 || TARGET_COLDFIRE) + output_asm_insn ("tst%.l %0\;jne %l4\;tst%.l %3\;jeq %l1", operands); + else + output_asm_insn ("cmp%.w #0,%0\;jne %l4\;cmp%.w #0,%3\;jeq %l1", operands); + (*targetm.asm_out.internal_label) (asm_out_file, "L", + CODE_LABEL_NUMBER (operands[4])); + return ""; +}) + +(define_insn "bne0_di" + [(set (pc) + (if_then_else (ne (match_operand:DI 0 "general_operand" "do,*a") + (const_int 0)) + (label_ref (match_operand 1 "" ",")) + (pc))) + (clobber (match_scratch:SI 2 "=d,X"))] + "" +{ + if ((cc_prev_status.value1 + && rtx_equal_p (cc_prev_status.value1, operands[0])) + || (cc_prev_status.value2 + && rtx_equal_p (cc_prev_status.value2, operands[0]))) + { + cc_status = cc_prev_status; + return "jne %l1"; + } + CC_STATUS_INIT; + if (GET_CODE (operands[0]) == REG) + operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + else + operands[3] = adjust_address (operands[0], SImode, 4); + if (!ADDRESS_REG_P (operands[0])) + { + if (reg_overlap_mentioned_p (operands[2], operands[0])) + { + if (reg_overlap_mentioned_p (operands[2], operands[3])) + return "or%.l %0,%2\;jne %l1"; + else + return "or%.l %3,%2\;jne %l1"; + } + return "move%.l %0,%2\;or%.l %3,%2\;jne %l1"; + } + if (TARGET_68020 || TARGET_COLDFIRE) + return "tst%.l %0\;jne %l1\;tst%.l %3\;jne %l1"; + else + return "cmp%.w #0,%0\;jne %l1\;cmp%.w #0,%3\;jne %l1"; +}) + +(define_insn "bge0_di" + [(set (pc) + (if_then_else (ge (match_operand:DI 0 "general_operand" "ro") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc)))] + "" +{ + if ((cc_prev_status.value1 + && rtx_equal_p (cc_prev_status.value1, operands[0])) + || 
(cc_prev_status.value2 + && rtx_equal_p (cc_prev_status.value2, operands[0]))) + { + cc_status = cc_prev_status; + return cc_status.flags & CC_REVERSED ? "jle %l1" : "jpl %l1"; + } + CC_STATUS_INIT; + if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0])) + output_asm_insn("tst%.l %0", operands); + else + { + /* On an address reg, cmpw may replace cmpl. */ + output_asm_insn("cmp%.w #0,%0", operands); + } + return "jpl %l1"; +}) + +(define_insn "blt0_di" + [(set (pc) + (if_then_else (lt (match_operand:DI 0 "general_operand" "ro") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc)))] + "" +{ + if ((cc_prev_status.value1 + && rtx_equal_p (cc_prev_status.value1, operands[0])) + || (cc_prev_status.value2 + && rtx_equal_p (cc_prev_status.value2, operands[0]))) + { + cc_status = cc_prev_status; + return cc_status.flags & CC_REVERSED ? "jgt %l1" : "jmi %l1"; + } + CC_STATUS_INIT; + if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0])) + output_asm_insn("tst%.l %0", operands); + else + { + /* On an address reg, cmpw may replace cmpl. */ + output_asm_insn("cmp%.w #0,%0", operands); + } + return "jmi %l1"; +}) + +(define_insn "beq" + [(set (pc) + (if_then_else (eq (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0"); +} + [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + +(define_insn "bne" + [(set (pc) + (if_then_else (ne (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0"); +} + [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + +(define_insn "bgt" + [(set (pc) + (if_then_else (gt (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jgt %l0", "fjgt %l0", 0); +} + [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + +(define_insn "bgtu" + [(set (pc) + (if_then_else (gtu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jhi %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "blt" + [(set (pc) + (if_then_else (lt (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jlt %l0", "fjlt %l0", "jmi %l0"); +} + [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + +(define_insn "bltu" + [(set (pc) + (if_then_else (ltu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jcs %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "bge" + [(set (pc) + (if_then_else (ge (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jge %l0", "fjge %l0", "jpl %l0"); +}) + +(define_insn "bgeu" + [(set (pc) + (if_then_else (geu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= 
~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jcc %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "ble" + [(set (pc) + (if_then_else (le (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jle %l0", "fjle %l0", 0); +} + [(set_attr "type" "bcc")]) + +(define_insn "bleu" + [(set (pc) + (if_then_else (leu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jls %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "bordered" + [(set (pc) + (if_then_else (ordered (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjor %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bunordered" + [(set (pc) + (if_then_else (unordered (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjun %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "buneq" + [(set (pc) + (if_then_else (uneq (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjueq %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bunge" + [(set (pc) + (if_then_else (unge (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjuge %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bungt" + [(set (pc) + (if_then_else (ungt (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjugt %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bunle" + [(set (pc) + (if_then_else (unle (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjule %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bunlt" + [(set (pc) + (if_then_else (unlt (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjult %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "bltgt" + [(set (pc) + (if_then_else (ltgt (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogl %l0"; +} + [(set_attr "type" "fbcc")]) + +;; Negated conditional jump instructions. 
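+;; (Illustrative note: these are the preceding branch patterns with the
+;; arms of the if_then_else swapped, so the opposite condition is
+;; emitted.  A reversed beq, for instance, must branch when the values
+;; are unequal and therefore outputs jne %l0, or fjne %l0 for a 68881
+;; comparison, instead of the jeq/fjeq of the plain pattern above.)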
+ +(define_insn "*beq_rev" + [(set (pc) + (if_then_else (eq (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0"); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bne_rev" + [(set (pc) + (if_then_else (ne (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0"); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bgt_rev" + [(set (pc) + (if_then_else (gt (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jle %l0", "fjngt %l0", 0); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bgtu_rev" + [(set (pc) + (if_then_else (gtu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jls %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "*blt_rev" + [(set (pc) + (if_then_else (lt (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jge %l0", "fjnlt %l0", "jpl %l0"); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bltu_rev" + [(set (pc) + (if_then_else (ltu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jcc %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "*bge_rev" + [(set (pc) + (if_then_else (ge (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jlt %l0", "fjnge %l0", "jmi %l0"); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bgeu_rev" + [(set (pc) + (if_then_else (geu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jcs %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "*ble_rev" + [(set (pc) + (if_then_else (le (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + OUTPUT_JUMP ("jgt %l0", "fjnle %l0", 0); +} + [(set_attr "type" "bcc")]) + +(define_insn "*bleu_rev" + [(set (pc) + (if_then_else (leu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) + { + cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; + return 0; + } + + return "jhi %l0"; +} + [(set_attr "type" "bcc")]) + +(define_insn "*bordered_rev" + [(set (pc) + (if_then_else (ordered (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjun %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bunordered_rev" + [(set (pc) + (if_then_else (unordered (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjor %l0"; +} + 
[(set_attr "type" "fbcc")]) + +(define_insn "*buneq_rev" + [(set (pc) + (if_then_else (uneq (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogl %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bunge_rev" + [(set (pc) + (if_then_else (unge (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjolt %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bungt_rev" + [(set (pc) + (if_then_else (ungt (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjole %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bunle_rev" + [(set (pc) + (if_then_else (unle (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogt %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bunlt_rev" + [(set (pc) + (if_then_else (unlt (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjoge %l0"; +} + [(set_attr "type" "fbcc")]) + +(define_insn "*bltgt_rev" + [(set (pc) + (if_then_else (ltgt (cc0) (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" +{ + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjueq %l0"; +} + [(set_attr "type" "fbcc")]) + +;; Unconditional and other jump instructions +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))] + "" + "jra %l0" + [(set_attr "type" "bra")]) + +(define_expand "tablejump" + [(parallel [(set (pc) (match_operand 0 "" "")) + (use (label_ref (match_operand 1 "" "")))])] + "" +{ +#ifdef CASE_VECTOR_PC_RELATIVE + operands[0] = gen_rtx_PLUS (SImode, pc_rtx, + gen_rtx_SIGN_EXTEND (SImode, operands[0])); +#endif +}) + +;; Jump to variable address from dispatch table of absolute addresses. +(define_insn "*tablejump_internal" + [(set (pc) (match_operand:SI 0 "register_operand" "a")) + (use (label_ref (match_operand 1 "" "")))] + "" +{ + return MOTOROLA ? "jmp (%0)" : "jmp %0@"; +} + [(set_attr "type" "jmp")]) + +;; Jump to variable address from dispatch table of relative addresses. +(define_insn "" + [(set (pc) + (plus:SI (pc) + (sign_extend:SI (match_operand:HI 0 "register_operand" "r")))) + (use (label_ref (match_operand 1 "" "")))] + "" +{ +#ifdef ASM_RETURN_CASE_JUMP + ASM_RETURN_CASE_JUMP; +#else + if (TARGET_COLDFIRE) + { + if (ADDRESS_REG_P (operands[0])) + return MOTOROLA ? "jmp (2,pc,%0.l)" : "jmp pc@(2,%0:l)"; + else if (MOTOROLA) + return "ext%.l %0\;jmp (2,pc,%0.l)"; + else + return "extl %0\;jmp pc@(2,%0:l)"; + } + else + return MOTOROLA ? "jmp (2,pc,%0.w)" : "jmp pc@(2,%0:w)"; +#endif +}) + +;; Decrement-and-branch insns. 
+(define_insn "*dbne_hi" + [(set (pc) + (if_then_else + (ne (match_operand:HI 0 "nonimmediate_operand" "+d*g") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (DATA_REG_P (operands[0])) + return "dbra %0,%l1"; + if (GET_CODE (operands[0]) == MEM) + return "subq%.w #1,%0\;jcc %l1"; + return "subq%.w #1,%0\;cmp%.w #-1,%0\;jne %l1"; +}) + +(define_insn "*dbne_si" + [(set (pc) + (if_then_else + (ne (match_operand:SI 0 "nonimmediate_operand" "+d*g") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))] + "!TARGET_COLDFIRE" +{ + CC_STATUS_INIT; + if (DATA_REG_P (operands[0])) + return "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jcc %l1"; + if (GET_CODE (operands[0]) == MEM) + return "subq%.l #1,%0\;jcc %l1"; + return "subq%.l #1,%0\;cmp%.l #-1,%0\;jne %l1"; +}) + +;; Two dbra patterns that use REG_NOTES info generated by strength_reduce. + +(define_insn "*dbge_hi" + [(set (pc) + (if_then_else + (ge (plus:HI (match_operand:HI 0 "nonimmediate_operand" "+d*am") + (const_int -1)) + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))] + "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)" +{ + CC_STATUS_INIT; + if (DATA_REG_P (operands[0])) + return "dbra %0,%l1"; + if (GET_CODE (operands[0]) == MEM) + return "subq%.w #1,%0\;jcc %l1"; + return "subq%.w #1,%0\;cmp%.w #-1,%0\;jne %l1"; +}) + +(define_expand "decrement_and_branch_until_zero" + [(parallel [(set (pc) + (if_then_else + (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "") + (const_int -1)) + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))])] + "" + "") + +(define_insn "*dbge_si" + [(set (pc) + (if_then_else + (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+d*am") + (const_int -1)) + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))] + "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)" +{ + CC_STATUS_INIT; + if (DATA_REG_P (operands[0])) + return "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jcc %l1"; + if (GET_CODE (operands[0]) == MEM) + return "subq%.l #1,%0\;jcc %l1"; + return "subq%.l #1,%0\;cmp%.l #-1,%0\;jne %l1"; +}) + +(define_expand "sibcall" + [(call (match_operand:QI 0 "memory_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "" +{ + operands[0] = m68k_legitimize_sibcall_address (operands[0]); +}) + +(define_insn "*sibcall" + [(call (mem:QI (match_operand:SI 0 "sibcall_operand" "")) + (match_operand:SI 1 "general_operand" ""))] + "SIBLING_CALL_P (insn)" +{ + return output_sibcall (operands[0]); +}) + +(define_expand "sibcall_value" + [(set (match_operand 0 "" "") + (call (match_operand:QI 1 "memory_operand" "") + (match_operand:SI 2 "general_operand" "")))] + "" +{ + operands[1] = m68k_legitimize_sibcall_address (operands[1]); +}) + +(define_insn "*sibcall_value" + [(set (match_operand 0 "" "=rf,rf") + (call (mem:QI (match_operand:SI 1 "sibcall_operand" "")) + (match_operand:SI 2 "general_operand" "")))] + "SIBLING_CALL_P (insn)" +{ + operands[0] = operands[1]; + return output_sibcall (operands[0]); +}) + +;; Call subroutine with no return value. 
+(define_expand "call" + [(call (match_operand:QI 0 "memory_operand" "") + (match_operand:SI 1 "general_operand" ""))] + ;; Operand 1 not really used on the m68000. + "" +{ + operands[0] = m68k_legitimize_call_address (operands[0]); +}) + +(define_insn "*call" + [(call (mem:QI (match_operand:SI 0 "call_operand" "a,W")) + (match_operand:SI 1 "general_operand" "g,g"))] + ;; Operand 1 not really used on the m68000. + "!SIBLING_CALL_P (insn)" +{ + return output_call (operands[0]); +} + [(set_attr "type" "jsr")]) + +;; Call subroutine, returning value in operand 0 +;; (which must be a hard register). +(define_expand "call_value" + [(set (match_operand 0 "" "") + (call (match_operand:QI 1 "memory_operand" "") + (match_operand:SI 2 "general_operand" "")))] + ;; Operand 2 not really used on the m68000. + "" +{ + operands[1] = m68k_legitimize_call_address (operands[1]); +}) + +(define_insn "*non_symbolic_call_value" + [(set (match_operand 0 "" "=rf,rf") + (call (mem:QI (match_operand:SI 1 "non_symbolic_call_operand" "a,W")) + (match_operand:SI 2 "general_operand" "g,g")))] + ;; Operand 2 not really used on the m68000. + "!SIBLING_CALL_P (insn)" + "jsr %a1" + [(set_attr "type" "jsr") + (set_attr "opx" "1")]) + +(define_insn "*symbolic_call_value_jsr" + [(set (match_operand 0 "" "=rf,rf") + (call (mem:QI (match_operand:SI 1 "symbolic_operand" "a,W")) + (match_operand:SI 2 "general_operand" "g,g")))] + ;; Operand 2 not really used on the m68000. + "!SIBLING_CALL_P (insn) && m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_JSR" +{ + operands[0] = operands[1]; + return m68k_symbolic_call; +} + [(set_attr "type" "jsr") + (set_attr "opx" "1")]) + +(define_insn "*symbolic_call_value_bsr" + [(set (match_operand 0 "" "=rf,rf") + (call (mem:QI (match_operand:SI 1 "symbolic_operand" "a,W")) + (match_operand:SI 2 "general_operand" "g,g")))] + ;; Operand 2 not really used on the m68000. + "!SIBLING_CALL_P (insn) + && (m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_BSR_C + || m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_BSR_P)" +{ + operands[0] = operands[1]; + return m68k_symbolic_call; +} + [(set_attr "type" "bsr") + (set_attr "opx" "1")]) + +;; Call subroutine returning any type. + +(define_expand "untyped_call" + [(parallel [(call (match_operand 0 "" "") + (const_int 0)) + (match_operand 1 "" "") + (match_operand 2 "" "")])] + "NEEDS_UNTYPED_CALL" +{ + int i; + + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. */ + emit_insn (gen_blockage ()); + + DONE; +}) + +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and +;; all of memory. This blocks insns from being moved across this point. + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] + "" + "") + +(define_insn "nop" + [(const_int 0)] + "" + "nop" + [(set_attr "type" "nop")]) + +(define_expand "prologue" + [(const_int 0)] + "" +{ + m68k_expand_prologue (); + DONE; +}) + +(define_expand "epilogue" + [(return)] + "" +{ + m68k_expand_epilogue (false); + DONE; +}) + +(define_expand "sibcall_epilogue" + [(return)] + "" +{ + m68k_expand_epilogue (true); + DONE; +}) + +;; Used for frameless functions which save no regs and allocate no locals. 
+(define_expand "return" + [(return)] + "m68k_use_return_insn ()" + "") + +(define_insn "*return" + [(return)] + "" +{ + switch (m68k_get_function_kind (current_function_decl)) + { + case m68k_fk_interrupt_handler: + return "rte"; + + case m68k_fk_interrupt_thread: + return "sleep"; + + default: + if (crtl->args.pops_args) + { + operands[0] = GEN_INT (crtl->args.pops_args); + return "rtd %0"; + } + else + return "rts"; + } +} + [(set_attr "type" "rts")]) + +(define_insn "*m68k_store_multiple" + [(match_parallel 0 "" [(match_operand 1 "")])] + "m68k_movem_pattern_p (operands[0], NULL, 0, true)" +{ + return m68k_output_movem (operands, operands[0], 0, true); +}) + +(define_insn "*m68k_store_multiple_automod" + [(match_parallel 0 "" + [(set (match_operand:SI 1 "register_operand" "=a") + (plus:SI (match_operand:SI 2 "register_operand" "1") + (match_operand:SI 3 "const_int_operand")))])] + "m68k_movem_pattern_p (operands[0], operands[1], INTVAL (operands[3]), true)" +{ + return m68k_output_movem (operands, operands[0], INTVAL (operands[3]), true); +}) + +(define_insn "*m68k_load_multiple" + [(match_parallel 0 "" [(match_operand 1 "")])] + "m68k_movem_pattern_p (operands[0], NULL, 0, false)" +{ + return m68k_output_movem (operands, operands[0], 0, false); +}) + +(define_insn "*m68k_load_multiple_automod" + [(match_parallel 0 "" + [(set (match_operand:SI 1 "register_operand" "=a") + (plus:SI (match_operand:SI 2 "register_operand" "1") + (match_operand:SI 3 "const_int_operand")))])] + "m68k_movem_pattern_p (operands[0], operands[1], + INTVAL (operands[3]), false)" +{ + return m68k_output_movem (operands, operands[0], + INTVAL (operands[3]), false); +}) + +(define_expand "link" + [(parallel + [(set (match_operand:SI 0 "register_operand") + (plus:SI (reg:SI SP_REG) (const_int -4))) + (set (match_dup 2) + (match_dup 0)) + (set (reg:SI SP_REG) + (plus:SI (reg:SI SP_REG) + (match_operand:SI 1 "const_int_operand")))])] + "TARGET_68020 || INTVAL (operands[1]) >= -0x8004" +{ + operands[2] = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, -4)); +}) + +(define_insn "*link" + [(set (match_operand:SI 0 "register_operand" "+r") + (plus:SI (reg:SI SP_REG) (const_int -4))) + (set (mem:SI (plus:SI (reg:SI SP_REG) (const_int -4))) + (match_dup 0)) + (set (reg:SI SP_REG) + (plus:SI (reg:SI SP_REG) + (match_operand:SI 1 "const_int_operand")))] + "TARGET_68020 || INTVAL (operands[1]) >= -0x8004" +{ + operands[1] = GEN_INT (INTVAL (operands[1]) + 4); + if (!MOTOROLA) + return "link %0,%1"; + else if (INTVAL (operands[1]) >= -0x8000) + return "link.w %0,%1"; + else + return "link.l %0,%1"; +} + [(set_attr "type" "link")]) + +(define_expand "unlink" + [(parallel + [(set (match_operand:SI 0 "register_operand") + (match_dup 1)) + (set (reg:SI SP_REG) + (plus:SI (match_dup 0) + (const_int 4)))])] + "" +{ + operands[1] = gen_frame_mem (SImode, copy_rtx (operands[0])); +}) + +(define_insn "*unlink" + [(set (match_operand:SI 0 "register_operand" "+r") + (mem:SI (match_dup 0))) + (set (reg:SI SP_REG) + (plus:SI (match_dup 0) + (const_int 4)))] + "" + "unlk %0" + [(set_attr "type" "unlk")]) + +(define_insn "load_got" + [(set (match_operand:SI 0 "register_operand" "=a") + (unspec:SI [(const_int 0)] UNSPEC_GOT))] + "" +{ + if (TARGET_ID_SHARED_LIBRARY) + { + operands[1] = gen_rtx_REG (Pmode, PIC_REG); + return MOTOROLA ? 
"move.l %?(%1),%0" : "movel %1@(%?), %0"; + } + else if (MOTOROLA) + { + if (TARGET_COLDFIRE) + /* Load the full 32-bit PC-relative offset of + _GLOBAL_OFFSET_TABLE_ into the PIC register, then use it to + calculate the absolute value. The offset and "lea" + operation word together occupy 6 bytes. */ + return ("move.l #_GLOBAL_OFFSET_TABLE_@GOTPC, %0\n\t" + "lea (-6, %%pc, %0), %0"); + else + return "lea (%%pc, _GLOBAL_OFFSET_TABLE_@GOTPC), %0"; + } + else + return ("movel #_GLOBAL_OFFSET_TABLE_, %0\n\t" + "lea %%pc@(0,%0:l),%0"); +}) + +(define_insn "indirect_jump" + [(set (pc) (match_operand:SI 0 "address_operand" "p"))] + "" + "jmp %a0" + [(set_attr "type" "jmp")]) + +;; This should not be used unless the add/sub insns can't be. + +(define_insn "*lea" + [(set (match_operand:SI 0 "nonimmediate_operand" "=a") + (match_operand:QI 1 "address_operand" "p"))] + "" + "lea %a1,%0") + +;; This is the first machine-dependent peephole optimization. +;; It is useful when a floating value is returned from a function call +;; and then is moved into an FP register. +;; But it is mainly intended to test the support for these optimizations. + +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4))) + (set (match_operand:DF 0 "register_operand" "") + (match_operand:DF 1 "register_operand" ""))] + "FP_REG_P (operands[0]) && !FP_REG_P (operands[1])" + [(set (mem:SI (reg:SI SP_REG)) (match_dup 1)) + (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 2)) + (set (match_dup 0) (mem:DF (post_inc:SI (reg:SI SP_REG))))] + "split_di(operands + 1, 1, operands + 1, operands + 2);") + +;; Optimize a stack-adjust followed by a push of an argument. +;; This is said to happen frequently with -msoft-float +;; when there are consecutive library calls. + +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4))) + (set (match_operand:SF 0 "push_operand" "") + (match_operand:SF 1 "general_operand" ""))] + "!reg_mentioned_p (stack_pointer_rtx, operands[0])" + [(set (match_dup 0) (match_dup 1))] + "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);") + +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) + (match_operand:SI 0 "const_int_operand" ""))) + (set (match_operand:SF 1 "push_operand" "") + (match_operand:SF 2 "general_operand" ""))] + "INTVAL (operands[0]) > 4 + && !reg_mentioned_p (stack_pointer_rtx, operands[2])" + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 0))) + (set (match_dup 1) (match_dup 2))] +{ + operands[0] = GEN_INT (INTVAL (operands[0]) - 4); + operands[1] = replace_equiv_address (operands[1], stack_pointer_rtx); +}) + +;; Speed up stack adjust followed by a fullword fixedpoint push. +;; Constant operands need special care, as replacing a "pea X.w" with +;; "move.l #X,(%sp)" is often not a win. + +;; Already done by the previous csa pass, left as reference. +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4))) + (set (match_operand:SI 0 "push_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "!reg_mentioned_p (stack_pointer_rtx, operands[1])" + [(set (match_dup 0) (match_dup 1))] + "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);") + +;; Try to use moveq, after stack push has been changed into a simple move. 
+(define_peephole2 + [(match_scratch:SI 2 "d") + (set (match_operand:SI 0 "memory_operand" "") + (match_operand:SI 1 "const_int_operand" ""))] + "GET_CODE (XEXP (operands[0], 0)) != PRE_DEC + && INTVAL (operands[1]) != 0 + && IN_RANGE (INTVAL (operands[1]), -0x80, 0x7f) + && !valid_mov3q_const (INTVAL (operands[1]))" + [(set (match_dup 2) (match_dup 1)) + (set (match_dup 0) (match_dup 2))]) + +;; This sequence adds an instruction, but is two bytes shorter. +(define_peephole2 + [(match_scratch:SI 2 "d") + (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 12))) + (set (match_operand:SI 0 "push_operand" "") + (match_operand:SI 1 "const_int_operand" ""))] + "INTVAL (operands[1]) != 0 + && IN_RANGE (INTVAL (operands[1]), -0x80, 0x7f) + && !valid_mov3q_const (INTVAL (operands[1]))" + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8))) + (set (match_dup 2) (match_dup 1)) + (set (match_dup 0) (match_dup 2))] + "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);") + +;; Changing pea X.w into a move.l is no real win here. +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) + (match_operand:SI 0 "const_int_operand" ""))) + (set (match_operand:SI 1 "push_operand" "") + (match_operand:SI 2 "general_operand" ""))] + "INTVAL (operands[0]) > 4 + && !reg_mentioned_p (stack_pointer_rtx, operands[2]) + && !(CONST_INT_P (operands[2]) && INTVAL (operands[2]) != 0 + && IN_RANGE (INTVAL (operands[2]), -0x8000, 0x7fff) + && !valid_mov3q_const (INTVAL (operands[2])))" + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 0))) + (set (match_dup 1) (match_dup 2))] +{ + operands[0] = GEN_INT (INTVAL (operands[0]) - 4); + operands[1] = replace_equiv_address (operands[1], stack_pointer_rtx); +}) + +;; Speed up pushing a single byte/two bytes but leaving four bytes of space +;; (which differs slightly between m680x0 and ColdFire). 
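+;; (Illustrative note, Motorola syntax, %d0 as a placeholder: instead of
+;; adjusting the stack and then storing the byte into its slot, e.g.
+;;	subq.l #4,%sp
+;;	move.b %d0,(3,%sp)
+;; the peepholes below push the whole register at once,
+;;	move.l %d0,-(%sp)
+;; leaving the unused bytes of the slot as don't-cares; on big-endian
+;; m68k the byte of interest still lands at offset 3.)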
+ +(define_peephole2 + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -4))) + (set (match_operand:QI 0 "memory_operand" "") + (match_operand:QI 1 "register_operand" ""))] + "!reg_mentioned_p (stack_pointer_rtx, operands[1]) + && GET_CODE (XEXP (operands[0], 0)) == PLUS + && rtx_equal_p (XEXP (XEXP (operands[0], 0), 0), stack_pointer_rtx) + && CONST_INT_P (XEXP (XEXP (operands[0], 0), 1)) + && INTVAL (XEXP (XEXP (operands[0], 0), 1)) == 3" + [(set (match_dup 0) (match_dup 1))] +{ + rtx addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx); + operands[0] = adjust_automodify_address (operands[0], SImode, addr, -3); + operands[1] = simplify_gen_subreg (SImode, operands[1], QImode, 0); +}) + +(define_peephole2 + [(set (match_operand:QI 0 "push_operand" "") + (match_operand:QI 1 "register_operand" "")) + (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -3)))] + "!reg_mentioned_p (stack_pointer_rtx, operands[1])" + [(set (match_dup 0) (match_dup 1))] +{ + operands[0] = adjust_automodify_address (operands[0], SImode, + XEXP (operands[0], 0), -3); + operands[1] = simplify_gen_subreg (SImode, operands[1], QImode, 0); +}) + +(define_peephole2 + [(set (match_operand:HI 0 "push_operand" "") + (match_operand:HI 1 "register_operand" "")) + (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2)))] + "!reg_mentioned_p (stack_pointer_rtx, operands[1])" + [(set (match_dup 0) (match_dup 1))] +{ + operands[0] = adjust_automodify_address (operands[0], SImode, + XEXP (operands[0], 0), -2); + operands[1] = simplify_gen_subreg (SImode, operands[1], HImode, 0); +}) + +;; Optimize a series of strict_low_part assignments + +(define_peephole2 + [(set (match_operand:SI 0 "register_operand" "") + (const_int 0)) + (set (strict_low_part (match_operand:HI 1 "register_operand" "")) + (match_operand:HI 2 "general_operand" ""))] + "REGNO (operands[0]) == REGNO (operands[1]) + && strict_low_part_peephole_ok (HImode, insn, operands[0])" + [(set (strict_low_part (match_dup 1)) (match_dup 2))] + "") + +(define_peephole2 + [(set (match_operand:SI 0 "register_operand" "") + (const_int 0)) + (set (strict_low_part (match_operand:QI 1 "register_operand" "")) + (match_operand:QI 2 "general_operand" ""))] + "REGNO (operands[0]) == REGNO (operands[1]) + && strict_low_part_peephole_ok (QImode, insn, operands[0])" + [(set (strict_low_part (match_dup 1)) (match_dup 2))] + "") + +;; dbCC peepholes +;; +;; Turns +;; loop: +;; [ ... ] +;; jCC label ; abnormal loop termination +;; dbra dN, loop ; normal loop termination +;; +;; Into +;; loop: +;; [ ... ] +;; dbCC dN, loop +;; jCC label +;; +;; Which moves the jCC condition outside the inner loop for free. +;; + +(define_peephole + [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p" + [(cc0) (const_int 0)]) + (label_ref (match_operand 2 "" "")) + (pc))) + (parallel + [(set (pc) + (if_then_else + (ne (match_operand:HI 0 "register_operand" "") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))])] + "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! 
flags_in_68881 ()" +{ + CC_STATUS_INIT; + output_dbcc_and_branch (operands); + return ""; +}) + +(define_peephole + [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p" + [(cc0) (const_int 0)]) + (label_ref (match_operand 2 "" "")) + (pc))) + (parallel + [(set (pc) + (if_then_else + (ne (match_operand:SI 0 "register_operand" "") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))])] + "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()" +{ + CC_STATUS_INIT; + output_dbcc_and_branch (operands); + return ""; +}) + +(define_peephole + [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p" + [(cc0) (const_int 0)]) + (label_ref (match_operand 2 "" "")) + (pc))) + (parallel + [(set (pc) + (if_then_else + (ge (plus:HI (match_operand:HI 0 "register_operand" "") + (const_int -1)) + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))])] + "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()" +{ + CC_STATUS_INIT; + output_dbcc_and_branch (operands); + return ""; +}) + +(define_peephole + [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p" + [(cc0) (const_int 0)]) + (label_ref (match_operand 2 "" "")) + (pc))) + (parallel + [(set (pc) + (if_then_else + (ge (plus:SI (match_operand:SI 0 "register_operand" "") + (const_int -1)) + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))])] + "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()" +{ + CC_STATUS_INIT; + output_dbcc_and_branch (operands); + return ""; +}) + + +(define_insn "extendsfxf2" + [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f") + (float_extend:XF (match_operand:SF 1 "general_operand" "f,rmF")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[0]) && FP_REG_P (operands[1])) + { + if (REGNO (operands[0]) == REGNO (operands[1])) + { + /* Extending float to double in an fp-reg is a no-op. + NOTICE_UPDATE_CC has already assumed that the + cc will be set. So cancel what it did. */ + cc_status = cc_prev_status; + return ""; + } + return "f%$move%.x %1,%0"; + } + if (FP_REG_P (operands[0])) + { + if (FP_REG_P (operands[1])) + return "f%$move%.x %1,%0"; + else if (ADDRESS_REG_P (operands[1])) + return "move%.l %1,%-\;f%$move%.s %+,%0"; + else if (GET_CODE (operands[1]) == CONST_DOUBLE) + return output_move_const_single (operands); + return "f%$move%.s %f1,%0"; + } + return "fmove%.x %f1,%0"; +}) + + +(define_insn "extenddfxf2" + [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f") + (float_extend:XF + (match_operand:DF 1 "general_operand" "f,rmE")))] + "TARGET_68881" +{ + if (FP_REG_P (operands[0]) && FP_REG_P (operands[1])) + { + if (REGNO (operands[0]) == REGNO (operands[1])) + { + /* Extending float to double in an fp-reg is a no-op. + NOTICE_UPDATE_CC has already assumed that the + cc will be set. So cancel what it did. 
*/ + cc_status = cc_prev_status; + return ""; + } + return "fmove%.x %1,%0"; + } + if (FP_REG_P (operands[0])) + { + if (REG_P (operands[1])) + { + rtx xoperands[2]; + xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); + output_asm_insn ("move%.l %1,%-", xoperands); + output_asm_insn ("move%.l %1,%-", operands); + return "f%&move%.d %+,%0"; + } + if (GET_CODE (operands[1]) == CONST_DOUBLE) + return output_move_const_double (operands); + return "f%&move%.d %f1,%0"; + } + return "fmove%.x %f1,%0"; +}) + +(define_insn "truncxfdf2" + [(set (match_operand:DF 0 "nonimmediate_operand" "=m,!r") + (float_truncate:DF + (match_operand:XF 1 "general_operand" "f,f")))] + "TARGET_68881" +{ + if (REG_P (operands[0])) + { + output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands); + operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); + return "move%.l %+,%0"; + } + return "fmove%.d %f1,%0"; +}) + +(define_insn "truncxfsf2" + [(set (match_operand:SF 0 "nonimmediate_operand" "=dm") + (float_truncate:SF + (match_operand:XF 1 "general_operand" "f")))] + "TARGET_68881" + "fmove%.s %f1,%0") + +(define_insn "sin2" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (unspec:FP + [(match_operand:FP 1 "general_operand" "fm")] UNSPEC_SIN))] + "TARGET_68881 && flag_unsafe_math_optimizations" +{ + if (FP_REG_P (operands[1])) + return "fsin%.x %1,%0"; + else + return "fsin%. %1,%0"; +}) + +(define_insn "cos2" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (unspec:FP + [(match_operand:FP 1 "general_operand" "fm")] UNSPEC_COS))] + "TARGET_68881 && flag_unsafe_math_optimizations" +{ + if (FP_REG_P (operands[1])) + return "fcos%.x %1,%0"; + else + return "fcos%. %1,%0"; +}) + +;; Unconditional traps are assumed to have (const_int 1) for the condition. +(define_insn "trap" + [(trap_if (const_int 1) (const_int 7))] + "" + "trap #7" + [(set_attr "type" "trap")]) + +(define_expand "ctrapdi4" + [(trap_if (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (match_operand:SI 3 "const1_operand" ""))] + "TARGET_68020" +{ + if (operands[2] == const0_rtx) + emit_insn (gen_tstdi (operands[1])); + else + emit_insn (gen_cmpdi (operands[1], operands[2])); + operands[1] = cc0_rtx; + operands[2] = const0_rtx; +}) + +(define_expand "ctrapsi4" + [(set (cc0) + (compare (match_operand:SI 1 "nonimmediate_operand" "") + (match_operand:SI 2 "general_operand" ""))) + (trap_if (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (match_operand:SI 3 "const1_operand" ""))] + "TARGET_68020" + "") + +(define_expand "ctraphi4" + [(set (cc0) + (compare (match_operand:HI 1 "nonimmediate_src_operand" "") + (match_operand:HI 2 "general_src_operand" ""))) + (trap_if (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (match_operand:SI 3 "const1_operand" ""))] + "TARGET_68020" + "") + +(define_expand "ctrapqi4" + [(set (cc0) + (compare (match_operand:QI 1 "nonimmediate_src_operand" "") + (match_operand:QI 2 "general_src_operand" ""))) + (trap_if (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (match_operand:SI 3 "const1_operand" ""))] + "TARGET_68020" + "") + +(define_insn "*conditional_trap" + [(trap_if (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (match_operand:SI 1 "const1_operand" "I"))] + "TARGET_68020 && ! 
flags_in_68881 ()" +{ + switch (GET_CODE (operands[0])) + { + case EQ: return "trapeq"; + case NE: return "trapne"; + case GT: return "trapgt"; + case GTU: return "traphi"; + case LT: return "traplt"; + case LTU: return "trapcs"; + case GE: return "trapge"; + case GEU: return "trapcc"; + case LE: return "traple"; + case LEU: return "trapls"; + default: gcc_unreachable (); + } +}) + +;; These are to prevent the scheduler from moving stores to the frame +;; before the stack adjustment. +(define_insn "stack_tie" + [(set (mem:BLK (scratch)) + (unspec:BLK [(match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "register_operand" "r")] + UNSPEC_TIE))] + "" + "" + [(set_attr "type" "ignore")]) + +;; Instruction that subscribes one word in ColdFire instruction buffer. +;; This instruction is used within scheduler only and should not appear +;; in the instruction stream. +(define_insn "ib" + [(unspec [(const_int 0)] UNSPEC_IB)] + "" + "#" + [(set_attr "type" "ib")]) + +(include "cf.md") -- cgit v1.2.3