Diffstat (limited to 'gcc/config/m68k')
-rw-r--r--  gcc/config/m68k/cf.md                      2250
-rw-r--r--  gcc/config/m68k/constraints.md              164
-rw-r--r--  gcc/config/m68k/crti.s                       44
-rw-r--r--  gcc/config/m68k/crtn.s                       40
-rw-r--r--  gcc/config/m68k/fpgnulib.c                  595
-rw-r--r--  gcc/config/m68k/ieee.opt                     24
-rw-r--r--  gcc/config/m68k/lb1sf68.asm                4116
-rw-r--r--  gcc/config/m68k/linux-unwind.h              158
-rw-r--r--  gcc/config/m68k/linux.h                     242
-rw-r--r--  gcc/config/m68k/m68020-elf.h                 30
-rw-r--r--  gcc/config/m68k/m68k-devices.def            189
-rw-r--r--  gcc/config/m68k/m68k-modes.def               25
-rw-r--r--  gcc/config/m68k/m68k-none.h                  19
-rw-r--r--  gcc/config/m68k/m68k-protos.h               102
-rw-r--r--  gcc/config/m68k/m68k.c                     6615
-rw-r--r--  gcc/config/m68k/m68k.h                     1034
-rw-r--r--  gcc/config/m68k/m68k.md                    7808
-rw-r--r--  gcc/config/m68k/m68k.opt                    188
-rw-r--r--  gcc/config/m68k/m68kelf.h                   164
-rw-r--r--  gcc/config/m68k/m68kemb.h                    53
-rw-r--r--  gcc/config/m68k/math-68881.h                529
-rw-r--r--  gcc/config/m68k/netbsd-elf.h                315
-rw-r--r--  gcc/config/m68k/openbsd.h                    89
-rw-r--r--  gcc/config/m68k/predicates.md               246
-rw-r--r--  gcc/config/m68k/print-sysroot-suffix.sh      81
-rw-r--r--  gcc/config/m68k/rtemself.h                   33
-rw-r--r--  gcc/config/m68k/t-cf                          7
-rw-r--r--  gcc/config/m68k/t-crtstuff                   10
-rw-r--r--  gcc/config/m68k/t-floatlib                   31
-rw-r--r--  gcc/config/m68k/t-linux                      33
-rw-r--r--  gcc/config/m68k/t-m68k                        4
-rw-r--r--  gcc/config/m68k/t-m68kbare                    4
-rw-r--r--  gcc/config/m68k/t-m68kelf                     4
-rw-r--r--  gcc/config/m68k/t-mlibs                     115
-rw-r--r--  gcc/config/m68k/t-openbsd                     4
-rw-r--r--  gcc/config/m68k/t-rtems                       9
-rw-r--r--  gcc/config/m68k/t-slibgcc-elf-ver             3
-rw-r--r--  gcc/config/m68k/t-uclinux                    36
-rw-r--r--  gcc/config/m68k/uclinux-oldabi.h             70
-rw-r--r--  gcc/config/m68k/uclinux.h                    72
-rw-r--r--  gcc/config/m68k/uclinux.opt                  36
41 files changed, 25591 insertions, 0 deletions
diff --git a/gcc/config/m68k/cf.md b/gcc/config/m68k/cf.md
new file mode 100644
index 000000000..d6f1e92c3
--- /dev/null
+++ b/gcc/config/m68k/cf.md
@@ -0,0 +1,2250 @@
+;; ColdFire V1, V2, V3 and V4/V4e DFA description.
+;; Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery Inc., www.codesourcery.com
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Instruction Buffer
+(define_automaton "cfv123_ib")
+
+;; These pseudo units are used to model the instruction buffer of ColdFire
+;; cores.  An instruction of size N can be issued only when cf_ib_wN is
+;; available.
+(define_cpu_unit "cf_ib_w1, cf_ib_w2, cf_ib_w3" "cfv123_ib")
+
+;; Instruction occupies 1 word in the instruction buffer.
+(define_reservation "cf_ib1" "cf_ib_w1")
+;; Instruction occupies 2 words in the instruction buffer.
+(define_reservation "cf_ib2" "cf_ib_w1+cf_ib_w2")
+;; Instruction occupies 3 words in the instruction buffer.
+(define_reservation "cf_ib3" "cf_ib_w1+cf_ib_w2+cf_ib_w3")
+
+;; This reservation is used at the start of each cycle to set up the maximal
+;; length of instruction that can be issued on the current cycle.
+;; E.g., when this reservation is applied for the first time, the cf_ib_w3
+;; resource is marked busy, thus filtering out all 3-word insns.
+;;
+;; This reservation requires a deterministic automaton.
+;;
+;; At each cycle, given that the memory bus is available (i.e., there is no
+;; pending memory operation), the instruction fetch pipeline (IFP) prefetches
+;; two instruction words into the instruction buffer (IB).
+(define_insn_reservation "cf_ib1" 0
+ (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "ib"))
+ "cf_ib_w3|cf_ib_w2|cf_ib_w1")
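+
+;; Editorial note (an illustration, not part of the original patch): a worked
+;; example of how these units gate the issue width.  Issuing the dummy "ib"
+;; insn once reserves cf_ib_w3, the first free alternative of
+;; "cf_ib_w3|cf_ib_w2|cf_ib_w1", so a 3-word insn - which needs
+;; "cf_ib3" = cf_ib_w1+cf_ib_w2+cf_ib_w3 - cannot issue, while a 2-word insn
+;; reserving "cf_ib2" = cf_ib_w1+cf_ib_w2 still can.  Issuing it a second
+;; time also blocks cf_ib_w2, leaving room for 1-word insns only.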
+
+;; Operand Execution Pipeline
+(define_automaton "cfv123_oep")
+
+(define_cpu_unit "cf_dsoc,cf_agex" "cfv123_oep")
+
+;; A memory unit that is referred to as 'certain hardware resources' in
+;; ColdFire reference manuals.  This unit remains occupied for two cycles
+;; after the last dsoc cycle of a store - hence the 2-cycle delay between
+;; two consecutive stores.
+(define_automaton "cfv123_chr")
+
+(define_cpu_unit "cf_chr" "cfv123_chr")
+
+;; Memory bus
+(define_automaton "cfv123_mem")
+
+;; When the memory bus is subscribed, the instruction buffer won't get its
+;; portion this cycle.  To model that, we query whether the cf_mem units are
+;; subscribed and adjust the number of prefetched instruction words
+;; accordingly.
+;;
+(define_query_cpu_unit "cf_mem1, cf_mem2" "cfv123_mem")
+
+(define_reservation "cf_mem" "cf_mem1+cf_mem2")
+
+(define_automaton "cf_mac")
+
+(define_cpu_unit "cf_mac1,cf_mac2,cf_mac3,cf_mac4"
+ "cf_mac")
+
+(define_automaton "cfv123_guess")
+
+(define_query_cpu_unit "cfv123_guess" "cfv123_guess")
+
+;; Register to register move.
+;; Takes 1 cycle.
+(define_reservation "cfv123_alu_00"
+ "cf_dsoc,cf_agex")
+
+;; Load from a memory location.
+;; Takes 3 cycles.
+(define_reservation "cfv12_alu_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex")
+;; Takes 2 cycles.
+(define_reservation "cfv12_omove_10"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex")
+;; Takes 4 cycles.
+(define_reservation "cfv3_alu_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex")
+;; Takes 3 cycles.
+(define_reservation "cfv3_omove_10"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex")
+
+;; Load from an indexed location.
+;; Takes 4 cycles.
+(define_reservation "cfv12_alu_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex")
+;; Takes 3 cycles.
+(define_reservation "cfv12_omove_i0"
+ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex")
+;; Takes 5 cycles.
+(define_reservation "cfv3_alu_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex")
+;; Takes 4 cycles.
+(define_reservation "cfv3_omove_i0"
+ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex")
+
+;; Store to a memory location.
+;; Takes 1 cycle.
+(define_reservation "cfv12_alu_01"
+ "cf_dsoc+cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 1 cycle.
+(define_reservation "cfv3_alu_01"
+ "cf_dsoc+cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Store to an indexed location.
+;; Takes 2 cycles.
+(define_reservation "cfv12_alu_0i"
+ "cf_dsoc+cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 2 cycles.
+(define_reservation "cfv3_alu_0i"
+ "cf_dsoc+cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Load from a memory location and store to a memory location.
+;; Takes 3 cycles.
+(define_reservation "cfv12_alu_11"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 2 cycles.
+(define_reservation "cfv12_omove_11"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 4 cycles.
+(define_reservation "cfv3_alu_11"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+;; Takes 3 cycles.
+(define_reservation "cfv3_omove_11"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Load from an indexed location and store to a memory location.
+;; Takes 4 cycles.
+(define_reservation "cfv12_alu_i1"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 3 cycles.
+(define_reservation "cfv12_omove_i1"
+ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 5 cycles.
+(define_reservation "cfv3_alu_i1"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+;; Takes 4 cycles.
+(define_reservation "cfv3_omove_i1"
+ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Load from a memory location and store to an indexed location.
+;; Takes 4 cycles.
+(define_reservation "cfv12_alu_1i"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 3 cycles.
+(define_reservation "cfv12_omove_1i"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 5 cycles.
+(define_reservation "cfv3_alu_1i"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+;; Takes 4 cycles.
+(define_reservation "cfv3_omove_1i"
+ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Lea operation for a memory location.
+;; Takes 1 cycle.
+(define_reservation "cfv123_lea_10"
+ "cf_dsoc,cf_agex")
+
+;; Lea operation for an indexed location.
+;; Takes 2 cycles.
+(define_reservation "cfv123_lea_i0"
+ "cf_dsoc,cf_agex,cf_agex")
+
+;; Pea operation for a memory location.
+;; Takes 2 cycles.
+(define_reservation "cfv12_pea_11"
+ "cf_dsoc,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 2 cycles.
+(define_reservation "cfv3_pea_11"
+ "cf_dsoc,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Pea operation for an indexed location.
+;; Takes 3 cycles.
+(define_reservation "cfv12_pea_i1"
+ "cf_dsoc,cf_agex,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr")
+;; Takes 3 cycles.
+(define_reservation "cfv3_pea_i1"
+ "cf_dsoc,cf_agex,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr")
+
+;; Long multiplication with no mac.
+;; Takes 9-18 cycles.
+(define_reservation "cfv123_mul_l_00"
+ "cf_dsoc,(cf_agex+cf_dsoc)*17,cf_agex")
+
+;; Word multiplication with no mac.
+;; Takes 9 cycles.
+(define_reservation "cfv123_mul_w_00"
+ "cf_dsoc,(cf_agex+cf_dsoc)*8,cf_agex")
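+
+;; Editorial note on the reservation syntax used above: in a reservation
+;; string "," separates consecutive cycles, "+" reserves units in parallel,
+;; and "*N" repeats a pattern for N cycles.  So
+;;   "cf_dsoc,(cf_agex+cf_dsoc)*8,cf_agex"
+;; reserves cf_dsoc on cycle 1, both OEP stages on each of cycles 2-9, and
+;; cf_agex alone on the final cycle.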
+
+;; Long multiplication with no mac.
+;; Takes 11-20 cycles.
+(define_reservation "cfv12_mul_l_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*17,cf_agex")
+;; Takes 12-21 cycles.
+(define_reservation "cfv3_mul_l_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*17,cf_agex")
+
+;; Word multiplication with no mac.
+;; Takes 11 cycles.
+(define_reservation "cfv12_mul_w_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*8,cf_agex")
+;; Takes 12 cycles.
+(define_reservation "cfv3_mul_w_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*8,cf_agex")
+
+;; Word multiplication with no mac.
+;; Takes 12 cycles.
+(define_reservation "cfv12_mul_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*8,cf_agex")
+;; Takes 13 cycles.
+(define_reservation "cfv3_mul_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*8,cf_agex")
+
+;; Long multiplication with mac.
+;; Takes 5 cycles.
+(define_reservation "cfv123_mac_l_00"
+ "cf_dsoc,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+
+;; Word multiplication with mac.
+;; Takes 3 cycles.
+(define_reservation "cfv123_mac_w_00"
+ "cf_dsoc,cf_agex,cf_mac1,cf_mac2")
+
+;; Long multiplication with mac.
+;; Takes 7 cycles.
+(define_reservation "cfv12_mac_l_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+;; Takes 8 cycles.
+(define_reservation "cfv3_mac_l_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+
+;; Word multiplication with mac.
+;; Takes 5 cycles.
+(define_reservation "cfv12_mac_w_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2")
+;; Takes 6 cycles.
+(define_reservation "cfv3_mac_w_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2")
+
+;; Word multiplication with mac.
+;; Takes 6 cycles.
+(define_reservation "cfv12_mac_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2")
+;; Takes 7 cycles.
+(define_reservation "cfv3_mac_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2")
+
+;; Multiplication with emac.
+;; Takes 4 cycles.
+(define_reservation "cfv123_emac_00"
+ "cf_dsoc,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+
+;; Multiplication with emac.
+;; Takes 6 cycles.
+(define_reservation "cfv12_emac_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+;; Takes 7 cycles.
+(define_reservation "cfv3_emac_10"
+ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+
+;; Word multiplication with emac.
+;; Takes 7 cycles.
+(define_reservation "cfv12_emac_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+;; Takes 8 cycles.
+(define_reservation "cfv3_emac_w_i0"
+ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4")
+
+;; Return instruction.
+;; ??? As return reads the target address from the stack, use a mem-read
+;; ??? reservation for it.
+;; ??? It's not clear what the core does during these 5 cycles.
+;; ??? Luckily, we don't care that much about an insn that won't be moved.
+;; Takes 5 cycles.
+(define_reservation "cfv12_rts" "cfv12_alu_10")
+;; Takes 8 cycles.
+(define_reservation "cfv3_rts" "cfv3_alu_10")
+
+;; Call instruction.
+;; ??? It's not clear what reservation is best to use for calls.
+;; ??? For now we use mem-write + return reservations to reflect the pushing
+;; ??? and popping of the return address to and from the stack.
+;; Takes 3 cycles.
+(define_reservation "cfv12_call" "cfv12_alu_01,cfv12_rts")
+;; Takes 1/5 cycles.
+(define_reservation "cfv3_call" "cfv3_alu_01,cfv3_rts")
+
+;; Conditional branch instruction.
+;; ??? Branch reservations are unclear to me so far. Luckily, we don't care
+;; ??? that much about branches.
+;; Takes 2 cycles.
+(define_reservation "cfv12_bcc" "cfv123_alu_00")
+;; Takes 1 cycle.
+(define_reservation "cfv3_bcc" "cfv123_alu_00")
+
+;; Unconditional branch instruction.
+;; Takes 2 cycles.
+(define_reservation "cfv12_bra" "cfv12_alu_01")
+;; Takes 1 cycle.
+(define_reservation "cfv3_bra" "cfv3_alu_01")
+
+;; Computed jump instruction.
+;; Takes 3 cycles.
+(define_reservation "cfv12_jmp"
+ "(cf_dsoc+cf_agex)*3")
+;; Takes 5 cycles.
+(define_reservation "cfv3_jmp"
+ "(cf_dsoc+cf_agex)*5")
+
+;; Instruction reservations.
+
+;; The reservations below are simple derivations of the reservations above.
+;; Each reservation above expands into 3 reservations below - one for each
+;; instruction size.
+;; The number at the end of a reservation's name is the size of the
+;; instruction.
+
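+;; Editorial note: for example, "cfv123_alu_00" above expands into
+;; "cfv123_alu_00_1", "cfv123_alu_00_2" and "cfv123_alu_00_3" below; the
+;; three differ only in whether they add cf_ib1, cf_ib2 or cf_ib3 to the
+;; reservation and in the "size" attribute value they match.
+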
+(define_insn_reservation "cfv123_alu_00_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "00"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_alu_00")
+
+(define_insn_reservation "cfv123_alu_00_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "00"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_alu_00")
+
+(define_insn_reservation "cfv123_alu_00_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "00"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_alu_00")
+
+(define_insn_reservation "cfv1_alu_10_1" 3
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_alu_10")
+
+(define_insn_reservation "cfv1_alu_10_2" 3
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_alu_10")
+
+(define_insn_reservation "cfv1_alu_10_3" 3
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_10")
+
+(define_insn_reservation "cfv1_omove_10_1" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_omove_10")
+
+(define_insn_reservation "cfv1_omove_10_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_omove_10")
+
+(define_insn_reservation "cfv1_omove_10_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_10")
+
+(define_insn_reservation "cfv2_alu_10_1" 3
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_alu_10")
+
+(define_insn_reservation "cfv2_alu_10_2" 3
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_alu_10")
+
+(define_insn_reservation "cfv2_alu_10_3" 3
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_10")
+
+(define_insn_reservation "cfv2_omove_10_1" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_omove_10")
+
+(define_insn_reservation "cfv2_omove_10_2" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_omove_10")
+
+(define_insn_reservation "cfv2_omove_10_3" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_10")
+
+(define_insn_reservation "cfv3_alu_10_1" 4
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_alu_10")
+
+(define_insn_reservation "cfv3_alu_10_2" 4
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_alu_10")
+
+(define_insn_reservation "cfv3_alu_10_3" 4
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_10")
+
+(define_insn_reservation "cfv3_omove_10_1" 3
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_omove_10")
+
+(define_insn_reservation "cfv3_omove_10_2" 3
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_omove_10")
+
+(define_insn_reservation "cfv3_omove_10_3" 3
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "10"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_omove_10")
+
+(define_insn_reservation "cfv1_alu_i0_2" 4
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_i0")
+
+(define_insn_reservation "cfv1_alu_i0_3" 4
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_i0")
+
+(define_insn_reservation "cfv1_omove_i0_2" 3
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_i0")
+
+(define_insn_reservation "cfv1_omove_i0_3" 3
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_i0")
+
+(define_insn_reservation "cfv2_alu_i0_2" 4
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_i0")
+
+(define_insn_reservation "cfv2_alu_i0_3" 4
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_i0")
+
+(define_insn_reservation "cfv2_omove_i0_2" 3
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_i0")
+
+(define_insn_reservation "cfv2_omove_i0_3" 3
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_i0")
+
+(define_insn_reservation "cfv3_alu_i0_2" 5
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_alu_i0")
+
+(define_insn_reservation "cfv3_alu_i0_3" 5
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_i0")
+
+(define_insn_reservation "cfv3_omove_i0_2" 4
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_omove_i0")
+
+(define_insn_reservation "cfv3_omove_i0_3" 4
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_omove_i0")
+
+(define_insn_reservation "cfv12_alu_01_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_alu_01")
+
+(define_insn_reservation "cfv12_alu_01_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_alu_01")
+
+(define_insn_reservation "cfv12_alu_01_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_01")
+
+(define_insn_reservation "cfv3_alu_01_1" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_alu_01")
+
+(define_insn_reservation "cfv3_alu_01_2" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_alu_01")
+
+(define_insn_reservation "cfv3_alu_01_3" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "01"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_01")
+
+(define_insn_reservation "cfv12_alu_0i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "0i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_0i")
+
+(define_insn_reservation "cfv12_alu_0i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_0i")
+
+(define_insn_reservation "cfv3_alu_0i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "0i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_alu_0i")
+
+(define_insn_reservation "cfv3_alu_0i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_0i")
+
+(define_insn_reservation "cfv1_alu_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_alu_11")
+
+(define_insn_reservation "cfv1_alu_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_alu_11")
+
+(define_insn_reservation "cfv1_alu_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_11")
+
+(define_insn_reservation "cfv1_omove_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_omove_11")
+
+(define_insn_reservation "cfv1_omove_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_omove_11")
+
+(define_insn_reservation "cfv1_omove_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_11")
+
+(define_insn_reservation "cfv2_alu_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_alu_11")
+
+(define_insn_reservation "cfv2_alu_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_alu_11")
+
+(define_insn_reservation "cfv2_alu_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_11")
+
+(define_insn_reservation "cfv2_omove_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_omove_11")
+
+(define_insn_reservation "cfv2_omove_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_omove_11")
+
+(define_insn_reservation "cfv2_omove_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_11")
+
+(define_insn_reservation "cfv3_alu_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_alu_11")
+
+(define_insn_reservation "cfv3_alu_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+	    (eq_attr "op_mem" "11"))
+       (eq_attr "size" "2"))
+ "cf_ib2+cfv3_alu_11")
+
+(define_insn_reservation "cfv3_alu_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_11")
+
+(define_insn_reservation "cfv3_omove_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_omove_11")
+
+(define_insn_reservation "cfv3_omove_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+	    (eq_attr "op_mem" "11"))
+       (eq_attr "size" "2"))
+ "cf_ib2+cfv3_omove_11")
+
+(define_insn_reservation "cfv3_omove_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_omove_11")
+
+(define_insn_reservation "cfv1_alu_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_i1")
+
+(define_insn_reservation "cfv1_alu_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_i1")
+
+(define_insn_reservation "cfv1_omove_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_i1")
+
+(define_insn_reservation "cfv1_omove_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_i1")
+
+(define_insn_reservation "cfv2_alu_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_i1")
+
+(define_insn_reservation "cfv2_alu_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_i1")
+
+(define_insn_reservation "cfv2_omove_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_i1")
+
+(define_insn_reservation "cfv2_omove_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_i1")
+
+(define_insn_reservation "cfv3_alu_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_alu_i1")
+
+(define_insn_reservation "cfv3_alu_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_i1")
+
+(define_insn_reservation "cfv3_omove_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_omove_i1")
+
+(define_insn_reservation "cfv3_omove_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_omove_i1")
+
+(define_insn_reservation "cfv1_alu_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_1i")
+
+(define_insn_reservation "cfv1_alu_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_1i")
+
+(define_insn_reservation "cfv1_omove_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_1i")
+
+(define_insn_reservation "cfv1_omove_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1")
+ (eq_attr "type" "
+clr,clr_l,mov3q_l,move,moveq_l,tst,
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_1i")
+
+(define_insn_reservation "cfv2_alu_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_alu_1i")
+
+(define_insn_reservation "cfv2_alu_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_alu_1i")
+
+(define_insn_reservation "cfv2_omove_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_omove_1i")
+
+(define_insn_reservation "cfv2_omove_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv2")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_omove_1i")
+
+(define_insn_reservation "cfv3_alu_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_alu_1i")
+
+(define_insn_reservation "cfv3_alu_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift,
+clr,clr_l,mov3q_l,move,moveq_l,tst"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_alu_1i")
+
+(define_insn_reservation "cfv3_omove_1i_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_omove_1i")
+
+(define_insn_reservation "cfv3_omove_1i_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "
+move_l,tst_l"))
+ (eq_attr "op_mem" "1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_omove_1i")
+
+(define_insn_reservation "cfv123_lea_10_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "lea"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_lea_10")
+
+(define_insn_reservation "cfv123_lea_10_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "lea"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_lea_10")
+
+(define_insn_reservation "cfv123_lea_10_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "lea"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_lea_10")
+
+(define_insn_reservation "cfv123_lea_i0_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "lea"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv123_lea_i0")
+
+(define_insn_reservation "cfv123_lea_i0_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "lea"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_lea_i0")
+
+(define_insn_reservation "cfv12_pea_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_pea_11")
+
+(define_insn_reservation "cfv12_pea_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_pea_11")
+
+(define_insn_reservation "cfv12_pea_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_pea_11")
+
+(define_insn_reservation "cfv3_pea_11_1" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_pea_11")
+
+(define_insn_reservation "cfv3_pea_11_2" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_pea_11")
+
+(define_insn_reservation "cfv3_pea_11_3" 1
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_pea_11")
+
+(define_insn_reservation "cfv12_pea_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_pea_i1")
+
+(define_insn_reservation "cfv12_pea_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_pea_i1")
+
+(define_insn_reservation "cfv3_pea_i1_2" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_pea_i1")
+
+(define_insn_reservation "cfv3_pea_i1_3" 2
+ (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_pea_i1")
+
+(define_insn_reservation "cfv123_mul_l_00_1" 18
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_mul_l_00")
+
+(define_insn_reservation "cfv123_mul_l_00_2" 18
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_mul_l_00")
+
+(define_insn_reservation "cfv123_mul_l_00_3" 18
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_mul_l_00")
+
+(define_insn_reservation "cfv123_mul_w_00_1" 9
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_mul_w_00")
+
+(define_insn_reservation "cfv123_mul_w_00_2" 9
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_mul_w_00")
+
+(define_insn_reservation "cfv123_mul_w_00_3" 9
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_mul_w_00")
+
+(define_insn_reservation "cfv12_mul_l_10_1" 20
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_mul_l_10")
+
+(define_insn_reservation "cfv12_mul_l_10_2" 20
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_mul_l_10")
+
+(define_insn_reservation "cfv12_mul_l_10_3" 20
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mul_l_10")
+
+(define_insn_reservation "cfv3_mul_l_10_1" 21
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_mul_l_10")
+
+(define_insn_reservation "cfv3_mul_l_10_2" 21
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_mul_l_10")
+
+(define_insn_reservation "cfv3_mul_l_10_3" 21
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mul_l_10")
+
+(define_insn_reservation "cfv12_mul_w_10_1" 11
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_mul_w_10")
+
+(define_insn_reservation "cfv12_mul_w_10_2" 11
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_mul_w_10")
+
+(define_insn_reservation "cfv12_mul_w_10_3" 11
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mul_w_10")
+
+(define_insn_reservation "cfv3_mul_w_10_1" 12
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_mul_w_10")
+
+(define_insn_reservation "cfv3_mul_w_10_2" 12
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_mul_w_10")
+
+(define_insn_reservation "cfv3_mul_w_10_3" 12
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mul_w_10")
+
+(define_insn_reservation "cfv12_mul_w_i0_2" 12
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_mul_w_i0")
+
+(define_insn_reservation "cfv12_mul_w_i0_3" 12
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mul_w_i0")
+
+(define_insn_reservation "cfv3_mul_w_i0_2" 13
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_mul_w_i0")
+
+(define_insn_reservation "cfv3_mul_w_i0_3" 13
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "no"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mul_w_i0")
+
+(define_insn_reservation "cfv123_mac_l_00_1" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_mac_l_00")
+
+(define_insn_reservation "cfv123_mac_l_00_2" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_mac_l_00")
+
+(define_insn_reservation "cfv123_mac_l_00_3" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_mac_l_00")
+
+(define_insn_reservation "cfv123_mac_w_00_1" 3
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_mac_w_00")
+
+(define_insn_reservation "cfv123_mac_w_00_2" 3
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_mac_w_00")
+
+(define_insn_reservation "cfv123_mac_w_00_3" 3
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_mac_w_00")
+
+(define_insn_reservation "cfv12_mac_l_10_1" 7
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_mac_l_10")
+
+(define_insn_reservation "cfv12_mac_l_10_2" 7
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_mac_l_10")
+
+(define_insn_reservation "cfv12_mac_l_10_3" 7
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mac_l_10")
+
+(define_insn_reservation "cfv3_mac_l_10_1" 8
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_mac_l_10")
+
+(define_insn_reservation "cfv3_mac_l_10_2" 8
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_mac_l_10")
+
+(define_insn_reservation "cfv3_mac_l_10_3" 8
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mac_l_10")
+
+(define_insn_reservation "cfv12_mac_w_10_1" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_mac_w_10")
+
+(define_insn_reservation "cfv12_mac_w_10_2" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_mac_w_10")
+
+(define_insn_reservation "cfv12_mac_w_10_3" 5
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mac_w_10")
+
+(define_insn_reservation "cfv3_mac_w_10_1" 6
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_mac_w_10")
+
+(define_insn_reservation "cfv3_mac_w_10_2" 6
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_mac_w_10")
+
+(define_insn_reservation "cfv3_mac_w_10_3" 6
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mac_w_10")
+
+(define_insn_reservation "cfv12_mac_w_i0_2" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_mac_w_i0")
+
+(define_insn_reservation "cfv12_mac_w_i0_3" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_mac_w_i0")
+
+(define_insn_reservation "cfv3_mac_w_i0_2" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_mac_w_i0")
+
+(define_insn_reservation "cfv3_mac_w_i0_3" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_mac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_mac_w_i0")
+
+(define_insn_reservation "cfv123_emac_00_1" 4
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l,mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv123_emac_00")
+
+(define_insn_reservation "cfv123_emac_00_2" 4
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l,mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv123_emac_00")
+
+(define_insn_reservation "cfv123_emac_00_3" 4
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l,mul_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv123_emac_00")
+
+(define_insn_reservation "cfv12_emac_l_10_1" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_emac_10")
+
+(define_insn_reservation "cfv12_emac_l_10_2" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_emac_10")
+
+(define_insn_reservation "cfv12_emac_l_10_3" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_emac_10")
+
+(define_insn_reservation "cfv3_emac_l_10_1" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_emac_10")
+
+(define_insn_reservation "cfv3_emac_l_10_2" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_emac_10")
+
+(define_insn_reservation "cfv3_emac_l_10_3" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_l"))
+ (eq_attr "op_mem" "10,i0,i1,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_emac_10")
+
+(define_insn_reservation "cfv12_emac_w_10_1" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_emac_10")
+
+(define_insn_reservation "cfv12_emac_w_10_2" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_emac_10")
+
+(define_insn_reservation "cfv12_emac_w_10_3" 6
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_emac_10")
+
+(define_insn_reservation "cfv3_emac_w_10_1" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_emac_10")
+
+(define_insn_reservation "cfv3_emac_w_10_2" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_emac_10")
+
+(define_insn_reservation "cfv3_emac_w_10_3" 7
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_emac_10")
+
+(define_insn_reservation "cfv12_emac_w_i0_2" 7
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv12_emac_w_i0")
+
+(define_insn_reservation "cfv12_emac_w_i0_3" 7
+ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_emac_w_i0")
+
+(define_insn_reservation "cfv3_emac_w_i0_2" 8
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "1,2"))
+ "cf_ib2+cfv3_emac_w_i0")
+
+(define_insn_reservation "cfv3_emac_w_i0_3" 8
+ (and (and (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "mac" "cf_emac"))
+ (eq_attr "type" "mul_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_emac_w_i0")
+
+(define_insn_reservation "cfv12_rts" 5
+ (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "rts"))
+ "cf_ib1+cfv12_rts")
+
+(define_insn_reservation "cfv3_rts" 8
+ (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "rts"))
+ "cf_ib1+cfv3_rts")
+
+(define_insn_reservation "cfv12_call_1" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_call")
+
+(define_insn_reservation "cfv12_call_2" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_call")
+
+(define_insn_reservation "cfv12_call_3" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_call")
+
+(define_insn_reservation "cfv3_call_1" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_call")
+
+(define_insn_reservation "cfv3_call_2" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_call")
+
+(define_insn_reservation "cfv3_call_3" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bsr,jsr"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_call")
+
+(define_insn_reservation "cfv12_bcc_1" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_bcc")
+
+(define_insn_reservation "cfv12_bcc_2" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_bcc")
+
+(define_insn_reservation "cfv12_bcc_3" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_bcc")
+
+(define_insn_reservation "cfv3_bcc_1" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_bcc")
+
+(define_insn_reservation "cfv3_bcc_2" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_bcc")
+
+(define_insn_reservation "cfv3_bcc_3" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bcc"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_bcc")
+
+(define_insn_reservation "cfv12_bra_1" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_bra")
+
+(define_insn_reservation "cfv12_bra_2" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_bra")
+
+(define_insn_reservation "cfv12_bra_3" 2
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_bra")
+
+(define_insn_reservation "cfv3_bra_1" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_bra")
+
+(define_insn_reservation "cfv3_bra_2" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_bra")
+
+(define_insn_reservation "cfv3_bra_3" 1
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "bra"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_bra")
+
+(define_insn_reservation "cfv12_jmp_1" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv12_jmp")
+
+(define_insn_reservation "cfv12_jmp_2" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv12_jmp")
+
+(define_insn_reservation "cfv12_jmp_3" 3
+ (and (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv12_jmp")
+
+(define_insn_reservation "cfv3_jmp_1" 5
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "1"))
+ "cf_ib1+cfv3_jmp")
+
+(define_insn_reservation "cfv3_jmp_2" 5
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "2"))
+ "cf_ib2+cfv3_jmp")
+
+(define_insn_reservation "cfv3_jmp_3" 5
+ (and (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "jmp"))
+ (eq_attr "size" "3"))
+ "cf_ib3+cfv3_jmp")
+
+(define_insn_reservation "cfv12_unlk" 2
+ (and (eq_attr "cpu" "cfv1,cfv2")
+ (eq_attr "type" "unlk"))
+ "cf_ib1+cfv12_alu_10")
+
+(define_insn_reservation "cfv3_unlk" 3
+ (and (eq_attr "cpu" "cfv3")
+ (eq_attr "type" "unlk"))
+ "cf_ib1+cfv3_alu_10")
+
+;; Dummy reservation for instructions that are not handled.
+(define_insn_reservation "cfv123_guess" 3
+ (and (eq_attr "cpu" "cfv1,cfv2,cfv3")
+ (eq_attr "type" "falu,fbcc,fcmp,fdiv,fmove,fmul,fneg,fsqrt,ftst,
+ div_w,div_l,link,mvsz,nop,trap,unknown"))
+ "cf_ib3+cfv123_guess+cf_dsoc+cf_agex+cf_mem")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Below is the pipeline description of the ColdFire V4 core.
+;; It is substantially different from the descriptions of the V1, V2 and V3
+;; cores, primarily because there is no need to model the instruction buffer.
+;;
+;; The V4 pipeline model uses a completely separate set of cpu units.
+
+;; Operand Execution Pipeline.
+(define_automaton "cfv4_oep")
+
+(define_cpu_unit "cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da"
+ "cfv4_oep")
+
+;; V4 has 3 cases of dual-issue.
+;; After issuing a cfv4_pOEPx instruction, it is possible to issue
+;; a cfv4_sOEPx instruction on the same cycle (see the final_presence_set
+;; declarations below).
+(define_cpu_unit "cfv4_pOEP1,cfv4_sOEP1,
+ cfv4_pOEP2,cfv4_sOEP2,
+ cfv4_pOEP3,cfv4_sOEP3" "cfv4_oep")
+
+(final_presence_set "cfv4_sOEP1" "cfv4_pOEP1")
+(final_presence_set "cfv4_sOEP2" "cfv4_pOEP2")
+(final_presence_set "cfv4_sOEP3" "cfv4_pOEP3")
+
+;; Reservation for instructions that don't allow dual-issue.
+(define_reservation "cfv4_ds" "cfv4_pOEP1+cfv4_sOEP1+
+ cfv4_pOEP2+cfv4_sOEP2+
+ cfv4_pOEP3+cfv4_sOEP3")
+
+;; Memory access resource.
+(define_automaton "cfv4_mem")
+
+(define_cpu_unit "cfv4_mem" "cfv4_mem")
+
+;; EMAC.
+(define_automaton "cfv4_emac")
+
+(define_cpu_unit "cfv4_emac" "cfv4_emac")
+
+;; FPU.
+(define_automaton "cfv4_fp")
+
+(define_cpu_unit "cfv4_fp" "cfv4_fp")
+
+;; Automaton for unknown instruction.
+(define_automaton "cfv4_guess")
+
+(define_query_cpu_unit "cfv4_guess" "cfv4_guess")
+
+;; This bypass allows the first case of dual-issue.
+(define_bypass 0 "cfv4_00_oag_pOEP1,cfv4_10_pOEP1,cfv4_i0_pOEP1"
+ "cfv4_00_oag,cfv4_00_oag_pOEP3_sOEP12,cfv4_00_oag_pOEP1,
+ cfv4_00_oag_moveql,cfv4_00_ex_sOEP13")
+
+;; The following bypasses decrease the latency of a producer if it modifies
+;; its target register in the EX stage and the consumer also uses
+;; that register in the EX stage.
+(define_bypass 1 "cfv4_00_ex" "cfv4_00_ex,cfv4_00_ex_sOEP13")
+(define_bypass 1 "cfv4_00_ex" "cfv4_10,cfv4_10_pOEP1,cfv4_i0,cfv4_i0_pOEP1"
+ "!m68k_sched_address_bypass_p")
+
+;; Indexed loads with scale factors 2 and 4 require an update of the index
+;; register in the register file. Considering that the index register is
+;; only needed at the second cycle of address generation, we get
+;; a latency of 4.
+;; Producers for indexed loads with scale factor 1 should have
+;; a latency of 3. Since we're only allowed one bypass, we handle it
+;; in the adjust_cost hook.
+(define_bypass 4
+ "cfv4_00_oag,cfv4_00_oag_pOEP3_sOEP12,cfv4_00_oag_lea,cfv4_00_oag_pOEP1,
+ cfv4_00_oag_moveql"
+ "cfv4_i0,cfv4_i0_pOEP1"
+ "m68k_sched_indexed_address_bypass_p")
+
+;; First part of cfv4_00.
+;; If issued in a pair with cfv4_movel_?0, the cost should be increased.
+;; ??? Is it possible that the combined cfv4_movel_00 and cfv4_oag_00
+;; instructions have a longer latency than the two instructions emitted
+;; sequentially?  Due to register renaming, the result of the sequence
+;; would be available after 3 cycles, instead of 4 for the combined
+;; instruction.
+(define_insn_reservation "cfv4_00_oag" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,clr_l,cmp_l,mov3q_l,neg_l"))
+ (eq_attr "op_mem" "00"))
+ "cfv4_sOEP1|cfv4_sOEP3|(cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)")
+
+(define_insn_reservation "cfv4_00_oag_pOEP3_sOEP12" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "move_l,mov3q_l,clr_l"))
+ (and (eq_attr "op_mem" "00")
+ (and (eq_attr "opx_type" "Rn")
+ (eq_attr "opy_type" "none,imm_q,imm_w,imm_l"))))
+ "cfv4_sOEP1|cfv4_sOEP2|(cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)")
+
+(define_insn_reservation "cfv4_00_oag_lea" 1
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "lea"))
+ "cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_00_oag_pOEP1" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "move_l,mov3q_l,clr_l"))
+ (and (eq_attr "op_mem" "00")
+ (ior (eq_attr "opx_type" "!Rn")
+ (eq_attr "opy_type" "!none,imm_q,imm_w,imm_l"))))
+ "cfv4_sOEP1|(cfv4_pOEP1,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)")
+
+(define_insn_reservation "cfv4_00_oag_moveql" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "moveq_l"))
+ (eq_attr "op_mem" "00"))
+ "cfv4_sOEP1|cfv4_sOEP2|cfv4_sOEP3|(cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)")
+
+;; Second part of cfv4_00.
+;; Latency is either 1 or 4, depending on the stage in which the consumer
+;; needs the data.
+
+(define_insn_reservation "cfv4_00_ex" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "bitr,bitrw,clr,cmp,move,mvsz,scc,tst"))
+ (eq_attr "op_mem" "00"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_00_ex_sOEP13" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alux_l,ext,shift,tst_l"))
+ (eq_attr "op_mem" "00"))
+ "cfv4_sOEP1|cfv4_sOEP3|(cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)")
+
+;; Several types mentioned in this reservation (e.g., ext and shift) don't
+;; support an implicit load.  We handle them anyway because the first
+;; scheduling pass operates on non-strict RTL.
+;;
+;; Latency is either 1 or 4, depending on the stage in which the consumer
+;; needs the data.
+(define_insn_reservation "cfv4_10" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,moveq_l,mvsz,neg_l,
+ shift,tst,tst_l"))
+ (eq_attr "op_mem" "10"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+;; Specialization of cfv4_10.
+;; move.l has an OC2-to-DS forwarding path, which saves one cycle of latency.
+(define_insn_reservation "cfv4_10_pOEP1" 3
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "move_l"))
+ (eq_attr "op_mem" "10"))
+ "cfv4_pOEP1,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+;; The same forwarding applies here, but latency is one cycle longer due to
+;; the longer OAG stage.
+(define_insn_reservation "cfv4_i0" 5
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,moveq_l,mvsz,neg_l,
+ shift,tst,tst_l"))
+ (eq_attr "op_mem" "i0"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+;; ??? Does an indexed load trigger dual-issue?
+;; ??? Does the OC2-to-DS forwarding path save a cycle here as well?
+(define_insn_reservation "cfv4_i0_pOEP1" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "move_l"))
+ (eq_attr "op_mem" "i0"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+;; This reservation is for moves and clr.  Arithmetic instructions
+;; don't write to memory unless they also read from it, but before
+;; reload we can see all sorts of things.
+;; With cfv4_pOEP2 we allow dual-issue for the type 2 cases.
+(define_insn_reservation "cfv4_01" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,move_l,moveq_l,mvsz,neg_l,
+ shift"))
+ (eq_attr "op_mem" "01"))
+ "cfv4_pOEP2,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+;; ??? Does an indexed store trigger dual-issue?
+(define_insn_reservation "cfv4_0i" 2
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,move_l,moveq_l,mvsz,neg_l,
+ shift"))
+ (eq_attr "op_mem" "0i"))
+ "cfv4_pOEP2,cfv4_oag,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+(define_insn_reservation "cfv4_11" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,move_l,moveq_l,mvsz,neg_l,
+ shift"))
+ (eq_attr "op_mem" "11"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+;; Latency is 2 due to the long OAG stage.
+(define_insn_reservation "cfv4_i1" 2
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,move_l,moveq_l,mvsz,neg_l,
+ shift"))
+ (eq_attr "op_mem" "i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+;; This one is the same as cfv4_i1.
+;; ??? Should it be different?
+(define_insn_reservation "cfv4_1i" 2
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw,
+ clr,clr_l,cmp,cmp_l,ext,
+ mov3q_l,move,move_l,moveq_l,mvsz,neg_l,
+ shift"))
+ (eq_attr "op_mem" "1i"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+;; ??? Does pea indeed support case 2 of dual-issue?
+(define_insn_reservation "cfv4_11_pea" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "11,00,01,0i,10"))
+ "cfv4_pOEP2,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+;; ??? Does pea indeed support case 2 of dual-issue?
+;; ??? Does indexed store trigger dual-issue?
+(define_insn_reservation "cfv4_i1_pea" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "pea"))
+ (eq_attr "op_mem" "i1,1i"))
+ "cfv4_pOEP2,cfv4_oag,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem")
+
+(define_insn_reservation "cfv4_link" 2
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "link"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_ex,cfv4_da,cfv4_mem")
+
+(define_insn_reservation "cfv4_unlink" 2
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "unlk"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_divw_00" 20
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "div_w"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex*15")
+
+(define_insn_reservation "cfv4_divw_10" 20
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "div_w"))
+ (eq_attr "op_mem" "10,11,1i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*15")
+
+(define_insn_reservation "cfv4_divw_i0" 21
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "div_w"))
+ (eq_attr "op_mem" "i0,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*15")
+
+(define_insn_reservation "cfv4_divl_00" 35
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "div_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex*30")
+
+(define_insn_reservation "cfv4_divl_10" 35
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "div_l"))
+ (eq_attr "op_mem" "10,11,1i,i0,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*30")
+
+(define_insn_reservation "cfv4_emac_mul_00" 7
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "mul_w,mul_l"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_emac")
+
+(define_insn_reservation "cfv4_emac_mul_10" 7
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "mul_w,mul_l"))
+ (eq_attr "op_mem" "10,11,1i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac")
+
+(define_insn_reservation "cfv4_emac_mul_i0" 8
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "mul_w,mul_l"))
+ (eq_attr "op_mem" "i0,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac")
+
+(define_insn_reservation "cfv4_falu_00" 7
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "falu,fcmp,fmul"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp")
+
+(define_insn_reservation "cfv4_falu_10" 7
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "falu,fcmp,fmul"))
+ (eq_attr "op_mem" "10,i0,11,1i,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp")
+
+(define_insn_reservation "cfv4_fneg_00" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fmove,fneg,ftst"))
+ (eq_attr "op_mem" "00"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp")
+
+(define_insn_reservation "cfv4_fmove_fneg_10" 4
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fmove,fneg,ftst"))
+ (eq_attr "op_mem" "10,i0,11,1i,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp")
+
+(define_insn_reservation "cfv4_fmove_01" 1
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fmove,fneg,ftst"))
+ (eq_attr "op_mem" "01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp,cfv4_da,cfv4_mem")
+
+(define_insn_reservation "cfv4_fdiv_00" 23
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fdiv"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp*17")
+
+(define_insn_reservation "cfv4_fdiv_10" 23
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fdiv"))
+ (eq_attr "op_mem" "10,i0,11,1i,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp*17")
+
+(define_insn_reservation "cfv4_fsqrt_00" 56
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fsqrt"))
+ (eq_attr "op_mem" "00,01,0i"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp*50")
+
+(define_insn_reservation "cfv4_fsqrt_10" 56
+ (and (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fsqrt"))
+ (eq_attr "op_mem" "10,i0,11,1i,i1"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp*50")
+
+(define_insn_reservation "cfv4_bcc" 0
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "bcc"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_fbcc" 2
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "fbcc"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp")
+
+;; ??? Why is bra said to write to memory: 1(0/1)?
+(define_insn_reservation "cfv4_bra_bsr" 1
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "bra,bsr"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_jmp_jsr" 5
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "jmp,jsr"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_rts" 2
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "rts"))
+ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex")
+
+(define_insn_reservation "cfv4_nop" 1
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "nop"))
+ "cfv4_ds+cfv4_oag+cfv4_oc1+cfv4_mem+cfv4_oc2+cfv4_ex")
+
+(define_insn_reservation "cfv4_guess" 10
+ (and (eq_attr "cpu" "cfv4")
+ (eq_attr "type" "trap,unknown"))
+ "cfv4_guess+cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac+cfv4_fp")
+
+(define_insn_reservation "ignore" 0
+ (eq_attr "type" "ignore")
+ "nothing")
diff --git a/gcc/config/m68k/constraints.md b/gcc/config/m68k/constraints.md
new file mode 100644
index 000000000..a4885cda6
--- /dev/null
+++ b/gcc/config/m68k/constraints.md
@@ -0,0 +1,164 @@
+;; Constraint definitions for m68k
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "a" "ADDR_REGS"
+ "Address register.")
+
+(define_register_constraint "d" "DATA_REGS"
+ "Data register.")
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+ "Floating point register.")
+
+(define_constraint "I"
+ "Integer constant in the range 1 @dots 8, for immediate shift counts and addq."
+ (and (match_code "const_int")
+ (match_test "ival > 0 && ival <= 8")))
+
+(define_constraint "J"
+ "Signed 16-bit integer constant."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x8000 && ival <= 0x7fff")))
+
+(define_constraint "K"
+ "Integer constant that moveq can't handle."
+ (and (match_code "const_int")
+ (match_test "ival < -0x80 || ival >= 0x80")))
+
+(define_constraint "L"
+ "Integer constant in the range -8 @dots -1, for subq."
+ (and (match_code "const_int")
+ (match_test "ival < 0 && ival >= -8")))
+
+(define_constraint "M"
+ "Integer constant that moveq+notb can't handle."
+ (and (match_code "const_int")
+ (match_test "ival < -0x100 || ival >= 0x100")))
+
+(define_constraint "N"
+ "Integer constant in the range 24 @dots 31, for rotatert:SI 8 to 1 expressed as rotate."
+ (and (match_code "const_int")
+ (match_test "ival >= 24 && ival <= 31")))
+
+(define_constraint "O"
+ "Integer constant 16, for rotate using swap."
+ (and (match_code "const_int")
+ (match_test "ival == 16")))
+
+(define_constraint "P"
+ "Integer constant in the range 8 @dots 15, for rotatert:HI 8 to 1 expressed as rotate."
+ (and (match_code "const_int")
+ (match_test "ival >= 8 && ival <= 15")))
+
+(define_constraint "R"
+ "Integer constant that mov3q can handle."
+ (and (match_code "const_int")
+ (match_test "valid_mov3q_const (ival)")))
+
+(define_constraint "G"
+ "Defines all of the floating constants that are *NOT* 68881
+ constants. This is so 68881 constants get reloaded and the fpmovecr
+ is used."
+ (and (match_code "const_double")
+ (match_test "!(TARGET_68881 && standard_68881_constant_p (op))")))
+
+(define_constraint "H"
+ "Defines a real zero constant."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_constraint "S"
+ "Used for operands that satisfy 'm' when -mpcrel is in effect."
+ (and (match_code "mem")
+ (match_test "TARGET_PCREL
+ && (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF
+ || GET_CODE (XEXP (op, 0)) == CONST)")))
+
+(define_constraint "T"
+ "Used for operands that satisfy 's' when -mpcrel is not in effect."
+ (and (match_code "symbol_ref,label_ref,const")
+ (match_test "!flag_pic")))
+
+(define_memory_constraint "Q"
+ "Means address register indirect addressing mode."
+ (and (match_code "mem")
+ (match_test "m68k_matches_q_p (op)")))
+
+(define_constraint "U"
+ "Used for register offset addressing."
+ (and (match_code "mem")
+ (match_test "m68k_matches_u_p (op)")))
+
+(define_constraint "W"
+ "Used for const_call_operands."
+ (match_operand 0 "const_call_operand"))
+
+(define_constraint "Cs"
+ "symbol_ref or const."
+ (match_code "symbol_ref,const"))
+
+(define_constraint "Ci"
+ "const_int."
+ (and (match_code "const_int")
+ (match_test "true")))
+
+(define_constraint "C0"
+ "const_int 0."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "Cj"
+ "Range of signed numbers that don't fit in 16 bits."
+ (and (match_code "const_int")
+ (match_test "ival < -0x8000 || ival > 0x7FFF")))
+
+(define_constraint "Cu"
+ "16-bit offset for wrapped symbols"
+ (and (match_code "const")
+ (match_test "m68k_unwrap_symbol (op, false) != op")))
+
+(define_constraint "CQ"
+ "Integers valid for mvq."
+ (and (match_code "const_int")
+ (match_test "m68k_const_method (ival) == MOVQ")))
+
+(define_constraint "CW"
+ "Integers valid for a moveq followed by a swap."
+ (and (match_code "const_int")
+ (match_test "m68k_const_method (ival) == SWAP")))
+
+(define_constraint "CZ"
+ "Integers valid for mvz."
+ (and (match_code "const_int")
+ (match_test "m68k_const_method (ival) == MVZ")))
+
+(define_constraint "CS"
+ "Integers valid for mvs."
+ (and (match_code "const_int")
+ (match_test "m68k_const_method (ival) == MVS")))
+
+(define_constraint "Ap"
+ "push_operand."
+ (match_operand 0 "push_operand"))
+
+(define_constraint "Ac"
+ "Non-register operands allowed in clr."
+ (and (match_operand 0 "movsi_const0_operand")
+ (match_test "!REG_P (op)")))
diff --git a/gcc/config/m68k/crti.s b/gcc/config/m68k/crti.s
new file mode 100644
index 000000000..12fb59f41
--- /dev/null
+++ b/gcc/config/m68k/crti.s
@@ -0,0 +1,44 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 1999, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
+
+ .ident "GNU C crti.o"
+
+ .section .init
+ .globl _init
+ .type _init,@function
+_init:
+ linkw %fp,#0
+
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+_fini:
+ linkw %fp,#0
diff --git a/gcc/config/m68k/crtn.s b/gcc/config/m68k/crtn.s
new file mode 100644
index 000000000..b7d70f02e
--- /dev/null
+++ b/gcc/config/m68k/crtn.s
@@ -0,0 +1,40 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 1999, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .ident "GNU C crtn.o"
+
+ .section .init
+ unlk %fp
+ rts
+
+ .section .fini
+ unlk %fp
+ rts
diff --git a/gcc/config/m68k/fpgnulib.c b/gcc/config/m68k/fpgnulib.c
new file mode 100644
index 000000000..2a7f6c75d
--- /dev/null
+++ b/gcc/config/m68k/fpgnulib.c
@@ -0,0 +1,595 @@
+/* This is a stripped down version of floatlib.c.  It supplies only those
+   functions which exist in libgcc, but for which there are no assembly
+   language versions in m68k/lb1sf68.asm.
+
+ It also includes simplistic support for extended floats (by working in
+ double precision). You must compile this file again with -DEXTFLOAT
+ to get this support. */
+
+/*
+** gnulib support for software floating point.
+** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
+** Permission is granted to do *anything* you want with this file,
+** commercial or otherwise, provided this message remains intact. So there!
+** I would appreciate receiving any updates/patches/changes that anyone
+** makes, and am willing to be the repository for said changes (am I
+** making a big mistake?).
+**
+** Pat Wood
+** Pipeline Associates, Inc.
+** pipeline!phw@motown.com or
+** sun!pipeline!phw or
+** uunet!motown!pipeline!phw
+**
+** 05/01/91 -- V1.0 -- first release to gcc mailing lists
+** 05/04/91 -- V1.1 -- added float and double prototypes and return values
+** -- fixed problems with adding and subtracting zero
+** -- fixed rounding in truncdfsf2
+** -- fixed SWAP define and tested on 386
+*/
+
+/*
+** The following are routines that replace the gnulib soft floating point
+** routines that are called automatically when -msoft-float is selected.
+** They support single and double precision IEEE format, with provisions
+** for byte-swapped machines (tested on 386). Some of the double-precision
+** routines work at full precision, but most of the hard ones simply punt
+** and call the single precision routines, producing a loss of accuracy.
+** long long support is not assumed or included.
+** Overall accuracy is close to IEEE (actually 68882) for single-precision
+** arithmetic. I think there may still be a 1 in 1000 chance of a bit
+** being rounded the wrong way during a multiply. I'm not fussy enough to
+** bother with it, but if anyone is, knock yourself out.
+**
+** Efficiency has only been addressed where it was obvious that something
+** would make a big difference. Anyone who wants to do this right for
+** best speed should go in and rewrite it in assembler.
+**
+** I have tested this only on a 68030 workstation and on 386/ix, integrated
+** in with -msoft-float.
+*/
+
+/* the following deal with IEEE single-precision numbers */
+#define EXCESS 126L
+#define SIGNBIT 0x80000000L
+#define HIDDEN (1L << 23L)
+#define SIGN(fp) ((fp) & SIGNBIT)
+#define EXP(fp) (((fp) >> 23L) & 0xFF)
+#define MANT(fp) (((fp) & 0x7FFFFFL) | HIDDEN)
+#define PACK(s,e,m) ((s) | ((e) << 23L) | (m))
+
+/* the following deal with IEEE double-precision numbers */
+#define EXCESSD 1022L
+#define HIDDEND (1L << 20L)
+#define EXPDBITS 11
+#define EXPDMASK 0x7FFL
+#define EXPD(fp) (((fp.l.upper) >> 20L) & 0x7FFL)
+#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
+ (fp.l.lower >> 22))
+#define MANTDMASK 0xFFFFFL /* mask of upper part */
+
+/* the following deal with IEEE extended-precision numbers */
+#define EXCESSX 16382L
+#define HIDDENX (1L << 31L)
+#define EXPXBITS 15
+#define EXPXMASK 0x7FFF
+#define EXPX(fp) (((fp.l.upper) >> 16) & EXPXMASK)
+#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
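+
+/* Worked example (illustrative): the float 1.0f has bit pattern 0x3f800000.
+   SIGN() of it is 0, EXP() is 127 (EXCESS + 1, i.e. an exponent of 2^0)
+   and MANT() is 0x800000, the HIDDEN bit alone.  PACK (0, 127, 0)
+   rebuilds 0x3f800000; note PACK expects the fraction without HIDDEN.  */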
+
+union double_long
+{
+ double d;
+ struct {
+ long upper;
+ unsigned long lower;
+ } l;
+};
+
+union float_long {
+ float f;
+ long l;
+};
+
+union long_double_long
+{
+ long double ld;
+ struct
+ {
+ long upper;
+ unsigned long middle;
+ unsigned long lower;
+ } l;
+};
+
+#ifndef EXTFLOAT
+
+int
+__unordsf2(float a, float b)
+{
+ union float_long fl;
+
+ fl.f = a;
+ if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
+ return 1;
+ fl.f = b;
+ if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
+ return 1;
+ return 0;
+}
+
+int
+__unorddf2(double a, double b)
+{
+ union double_long dl;
+
+ dl.d = a;
+ if (EXPD(dl) == EXPDMASK
+ && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
+ return 1;
+ dl.d = b;
+ if (EXPD(dl) == EXPDMASK
+ && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
+ return 1;
+ return 0;
+}
+
+/* convert unsigned int to double */
+double
+__floatunsidf (unsigned long a1)
+{
+ long exp = 32 + EXCESSD;
+ union double_long dl;
+
+ if (!a1)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return dl.d;
+ }
+
+ while (a1 < 0x2000000L)
+ {
+ a1 <<= 4;
+ exp -= 4;
+ }
+
+ while (a1 < 0x80000000L)
+ {
+ a1 <<= 1;
+ exp--;
+ }
+
+ /* pack up and go home */
+ dl.l.upper = exp << 20L;
+ dl.l.upper |= (a1 >> 11L) & ~HIDDEND;
+ dl.l.lower = a1 << 21L;
+
+ return dl.d;
+}
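+
+/* Worked example (illustrative): __floatunsidf (1) shifts a1 up to
+   0x80000000 (seven 4-bit steps, then three 1-bit steps), leaving
+   exp == 1 + EXCESSD == 1023; the packed result is upper = 0x3ff00000,
+   lower = 0, i.e. the double 1.0.  */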
+
+/* convert int to double */
+double
+__floatsidf (long a1)
+{
+ long sign = 0, exp = 31 + EXCESSD;
+ union double_long dl;
+
+ if (!a1)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return dl.d;
+ }
+
+ if (a1 < 0)
+ {
+ sign = SIGNBIT;
+ a1 = (long)-(unsigned long)a1;
+ if (a1 < 0)
+ {
+ dl.l.upper = SIGNBIT | ((32 + EXCESSD) << 20L);
+ dl.l.lower = 0;
+ return dl.d;
+ }
+ }
+
+ while (a1 < 0x1000000L)
+ {
+ a1 <<= 4;
+ exp -= 4;
+ }
+
+ while (a1 < 0x40000000L)
+ {
+ a1 <<= 1;
+ exp--;
+ }
+
+ /* pack up and go home */
+ dl.l.upper = sign;
+ dl.l.upper |= exp << 20L;
+ dl.l.upper |= (a1 >> 10L) & ~HIDDEND;
+ dl.l.lower = a1 << 22L;
+
+ return dl.d;
+}
+
+/* convert unsigned int to float */
+float
+__floatunsisf (unsigned long l)
+{
+ double foo = __floatunsidf (l);
+ return foo;
+}
+
+/* convert int to float */
+float
+__floatsisf (long l)
+{
+ double foo = __floatsidf (l);
+ return foo;
+}
+
+/* convert float to double */
+double
+__extendsfdf2 (float a1)
+{
+ register union float_long fl1;
+ register union double_long dl;
+ register long exp;
+ register long mant;
+
+ fl1.f = a1;
+
+ dl.l.upper = SIGN (fl1.l);
+ if ((fl1.l & ~SIGNBIT) == 0)
+ {
+ dl.l.lower = 0;
+ return dl.d;
+ }
+
+ exp = EXP(fl1.l);
+ mant = MANT (fl1.l) & ~HIDDEN;
+ if (exp == 0)
+ {
+ /* Denormal. */
+ exp = 1;
+ while (!(mant & HIDDEN))
+ {
+ mant <<= 1;
+ exp--;
+ }
+ mant &= ~HIDDEN;
+ }
+ exp = exp - EXCESS + EXCESSD;
+ dl.l.upper |= exp << 20;
+ dl.l.upper |= mant >> 3;
+ dl.l.lower = mant << 29;
+
+ return dl.d;
+}
+
+/* convert double to float */
+float
+__truncdfsf2 (double a1)
+{
+ register long exp;
+ register long mant;
+ register union float_long fl;
+ register union double_long dl1;
+ int sticky;
+ int shift;
+
+ dl1.d = a1;
+
+ if ((dl1.l.upper & ~SIGNBIT) == 0 && !dl1.l.lower)
+ {
+ fl.l = SIGND(dl1);
+ return fl.f;
+ }
+
+ exp = EXPD (dl1) - EXCESSD + EXCESS;
+
+ sticky = dl1.l.lower & ((1 << 22) - 1);
+ mant = MANTD (dl1);
+ /* shift double mantissa 6 bits so we can round */
+ sticky |= mant & ((1 << 6) - 1);
+ mant >>= 6;
+
+ /* Check for underflow and denormals. */
+ if (exp <= 0)
+ {
+ if (exp < -24)
+ {
+ sticky |= mant;
+ mant = 0;
+ }
+ else
+ {
+ sticky |= mant & ((1 << (1 - exp)) - 1);
+ mant >>= 1 - exp;
+ }
+ exp = 0;
+ }
+
+ /* now round */
+ shift = 1;
+ if ((mant & 1) && (sticky || (mant & 2)))
+ {
+ int rounding = exp ? 2 : 1;
+
+ mant += 1;
+
+ /* did the round overflow? */
+ if (mant >= (HIDDEN << rounding))
+ {
+ exp++;
+ shift = rounding;
+ }
+ }
+ /* shift down */
+ mant >>= shift;
+
+ mant &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl.l = PACK (SIGND (dl1), exp, mant);
+ return (fl.f);
+}
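+
+/* The rounding test above implements IEEE round-to-nearest-even
+   (illustrative reading): bit 0 of the shifted mant is the round bit and
+   sticky holds every discarded bit, so mant is bumped only when the round
+   bit is set and either sticky is nonzero or the result's own low bit
+   (mant & 2) is set -- an exact tie thus rounds toward an even
+   mantissa.  */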
+
+/* convert double to int */
+long
+__fixdfsi (double a1)
+{
+ register union double_long dl1;
+ register long exp;
+ register long l;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return 0;
+
+ exp = EXPD (dl1) - EXCESSD - 31;
+ l = MANTD (dl1);
+
+ if (exp > 0)
+ {
+ /* Return largest integer. */
+ return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
+ }
+
+ if (exp <= -32)
+ return 0;
+
+ /* shift down until exp = 0 */
+ if (exp < 0)
+ l >>= -exp;
+
+ return (SIGND (dl1) ? -l : l);
+}
+
+/* convert float to int */
+long
+__fixsfsi (float a1)
+{
+ double foo = a1;
+ return __fixdfsi (foo);
+}
+
+#else /* EXTFLOAT */
+
+/* We do not need these routines for ColdFire, as it has no extended
+   float format.  */
+#if !defined (__mcoldfire__)
+
+/* Primitive extended precision floating point support.
+
+ We assume all numbers are normalized, don't do any rounding, etc. */
+
+/* Prototypes for the above in case we use them. */
+double __floatunsidf (unsigned long);
+double __floatsidf (long);
+float __floatsisf (long);
+double __extendsfdf2 (float);
+float __truncdfsf2 (double);
+long __fixdfsi (double);
+long __fixsfsi (float);
+
+int
+__unordxf2(long double a, long double b)
+{
+ union long_double_long ldl;
+
+ ldl.ld = a;
+ if (EXPX(ldl) == EXPXMASK
+ && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
+ return 1;
+ ldl.ld = b;
+ if (EXPX(ldl) == EXPXMASK
+ && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
+ return 1;
+ return 0;
+}
+
+/* convert double to long double */
+long double
+__extenddfxf2 (double d)
+{
+ register union double_long dl;
+ register union long_double_long ldl;
+ register long exp;
+
+ dl.d = d;
+ /*printf ("dfxf in: %g\n", d);*/
+
+ ldl.l.upper = SIGND (dl);
+ if ((dl.l.upper & ~SIGNBIT) == 0 && !dl.l.lower)
+ {
+ ldl.l.middle = 0;
+ ldl.l.lower = 0;
+ return ldl.ld;
+ }
+
+ exp = EXPD (dl) - EXCESSD + EXCESSX;
+ ldl.l.upper |= exp << 16;
+ ldl.l.middle = HIDDENX;
+ /* 31-20: # mantissa bits in ldl.l.middle - # mantissa bits in dl.l.upper */
+ ldl.l.middle |= (dl.l.upper & MANTDMASK) << (31 - 20);
+ /* 1+20: explicit-integer-bit + # mantissa bits in dl.l.upper */
+ ldl.l.middle |= dl.l.lower >> (1 + 20);
+ /* 32 - 21: # bits of dl.l.lower in ldl.l.middle */
+ ldl.l.lower = dl.l.lower << (32 - 21);
+
+ /*printf ("dfxf out: %s\n", dumpxf (ldl.ld));*/
+ return ldl.ld;
+}
+
+/* convert long double to double */
+double
+__truncxfdf2 (long double ld)
+{
+ register long exp;
+ register union double_long dl;
+ register union long_double_long ldl;
+
+ ldl.ld = ld;
+ /*printf ("xfdf in: %s\n", dumpxf (ld));*/
+
+ dl.l.upper = SIGNX (ldl);
+ if ((ldl.l.upper & ~SIGNBIT) == 0 && !ldl.l.middle && !ldl.l.lower)
+ {
+ dl.l.lower = 0;
+ return dl.d;
+ }
+
+ exp = EXPX (ldl) - EXCESSX + EXCESSD;
+ /* ??? quick and dirty: keep `exp' sane */
+ if (exp >= EXPDMASK)
+ exp = EXPDMASK - 1;
+ dl.l.upper |= exp << (32 - (EXPDBITS + 1));
+ /* +1-1: add one for sign bit, but take one off for explicit-integer-bit */
+ dl.l.upper |= (ldl.l.middle & MANTXMASK) >> (EXPDBITS + 1 - 1);
+ dl.l.lower = (ldl.l.middle & MANTXMASK) << (32 - (EXPDBITS + 1 - 1));
+ dl.l.lower |= ldl.l.lower >> (EXPDBITS + 1 - 1);
+
+ /*printf ("xfdf out: %g\n", dl.d);*/
+ return dl.d;
+}
+
+/* convert a float to a long double */
+long double
+__extendsfxf2 (float f)
+{
+ long double foo = __extenddfxf2 (__extendsfdf2 (f));
+ return foo;
+}
+
+/* convert a long double to a float */
+float
+__truncxfsf2 (long double ld)
+{
+ float foo = __truncdfsf2 (__truncxfdf2 (ld));
+ return foo;
+}
+
+/* convert an int to a long double */
+long double
+__floatsixf (long l)
+{
+ double foo = __floatsidf (l);
+ return foo;
+}
+
+/* convert an unsigned int to a long double */
+long double
+__floatunsixf (unsigned long l)
+{
+ double foo = __floatunsidf (l);
+ return foo;
+}
+
+/* convert a long double to an int */
+long
+__fixxfsi (long double ld)
+{
+ long foo = __fixdfsi ((double) ld);
+ return foo;
+}
+
+/* The remaining routines provide crude math support by working in
+   double precision.  */
+
+long double
+__addxf3 (long double x1, long double x2)
+{
+ return (double) x1 + (double) x2;
+}
+
+long double
+__subxf3 (long double x1, long double x2)
+{
+ return (double) x1 - (double) x2;
+}
+
+long double
+__mulxf3 (long double x1, long double x2)
+{
+ return (double) x1 * (double) x2;
+}
+
+long double
+__divxf3 (long double x1, long double x2)
+{
+ return (double) x1 / (double) x2;
+}
+
+long double
+__negxf2 (long double x1)
+{
+ return - (double) x1;
+}
+
+long
+__cmpxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__eqxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__nexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__ltxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__lexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__gtxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__gexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+#endif /* !__mcoldfire__ */
+#endif /* EXTFLOAT */
diff --git a/gcc/config/m68k/ieee.opt b/gcc/config/m68k/ieee.opt
new file mode 100644
index 000000000..1fd67d8b4
--- /dev/null
+++ b/gcc/config/m68k/ieee.opt
@@ -0,0 +1,24 @@
+; Extra IEEE options for the Motorola 68000 port of the compiler.
+
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; This option is ignored by GCC.
+mieee-fp
+Target RejectNegative
+Use IEEE math for fp comparisons
diff --git a/gcc/config/m68k/lb1sf68.asm b/gcc/config/m68k/lb1sf68.asm
new file mode 100644
index 000000000..0339a092c
--- /dev/null
+++ b/gcc/config/m68k/lb1sf68.asm
@@ -0,0 +1,4116 @@
+/* libgcc routines for 68000 w/o floating-point hardware.
+ Copyright (C) 1994, 1996, 1997, 1998, 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Use this one for any 680x0; assumes no floating point hardware.
+ The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
+ Some of this code comes from MINIX, via the folks at ericsson.
+ D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
+*/
+
+/* These are predefined by new versions of GNU cpp. */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+#ifndef __IMMEDIATE_PREFIX__
+#define __IMMEDIATE_PREFIX__ #
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Note that X is a function. */
+
+#ifdef __ELF__
+#define FUNC(x) .type SYM(x),function
+#else
+/* The .proc pseudo-op is accepted, but ignored, by GAS. We could just
+ define this to the empty string for non-ELF systems, but defining it
+ to .proc means that the information is available to the assembler if
+ the need arises. */
+#define FUNC(x) .proc
+#endif
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/* Use the right prefix for immediate values. */
+
+#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
+
+#define d0 REG (d0)
+#define d1 REG (d1)
+#define d2 REG (d2)
+#define d3 REG (d3)
+#define d4 REG (d4)
+#define d5 REG (d5)
+#define d6 REG (d6)
+#define d7 REG (d7)
+#define a0 REG (a0)
+#define a1 REG (a1)
+#define a2 REG (a2)
+#define a3 REG (a3)
+#define a4 REG (a4)
+#define a5 REG (a5)
+#define a6 REG (a6)
+#define fp REG (fp)
+#define sp REG (sp)
+#define pc REG (pc)
+
+/* Provide a few macros to allow for PIC code support.
+ * With PIC, data is stored A5-relative, so we must take special care to
+ * ensure that all loads of global data go via A5.  PIC also requires
+ * jumps and subroutine calls to be PC relative rather than absolute.
+ * We cheat a little on this: in the PIC case we use short offset branches
+ * and hope that the final object code is within range (which it should be).
+ */
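+
+/* For example, the helpers below invoke each other with
+   PICCALL SYM (__udivsi3): in the non-PIC case this is a plain jbsr,
+   while the PIC variants load the target from the GOT or form a
+   PC-relative address instead.  */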
+#ifndef __PIC__
+
+ /* Non PIC (absolute/relocatable) versions */
+
+ .macro PICCALL addr
+ jbsr \addr
+ .endm
+
+ .macro PICJUMP addr
+ jmp \addr
+ .endm
+
+ .macro PICLEA sym, reg
+ lea \sym, \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ pea \sym
+ .endm
+
+#else /* __PIC__ */
+
+# if defined (__uClinux__)
+
+ /* Versions for uClinux */
+
+# if defined(__ID_SHARED_LIBRARY__)
+
+ /* -mid-shared-library versions */
+
+ .macro PICLEA sym, reg
+ movel a5@(_current_shared_library_a5_offset_), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel a5@(_current_shared_library_a5_offset_), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+ PICLEA \addr,a0
+ jsr a0@
+ .endm
+
+ .macro PICJUMP addr
+ PICLEA \addr,a0
+ jmp a0@
+ .endm
+
+# else /* !__ID_SHARED_LIBRARY__ */
+
+ /* Versions for -msep-data */
+
+ .macro PICLEA sym, reg
+ movel \sym@GOT(a5), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel \sym@GOT(a5), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ jbsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+
+# else /* !__uClinux__ */
+
+ /* Versions for Linux */
+
+ .macro PICLEA sym, reg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
+ lea (-6, pc, \reg), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
+ lea (-6, pc, \areg), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ jbsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+#endif /* __PIC__ */
+
+
+#ifdef L_floatex
+
+| This is an attempt at decent floating point (single, double and
+| extended double) code for the GNU C compiler.  It should be easy to
+| adapt to other compilers (but beware of the local labels!).
+
+| Starting date: 21 October, 1990
+
+| It is convenient to introduce the notation (s,e,f) for a floating point
+| number, where s=sign, e=exponent, f=fraction.  We will abbreviate
+| `floating point number' as fpn, independently of the precision.
+| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
+| for doubles and 16383 for long doubles). We then have the following
+| different cases:
+| 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
+| (-1)^s x 1.f x 2^(e-bias-1).
+| 2. Denormalized fpns have e=0. They correspond to numbers of the form
+| (-1)^s x 0.f x 2^(-bias).
+| 3. +/-INFINITY have e=MAX_EXP, f=0.
+| 4. Quiet NaNs (Not a Number) have all bits set.
+| 5. Signaling NaNs (Not a Number) have s=0, e=MAX_EXP, f=1.
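+|
+| For example, the double 1.0 is (s,e,f) = (0, 1023, 0): a normalized fpn
+| with value (-1)^0 x 1.0 x 2^(1023-1022-1) = 1.0 (the double bias is 1022).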
+
+|=============================================================================
+| exceptions
+|=============================================================================
+
+| This is the floating point condition code register (_fpCCR):
+|
+| struct {
+| short _exception_bits;
+| short _trap_enable_bits;
+| short _sticky_bits;
+| short _rounding_mode;
+| short _format;
+| short _last_operation;
+| union {
+| float sf;
+| double df;
+| } _operand1;
+| union {
+| float sf;
+| double df;
+| } _operand2;
+| } _fpCCR;
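+|
+| C code can inspect this state directly; for instance (illustrative),
+| testing `_fpCCR._sticky_bits & 0x0004' checks for a past OVERFLOW
+| (the exception type values are defined below).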
+
+ .data
+ .even
+
+ .globl SYM (_fpCCR)
+
+SYM (_fpCCR):
+__exception_bits:
+ .word 0
+__trap_enable_bits:
+ .word 0
+__sticky_bits:
+ .word 0
+__rounding_mode:
+ .word ROUND_TO_NEAREST
+__format:
+ .word NIL
+__last_operation:
+ .word NOOP
+__operand1:
+ .long 0
+ .long 0
+__operand2:
+ .long 0
+ .long 0
+
+| Offsets:
+EBITS = __exception_bits - SYM (_fpCCR)
+TRAPE = __trap_enable_bits - SYM (_fpCCR)
+STICK = __sticky_bits - SYM (_fpCCR)
+ROUND = __rounding_mode - SYM (_fpCCR)
+FORMT = __format - SYM (_fpCCR)
+LASTO = __last_operation - SYM (_fpCCR)
+OPER1 = __operand1 - SYM (_fpCCR)
+OPER2 = __operand2 - SYM (_fpCCR)
+
+| The following exception types are supported:
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+| The allowed rounding modes are:
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| The allowed values of format are:
+NIL = 0
+SINGLE_FLOAT = 1
+DOUBLE_FLOAT = 2
+LONG_FLOAT = 3
+
+| The allowed values for the last operation are:
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
+
+|=============================================================================
+| __clear_sticky_bits
+|=============================================================================
+
+| The sticky bits are normally not cleared (thus the name), whereas the
+| exception type and exception value reflect the last computation.
+| This routine is provided to clear them (you can also write to _fpCCR,
+| since it is globally visible).
+
+ .globl SYM (__clear_sticky_bit)
+
+ .text
+ .even
+
+| void __clear_sticky_bit(void);
+SYM (__clear_sticky_bit):
+ PICLEA SYM (_fpCCR),a0
+#ifndef __mcoldfire__
+ movew IMM (0),a0@(STICK)
+#else
+ clr.w a0@(STICK)
+#endif
+ rts
+
+|=============================================================================
+| $_exception_handler
+|=============================================================================
+
+ .globl $_exception_handler
+
+ .text
+ .even
+
+| This is the common exit point if an exception occurs.
+| NOTE: it is NOT callable from C!
+| It expects the exception type in d7, the format (SINGLE_FLOAT,
+| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
+| It sets the corresponding exception and sticky bits, and the format.
+| Depending on the format it fills the corresponding slots for the
+| operands which produced the exception (all this information is provided
+| so that if you write your own exception handlers you have enough
+| information to deal with the problem).
+| It then checks whether the corresponding exception is trap-enabled,
+| in which case it pushes the address of _fpCCR and traps through
+| trap FPTRAP (15 for the moment).
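+| For example, the Ld$overflow entry point below loads
+| IMM (INEXACT_RESULT+OVERFLOW) into d7 and IMM (DOUBLE_FLOAT) into d6
+| before jumping here.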
+
+FPTRAP = 15
+
+$_exception_handler:
+ PICLEA SYM (_fpCCR),a0
+ movew d7,a0@(EBITS) | set __exception_bits
+#ifndef __mcoldfire__
+ orw d7,a0@(STICK) | and __sticky_bits
+#else
+ movew a0@(STICK),d4
+ orl d7,d4
+ movew d4,a0@(STICK)
+#endif
+ movew d6,a0@(FORMT) | and __format
+ movew d5,a0@(LASTO) | and __last_operation
+
+| Now put the operands in place:
+#ifndef __mcoldfire__
+ cmpw IMM (SINGLE_FLOAT),d6
+#else
+ cmpl IMM (SINGLE_FLOAT),d6
+#endif
+ beq 1f
+ movel a6@(8),a0@(OPER1)
+ movel a6@(12),a0@(OPER1+4)
+ movel a6@(16),a0@(OPER2)
+ movel a6@(20),a0@(OPER2+4)
+ bra 2f
+1: movel a6@(8),a0@(OPER1)
+ movel a6@(12),a0@(OPER2)
+2:
+| And check whether the exception is trap-enabled:
+#ifndef __mcoldfire__
+ andw a0@(TRAPE),d7 | is exception trap-enabled?
+#else
+ clrl d6
+ movew a0@(TRAPE),d6
+ andl d6,d7
+#endif
+ beq 1f | no, exit
+ PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
+ trap IMM (FPTRAP) | and trap
+#ifndef __mcoldfire__
+1: moveml sp@+,d2-d7 | restore data registers
+#else
+1: moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+#endif /* L_floatex */
+
+#ifdef L_mulsi3
+ .text
+ FUNC(__mulsi3)
+ .globl SYM (__mulsi3)
+SYM (__mulsi3):
+ movew sp@(4), d0 /* x0 -> d0 */
+ muluw sp@(10), d0 /* x0*y1 */
+ movew sp@(6), d1 /* x1 -> d1 */
+ muluw sp@(8), d1 /* x1*y0 */
+#ifndef __mcoldfire__
+ addw d1, d0
+#else
+ addl d1, d0
+#endif
+ swap d0
+ clrw d0
+ movew sp@(6), d1 /* x1 -> d1 */
+ muluw sp@(10), d1 /* x1*y1 */
+ addl d1, d0
+
+ rts
+#endif /* L_mulsi3 */
+
+#ifdef L_udivsi3
+ .text
+ FUNC(__udivsi3)
+ .globl SYM (__udivsi3)
+SYM (__udivsi3):
+#ifndef __mcoldfire__
+ movel d2, sp@-
+ movel sp@(12), d1 /* d1 = divisor */
+ movel sp@(8), d0 /* d0 = dividend */
+
+ cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
+ jcc L3 /* then try next algorithm */
+ movel d0, d2
+ clrw d2
+ swap d2
+ divu d1, d2 /* high quotient in lower word */
+ movew d2, d0 /* save high quotient */
+ swap d0
+ movew sp@(10), d2 /* get low dividend + high rest */
+ divu d1, d2 /* low quotient */
+ movew d2, d0
+ jra L6
+
+L3: movel d1, d2 /* use d2 as divisor backup */
+L4: lsrl IMM (1), d1 /* shift divisor */
+ lsrl IMM (1), d0 /* shift dividend */
+ cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
+ jcc L4
+ divu d1, d0 /* now we have 16-bit divisor */
+ andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
+
+/* Multiply the 16-bit tentative quotient by the 32-bit divisor.  Because of
+ the operand ranges, this might give a 33-bit product. If this product is
+ greater than the dividend, the tentative quotient was too large. */
+ movel d2, d1
+ mulu d0, d1 /* low part, 32 bits */
+ swap d2
+ mulu d0, d2 /* high part, at most 17 bits */
+ swap d2 /* align high part with low part */
+ tstw d2 /* high part 17 bits? */
+ jne L5 /* if 17 bits, quotient was too large */
+ addl d2, d1 /* add parts */
+ jcs L5 /* if sum is 33 bits, quotient was too large */
+ cmpl sp@(8), d1 /* compare the sum with the dividend */
+ jls L6 /* if sum > dividend, quotient was too large */
+L5: subql IMM (1), d0 /* adjust quotient */
+
+L6: movel sp@+, d2
+ rts
+
+#else /* __mcoldfire__ */
+
+/* ColdFire implementation of non-restoring division algorithm from
+ Hennessy & Patterson, Appendix A. */
+ link a6,IMM (-12)
+ moveml d2-d4,sp@
+ movel a6@(8),d0
+ movel a6@(12),d1
+ clrl d2 | clear p
+ moveq IMM (31),d4
+L1: addl d0,d0 | shift reg pair (p,a) one bit left
+ addxl d2,d2
+ movl d2,d3 | subtract b from p, store in tmp.
+ subl d1,d3
+ jcs L2 | if no carry,
+ bset IMM (0),d0 | set the low order bit of a to 1,
+ movl d3,d2 | and store tmp in p.
+L2: subql IMM (1),d4
+ jcc L1
+ moveml sp@,d2-d4 | restore data registers
+ unlk a6 | and return
+ rts
+#endif /* __mcoldfire__ */
+
+#endif /* L_udivsi3 */
+
+#ifdef L_divsi3
+ .text
+ FUNC(__divsi3)
+ .globl SYM (__divsi3)
+SYM (__divsi3):
+ movel d2, sp@-
+
+ moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
+ movel sp@(12), d1 /* d1 = divisor */
+ jpl L1
+ negl d1
+#ifndef __mcoldfire__
+ negb d2 /* change sign because divisor <0 */
+#else
+ negl d2 /* change sign because divisor <0 */
+#endif
+L1: movel sp@(8), d0 /* d0 = dividend */
+ jpl L2
+ negl d0
+#ifndef __mcoldfire__
+ negb d2
+#else
+ negl d2
+#endif
+
+L2: movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
+ addql IMM (8), sp
+
+ tstb d2
+ jpl L3
+ negl d0
+
+L3: movel sp@+, d2
+ rts
+#endif /* L_divsi3 */
+
+#ifdef L_umodsi3
+ .text
+ FUNC(__umodsi3)
+ .globl SYM (__umodsi3)
+SYM (__umodsi3):
+ movel sp@(8), d1 /* d1 = divisor */
+ movel sp@(4), d0 /* d0 = dividend */
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__udivsi3)
+ addql IMM (8), sp
+ movel sp@(8), d1 /* d1 = divisor */
+#ifndef __mcoldfire__
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
+ addql IMM (8), sp
+#else
+ mulsl d1,d0
+#endif
+ movel sp@(4), d1 /* d1 = dividend */
+ subl d0, d1 /* d1 = a - (a/b)*b */
+ movel d1, d0
+ rts
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+ .text
+ FUNC(__modsi3)
+ .globl SYM (__modsi3)
+SYM (__modsi3):
+ movel sp@(8), d1 /* d1 = divisor */
+ movel sp@(4), d0 /* d0 = dividend */
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__divsi3)
+ addql IMM (8), sp
+ movel sp@(8), d1 /* d1 = divisor */
+#ifndef __mcoldfire__
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
+ addql IMM (8), sp
+#else
+ mulsl d1,d0
+#endif
+ movel sp@(4), d1 /* d1 = dividend */
+ subl d0, d1 /* d1 = a - (a/b)*b */
+ movel d1, d0
+ rts
+#endif /* L_modsi3 */
+
+
+#ifdef L_double
+
+ .globl SYM (_fpCCR)
+ .globl $_exception_handler
+
+QUIET_NaN = 0xffffffff
+
+D_MAX_EXP = 0x07ff
+D_BIAS = 1022
+DBL_MAX_EXP = D_MAX_EXP - D_BIAS
+DBL_MIN_EXP = 1 - D_BIAS
+DBL_MANT_DIG = 53
+
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+DOUBLE_FLOAT = 2
+
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
+
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| Entry points:
+
+ .globl SYM (__adddf3)
+ .globl SYM (__subdf3)
+ .globl SYM (__muldf3)
+ .globl SYM (__divdf3)
+ .globl SYM (__negdf2)
+ .globl SYM (__cmpdf2)
+ .globl SYM (__cmpdf2_internal)
+ .hidden SYM (__cmpdf2_internal)
+
+ .text
+ .even
+
+| These are common routines to return and signal exceptions.
+
+Ld$den:
+| Return and signal a denormalized number
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$infty:
+Ld$overflow:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (0x7ff00000),d0
+ movel IMM (0),d1
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+OVERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$underflow:
+| Return 0 and set the exception flags
+ movel IMM (0),d0
+ movel d0,d1
+ movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$inop:
+| Return a quiet NaN and set the exception flags
+ movel IMM (QUIET_NaN),d0
+ movel d0,d1
+ movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$div$0:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (0x7ff00000),d0
+ movel IMM (0),d1
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+|=============================================================================
+|=============================================================================
+| double precision routines
+|=============================================================================
+|=============================================================================
+
+| A double precision floating point number (double) has the format:
+|
+| struct _double {
+| unsigned int sign : 1; /* sign bit */
+| unsigned int exponent : 11; /* exponent, biased by 1022 */
+| unsigned int fraction : 52; /* fraction */
+| } double;
+|
+| Thus sizeof(double) = 8 (64 bits).
+|
+| All the routines are callable from C programs, and return the result
+| in the register pair d0-d1. They also preserve all registers except
+| d0-d1 and a0-a1.
+
+|=============================================================================
+| __subdf3
+|=============================================================================
+
+| double __subdf3(double, double);
+ FUNC(__subdf3)
+SYM (__subdf3):
+ bchg IMM (31),sp@(12) | change sign of second operand
+ | and fall through, so we always add
+|=============================================================================
+| __adddf3
+|=============================================================================
+
+| double __adddf3(double, double);
+ FUNC(__adddf3)
+SYM (__adddf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0) | everything will be done in registers
+ moveml d2-d7,sp@- | save all data registers (but d0-d1)
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | get second operand
+ movel a6@(20),d3 |
+
+ movel d0,d7 | get d0's sign bit in d7 '
+ addl d1,d1 | check and clear sign bit of a, and gain one
+ addxl d0,d0 | bit of extra precision
+ beq Ladddf$b | if zero return second operand
+
+ movel d2,d6 | save sign in d6
+ addl d3,d3 | get rid of sign bit and gain one bit of
+ addxl d2,d2 | extra precision
+ beq Ladddf$a | if zero return first operand
+
+ andl IMM (0x80000000),d7 | isolate a's sign bit '
+ swap d6 | and also b's sign bit '
+#ifndef __mcoldfire__
+ andw IMM (0x8000),d6 |
+ orw d6,d7 | and combine them into d7, so that a's sign '
+ | bit is in the high word and b's is in the '
+ | low word, so d6 is free to be used
+#else
+ andl IMM (0x8000),d6
+ orl d6,d7
+#endif
+ movel d7,a0 | now save d7 into a0, so d7 is free to
+ | be used also
+
+| Get the exponents and check for denormalized and/or infinity.
+
+ movel IMM (0x001fffff),d6 | mask for the fraction
+ movel IMM (0x00200000),d7 | mask to put hidden bit back
+
+ movel d0,d4 |
+ andl d6,d0 | get fraction in d0
+ notl d6 | make d6 into mask for the exponent
+ andl d6,d4 | get exponent in d4
+ beq Ladddf$a$den | branch if a is denormalized
+ cmpl d6,d4 | check for INFINITY or NaN
+ beq Ladddf$nf |
+ orl d7,d0 | and put hidden bit back
+Ladddf$1:
+ swap d4 | shift right exponent so that it starts
+#ifndef __mcoldfire__
+ lsrw IMM (5),d4 | in bit 0 and not bit 20
+#else
+ lsrl IMM (5),d4 | in bit 0 and not bit 20
+#endif
+| Now we have a's exponent in d4 and fraction in d0-d1 '
+ movel d2,d5 | save b to get exponent
+ andl d6,d5 | get exponent in d5
+ beq Ladddf$b$den | branch if b is denormalized
+ cmpl d6,d5 | check for INFINITY or NaN
+ beq Ladddf$nf
+ notl d6 | make d6 into mask for the fraction again
+ andl d6,d2 | and get fraction in d2
+ orl d7,d2 | and put hidden bit back
+Ladddf$2:
+ swap d5 | shift right exponent so that it starts
+#ifndef __mcoldfire__
+ lsrw IMM (5),d5 | in bit 0 and not bit 20
+#else
+ lsrl IMM (5),d5 | in bit 0 and not bit 20
+#endif
+
+| Now we have b's exponent in d5 and fraction in d2-d3. '
+
+| The situation now is as follows: the signs are combined in a0, the
+| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
+| and d5 (b). To do the rounding correctly we need to keep all the
+| bits until the end, so we need to use d0-d1-d2-d3 for the first number
+| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
+| exponents in a2-a3.
+
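+| In C terms the alignment that follows is roughly this sketch (a sketch
+| only; the real code below handles both orderings in place, takes >=32
+| and >=16 word shortcuts, and parks the exponents in a2-a3 meanwhile):
+|
+|   diff = exp_big - exp_small;
+|   if (diff >= DBL_MANT_DIG + 2)
+|     return big;                  /* small cannot affect the result */
+|   while (diff--)
+|     small >>= 1;                 /* conceptually a 128-bit shift   */
+|   exp = exp_big;
+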
+#ifndef __mcoldfire__
+ moveml a2-a3,sp@- | save the address registers
+#else
+ movel a2,sp@-
+ movel a3,sp@-
+ movel a4,sp@-
+#endif
+
+ movel d4,a2 | save the exponents
+ movel d5,a3 |
+
+ movel IMM (0),d7 | and move the numbers around
+ movel d7,d6 |
+ movel d3,d5 |
+ movel d2,d4 |
+ movel d7,d3 |
+ movel d7,d2 |
+
+| Here we shift the numbers until the exponents are the same, and put
+| the largest exponent in a2.
+#ifndef __mcoldfire__
+ exg d4,a2 | get exponents back
+ exg d5,a3 |
+ cmpw d4,d5 | compare the exponents
+#else
+ movel d4,a4 | get exponents back
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+ cmpl d4,d5 | compare the exponents
+#endif
+ beq Ladddf$3 | if equal don't shift '
+ bhi 9f | branch if second exponent is higher
+
+| Here we have a's exponent larger than b's, so we have to shift b. We do
+| this by using d2 as a counter:
+1: movew d4,d2 | move largest exponent to d2
+#ifndef __mcoldfire__
+ subw d5,d2 | and subtract second exponent
+ exg d4,a2 | get back the longs we saved
+ exg d5,a3 |
+#else
+ subl d5,d2 | and subtract second exponent
+ movel d4,a4 | get back the longs we saved
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (DBL_MANT_DIG+2),d2
+#else
+ cmpl IMM (DBL_MANT_DIG+2),d2
+#endif
+ bge Ladddf$b$small
+#ifndef __mcoldfire__
+ cmpw IMM (32),d2 | if difference >= 32, shift by longs
+#else
+ cmpl IMM (32),d2 | if difference >= 32, shift by longs
+#endif
+ bge 5f
+2:
+#ifndef __mcoldfire__
+ cmpw IMM (16),d2 | if difference >= 16, shift by words
+#else
+ cmpl IMM (16),d2 | if difference >= 16, shift by words
+#endif
+ bge 6f
+ bra 3f | enter dbra loop
+
+4:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d4
+ roxrl IMM (1),d5
+ roxrl IMM (1),d6
+ roxrl IMM (1),d7
+#else
+ lsrl IMM (1),d7
+ btst IMM (0),d6
+ beq 10f
+ bset IMM (31),d7
+10: lsrl IMM (1),d6
+ btst IMM (0),d5
+ beq 11f
+ bset IMM (31),d6
+11: lsrl IMM (1),d5
+ btst IMM (0),d4
+ beq 12f
+ bset IMM (31),d5
+12: lsrl IMM (1),d4
+#endif
+3:
+#ifndef __mcoldfire__
+ dbra d2,4b
+#else
+ subql IMM (1),d2
+ bpl 4b
+#endif
+ movel IMM (0),d2
+ movel d2,d3
+ bra Ladddf$4
+5:
+ movel d6,d7
+ movel d5,d6
+ movel d4,d5
+ movel IMM (0),d4
+#ifndef __mcoldfire__
+ subw IMM (32),d2
+#else
+ subl IMM (32),d2
+#endif
+ bra 2b
+6:
+ movew d6,d7
+ swap d7
+ movew d5,d6
+ swap d6
+ movew d4,d5
+ swap d5
+ movew IMM (0),d4
+ swap d4
+#ifndef __mcoldfire__
+ subw IMM (16),d2
+#else
+ subl IMM (16),d2
+#endif
+ bra 3b
+
+9:
+#ifndef __mcoldfire__
+ exg d4,d5
+ movew d4,d6
+ subw d5,d6 | keep d5 (largest exponent) in d4
+ exg d4,a2
+ exg d5,a3
+#else
+ movel d5,d6
+ movel d4,d5
+ movel d6,d4
+ subl d5,d6
+ movel d4,a4
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (DBL_MANT_DIG+2),d6
+#else
+ cmpl IMM (DBL_MANT_DIG+2),d6
+#endif
+ bge Ladddf$a$small
+#ifndef __mcoldfire__
+ cmpw IMM (32),d6 | if difference >= 32, shift by longs
+#else
+ cmpl IMM (32),d6 | if difference >= 32, shift by longs
+#endif
+ bge 5f
+2:
+#ifndef __mcoldfire__
+ cmpw IMM (16),d6 | if difference >= 16, shift by words
+#else
+ cmpl IMM (16),d6 | if difference >= 16, shift by words
+#endif
+ bge 6f
+ bra 3f | enter dbra loop
+
+4:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+#endif
+3:
+#ifndef __mcoldfire__
+ dbra d6,4b
+#else
+ subql IMM (1),d6
+ bpl 4b
+#endif
+ movel IMM (0),d7
+ movel d7,d6
+ bra Ladddf$4
+5:
+ movel d2,d3
+ movel d1,d2
+ movel d0,d1
+ movel IMM (0),d0
+#ifndef __mcoldfire__
+ subw IMM (32),d6
+#else
+ subl IMM (32),d6
+#endif
+ bra 2b
+6:
+ movew d2,d3
+ swap d3
+ movew d1,d2
+ swap d2
+ movew d0,d1
+ swap d1
+ movew IMM (0),d0
+ swap d0
+#ifndef __mcoldfire__
+ subw IMM (16),d6
+#else
+ subl IMM (16),d6
+#endif
+ bra 3b
+Ladddf$3:
+#ifndef __mcoldfire__
+ exg d4,a2
+ exg d5,a3
+#else
+ movel d4,a4
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+Ladddf$4:
+| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
+| the signs in a0.
+
+| Here we have to decide whether to add or subtract the numbers:
+#ifndef __mcoldfire__
+ exg d7,a0 | get the signs
+ exg d6,a3 | a3 is free to be used
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ movel d7,d6 |
+ movew IMM (0),d7 | get a's sign in d7 '
+ swap d6 |
+ movew IMM (0),d6 | and b's sign in d6 '
+ eorl d7,d6 | compare the signs
+ bmi Lsubdf$0 | if the signs are different we have
+ | to subtract
+#ifndef __mcoldfire__
+ exg d7,a0 | else we add the numbers
+ exg d6,a3 |
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ addl d7,d3 |
+ addxl d6,d2 |
+ addxl d5,d1 |
+ addxl d4,d0 |
+
+ movel a2,d4 | return exponent to d4
+ movel a0,d7 |
+ andl IMM (0x80000000),d7 | d7 now has the sign
+
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| Since in the addition (not in the subtraction!) we could have set
+| one more bit, we check for this:
+ btst IMM (DBL_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+ lea pc@(Ladddf$5),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Ladddf$5:
+| Put back the exponent and check for overflow
+#ifndef __mcoldfire__
+ cmpw IMM (0x7ff),d4 | is the exponent big?
+#else
+ cmpl IMM (0x7ff),d4 | is the exponent big?
+#endif
+ bge 1f
+ bclr IMM (DBL_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | put exponent back into position
+#else
+ lsll IMM (4),d4 | put exponent back into position
+#endif
+ swap d0 |
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ bra Ladddf$ret
+1:
+ moveq IMM (ADD),d5
+ bra Ld$overflow
+
+Lsubdf$0:
+| Here we do the subtraction.
+#ifndef __mcoldfire__
+ exg d7,a0 | put sign back in a0
+ exg d6,a3 |
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ subl d7,d3 |
+ subxl d6,d2 |
+ subxl d5,d1 |
+ subxl d4,d0 |
+ beq Ladddf$ret$1 | if zero just exit
+ bpl 1f | if positive skip the following
+ movel a0,d7 |
+ bchg IMM (31),d7 | change sign bit in d7
+ movel d7,a0 |
+ negl d3 |
+ negxl d2 |
+ negxl d1 | and negate result
+ negxl d0 |
+1:
+ movel a2,d4 | return exponent to d4
+ movel a0,d7
+ andl IMM (0x80000000),d7 | isolate sign bit
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3 |
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| Since in the addition (not in the subtraction!) we could have set
+| one more bit, we check for this:
+ btst IMM (DBL_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+ lea pc@(Lsubdf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lsubdf$1:
+| Put back the exponent and sign (we don't have overflow). '
+ bclr IMM (DBL_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | put exponent back into position
+#else
+ lsll IMM (4),d4 | put exponent back into position
+#endif
+ swap d0 |
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ bra Ladddf$ret
+
+| If one of the numbers was too small (difference of exponents >=
+| DBL_MANT_DIG+1) we return the other (and now we don't have to '
+| check for finiteness or zero).
+Ladddf$a$small:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+ movel a6@(16),d0
+ movel a6@(20),d1
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Ladddf$b$small:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+ movel a6@(8),d0
+ movel a6@(12),d1
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Ladddf$a$den:
+ movel d7,d4 | d7 contains 0x00200000
+ bra Ladddf$1
+
+Ladddf$b$den:
+ movel d7,d5 | d7 contains 0x00200000
+ notl d6
+ bra Ladddf$2
+
+Ladddf$b:
+| Return b (if a is zero)
+ movel d2,d0
+ movel d3,d1
+ bne 1f | Check if b is -0
+ cmpl IMM (0x80000000),d0
+ bne 1f
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Ladddf$ret
+Ladddf$a:
+ movel a6@(8),d0
+ movel a6@(12),d1
+1:
+ moveq IMM (ADD),d5
+| Check for NaN and +/-INFINITY.
+ movel d0,d7 |
+ andl IMM (0x80000000),d7 |
+ bclr IMM (31),d0 |
+ cmpl IMM (0x7ff00000),d0 |
+ bge 2f |
+ movel d0,d0 | check for zero, since we don't '
+ bne Ladddf$ret | want to return -0 by mistake
+ bclr IMM (31),d7 |
+ bra Ladddf$ret |
+2:
+ andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
+ orl d1,d0 |
+ bne Ld$inop |
+ bra Ld$infty |
+
+Ladddf$ret$1:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3 | restore regs and exit
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+Ladddf$ret:
+| Normal exit.
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+ orl d7,d0 | put sign bit back
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Ladddf$ret$den:
+| Return a denormalized number.
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0 | shift right once more
+ roxrl IMM (1),d1 |
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+ bra Ladddf$ret
+
+Ladddf$nf:
+ moveq IMM (ADD),d5
+| This could be faster but it is not worth the effort, since it is not
+| executed very often. We sacrifice speed for clarity here.
+ movel a6@(8),d0 | get the numbers back (remember that we
+ movel a6@(12),d1 | did some processing already)
+ movel a6@(16),d2 |
+ movel a6@(20),d3 |
+ movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
+ movel d0,d7 | save sign bits
+ movel d2,d6 |
+ bclr IMM (31),d0 | clear sign bits
+ bclr IMM (31),d2 |
+| We know that one of them is either NaN or +/-INFINITY.
+| Check for NaN (if either one is NaN return NaN)
+ cmpl d4,d0 | check first a (d0)
+ bhi Ld$inop | if d0 > 0x7ff00000, a is NaN
+ bne 2f | if d0 < 0x7ff00000, a is finite
+ tstl d1 | if d0 == 0x7ff00000, check d1:
+ bne Ld$inop | nonzero means a is NaN
+2: cmpl d4,d2 | check now b (d2)
+ bhi Ld$inop |
+ bne 3f
+ tstl d3 |
+ bne Ld$inop |
+3:
+| Now comes the check for +/-INFINITY. We know that both are (maybe not
+| finite) numbers, but we have to check if both are infinite whether we
+| are adding or subtracting them.
+ eorl d7,d6 | to check sign bits
+ bmi 1f
+ andl IMM (0x80000000),d7 | get (common) sign bit
+ bra Ld$infty
+1:
+| We know one (or both) are infinite, so we test for equality between the
+| two numbers (if they are equal they have to be infinite both, so we
+| return NaN).
+ cmpl d2,d0 | are both infinite?
+ bne 1f | if d0 <> d2 they are not equal
+ cmpl d3,d1 | if d0 == d2 test d3 and d1
+ beq Ld$inop | if equal return NaN
+1:
+ andl IMM (0x80000000),d7 | get a's sign bit '
+ cmpl d4,d0 | test now for infinity
+ beq Ld$infty | if a is INFINITY return with this sign
+ bchg IMM (31),d7 | else we know b is INFINITY and has
+ bra Ld$infty | the opposite sign
+
+|=============================================================================
+| __muldf3
+|=============================================================================
+
+| double __muldf3(double, double);
+ FUNC(__muldf3)
+SYM (__muldf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0-d1
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | and b into d2-d3
+ movel a6@(20),d3 |
+ movel d0,d7 | d7 will hold the sign of the product
+ eorl d2,d7 |
+ andl IMM (0x80000000),d7 |
+ movel d7,a0 | save sign bit into a0
+ movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
+ movel d7,d6 | another (mask for fraction)
+ notl d6 |
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d4 |
+ orl d1,d4 |
+ beq Lmuldf$a$0 | branch if a is zero
+ movel d0,d4 |
+ bclr IMM (31),d2 | get rid of b's sign bit '
+ movel d2,d5 |
+ orl d3,d5 |
+ beq Lmuldf$b$0 | branch if b is zero
+ movel d2,d5 |
+ cmpl d7,d0 | is a big?
+ bhi Lmuldf$inop | if a is NaN return NaN
+ beq Lmuldf$a$nf | we still have to check d1 and b ...
+ cmpl d7,d2 | now compare b with INFINITY
+ bhi Lmuldf$inop | is b NaN?
+ beq Lmuldf$b$nf | we still have to check d3 ...
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d4 and d5.
+ andl d7,d4 | isolate exponent in d4
+ beq Lmuldf$a$den | if exponent zero, have denormalized
+ andl d6,d0 | isolate fraction
+ orl IMM (0x00100000),d0 | and put hidden bit back
+ swap d4 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (4),d4 |
+#else
+ lsrl IMM (4),d4 |
+#endif
+Lmuldf$1:
+ andl d7,d5 |
+ beq Lmuldf$b$den |
+ andl d6,d2 |
+ orl IMM (0x00100000),d2 | and put hidden bit back
+ swap d5 |
+#ifndef __mcoldfire__
+ lsrw IMM (4),d5 |
+#else
+ lsrl IMM (4),d5 |
+#endif
+Lmuldf$2: |
+#ifndef __mcoldfire__
+ addw d5,d4 | add exponents
+ subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
+#else
+ addl d5,d4 | add exponents
+ subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
+#endif
+
+| We are now ready to do the multiplication. The situation is as follows:
+| both a and b have bit 52 (bit 20 of d0 and d2) set (even if they were
+| denormalized to start with!), which means that in the product bit 104
+| (which will correspond to bit 8 of the fourth long) is set.
+
+| Here we have to do the product.
+| To do it we have to juggle the registers back and forth, as there are not
+| enough to keep everything in them. So we use the address registers to keep
+| some intermediate data.
+
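+| The multiply loop below is a textbook shift-and-add: on every step the
+| 128-bit partial sum and the multiplier are shifted left once, and a is
+| added in whenever a 1 bit falls out of the multiplier.  A hedged C
+| sketch (sum is conceptually 128-bit; the names are illustrative):
+|
+|   sum = 0;
+|   for (i = 0; i < DBL_MANT_DIG; i++) {
+|     sum <<= 1;
+|     if (b & TOP_BIT)           /* bit about to leave the multiplier */
+|       sum += a;
+|     b <<= 1;
+|   }
+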
+#ifndef __mcoldfire__
+ moveml a2-a3,sp@- | save a2 and a3 for temporary use
+#else
+ movel a2,sp@-
+ movel a3,sp@-
+ movel a4,sp@-
+#endif
+ movel IMM (0),a2 | a2 is a null register
+ movel d4,a3 | and a3 will preserve the exponent
+
+| First, shift d2-d3 so bit 20 becomes bit 31:
+#ifndef __mcoldfire__
+ rorl IMM (5),d2 | rotate d2 5 places right
+ swap d2 | and swap it
+ rorl IMM (5),d3 | do the same thing with d3
+ swap d3 |
+ movew d3,d6 | get the rightmost 11 bits of d3
+ andw IMM (0x07ff),d6 |
+ orw d6,d2 | and put them into d2
+ andw IMM (0xf800),d3 | clear those bits in d3
+#else
+ moveq IMM (11),d7 | left shift d2 11 bits
+ lsll d7,d2
+ movel d3,d6 | get a copy of d3
+ lsll d7,d3 | left shift d3 11 bits
+ andl IMM (0xffe00000),d6 | get the top 11 bits of d3
+ moveq IMM (21),d7 | right shift them 21 bits
+ lsrl d7,d6
+ orl d6,d2 | stick them at the end of d2
+#endif
+
+ movel d2,d6 | move b into d6-d7
+ movel d3,d7 | move a into d4-d5
+ movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
+ movel d1,d5 |
+ movel IMM (0),d3 |
+ movel d3,d2 |
+ movel d3,d1 |
+ movel d3,d0 |
+
+| We use a1 as counter:
+ movel IMM (DBL_MANT_DIG-1),a1
+#ifndef __mcoldfire__
+ exg d7,a1
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+#endif
+
+1:
+#ifndef __mcoldfire__
+ exg d7,a1 | put counter back in a1
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+#endif
+ addl d3,d3 | shift sum once left
+ addxl d2,d2 |
+ addxl d1,d1 |
+ addxl d0,d0 |
+ addl d7,d7 |
+ addxl d6,d6 |
+ bcc 2f | if bit clear skip the following
+#ifndef __mcoldfire__
+ exg d7,a2 |
+#else
+ movel d7,a4
+ movel a2,d7
+ movel a4,a2
+#endif
+ addl d5,d3 | else add a to the sum
+ addxl d4,d2 |
+ addxl d7,d1 |
+ addxl d7,d0 |
+#ifndef __mcoldfire__
+ exg d7,a2 |
+#else
+ movel d7,a4
+ movel a2,d7
+ movel a4,a2
+#endif
+2:
+#ifndef __mcoldfire__
+ exg d7,a1 | put counter in d7
+ dbf d7,1b | decrement and branch
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+ subql IMM (1),d7
+ bpl 1b
+#endif
+
+ movel a3,d4 | restore exponent
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
+| first thing to do now is to normalize it so bit 8 becomes bit
+| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
+ swap d0
+ swap d1
+ movew d1,d0
+ swap d2
+ movew d2,d1
+ swap d3
+ movew d3,d2
+ movew IMM (0),d3
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+#else
+ moveq IMM (29),d6
+ lsrl IMM (3),d3
+ movel d2,d7
+ lsll d6,d7
+ orl d7,d3
+ lsrl IMM (3),d2
+ movel d1,d7
+ lsll d6,d7
+ orl d7,d2
+ lsrl IMM (3),d1
+ movel d0,d7
+ lsll d6,d7
+ orl d7,d1
+ lsrl IMM (3),d0
+#endif
+
+| Now round, check for over- and underflow, and exit.
+ movel a0,d7 | get sign bit back into d7
+ moveq IMM (MULTIPLY),d5
+
+ btst IMM (DBL_MANT_DIG+1-32),d0
+ beq Lround$exit
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+ bra Lround$exit
+
+Lmuldf$inop:
+ moveq IMM (MULTIPLY),d5
+ bra Ld$inop
+
+Lmuldf$b$nf:
+ moveq IMM (MULTIPLY),d5
+ movel a0,d7 | get sign bit back into d7
+ tstl d3 | we know d2 == 0x7ff00000, so check d3
+ bne Ld$inop | if d3 <> 0 b is NaN
+ bra Ld$overflow | else we have overflow (since a is finite)
+
+Lmuldf$a$nf:
+ moveq IMM (MULTIPLY),d5
+ movel a0,d7 | get sign bit back into d7
+ tstl d1 | we know d0 == 0x7ff00000, so check d1
+ bne Ld$inop | if d1 <> 0 a is NaN
+ bra Ld$overflow | else signal overflow
+
+| If either number is zero return zero, unless the other is +/-INFINITY or
+| NaN, in which case we return NaN.
+Lmuldf$b$0:
+ moveq IMM (MULTIPLY),d5
+#ifndef __mcoldfire__
+ exg d2,d0 | put b (==0) into d0-d1
+ exg d3,d1 | and a (with sign bit cleared) into d2-d3
+ movel a0,d0 | set result sign
+#else
+ movel d0,d2 | put a into d2-d3
+ movel d1,d3
+ movel a0,d0 | put result zero into d0-d1
+ moveq IMM (0),d1
+#endif
+ bra 1f
+Lmuldf$a$0:
+ movel a0,d0 | set result sign
+ movel a6@(16),d2 | put b into d2-d3 again
+ movel a6@(20),d3 |
+ bclr IMM (31),d2 | clear sign bit
+1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
+ bge Ld$inop | in case NaN or +/-INFINITY return NaN
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| hidden bit back into the fraction; instead we shift left until bit 20
+| (the hidden bit) is set, adjusting the exponent accordingly. We do this
+| to ensure that the product of the fractions is close to 1.
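+| A hedged C sketch of that normalization (names illustrative):
+|
+|   exp = 1;                             /* denormals get exponent 1 */
+|   while (!(frac_hi & 0x00100000)) {    /* until bit 20 is set      */
+|     frac <<= 1;                        /* 64-bit shift             */
+|     exp--;
+|   }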
+Lmuldf$a$den:
+ movel IMM (1),d4
+ andl d6,d0
+1: addl d1,d1 | shift a left until bit 20 is set
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ subw IMM (1),d4 | and adjust exponent
+#else
+ subl IMM (1),d4 | and adjust exponent
+#endif
+ btst IMM (20),d0 |
+ bne Lmuldf$1 |
+ bra 1b
+
+Lmuldf$b$den:
+ movel IMM (1),d5
+ andl d6,d2
+1: addl d3,d3 | shift b left until bit 20 is set
+ addxl d2,d2 |
+#ifndef __mcoldfire__
+ subw IMM (1),d5 | and adjust exponent
+#else
+ subql IMM (1),d5 | and adjust exponent
+#endif
+ btst IMM (20),d2 |
+ bne Lmuldf$2 |
+ bra 1b
+
+
+|=============================================================================
+| __divdf3
+|=============================================================================
+
+| double __divdf3(double, double);
+ FUNC(__divdf3)
+SYM (__divdf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0-d1
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | and b into d2-d3
+ movel a6@(20),d3 |
+ movel d0,d7 | d7 will hold the sign of the result
+ eorl d2,d7 |
+ andl IMM (0x80000000),d7
+ movel d7,a0 | save sign into a0
+ movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
+ movel d7,d6 | another (mask for fraction)
+ notl d6 |
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d4 |
+ orl d1,d4 |
+ beq Ldivdf$a$0 | branch if a is zero
+ movel d0,d4 |
+ bclr IMM (31),d2 | get rid of b's sign bit '
+ movel d2,d5 |
+ orl d3,d5 |
+ beq Ldivdf$b$0 | branch if b is zero
+ movel d2,d5
+ cmpl d7,d0 | is a big?
+ bhi Ldivdf$inop | if a is NaN return NaN
+ beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
+ cmpl d7,d2 | now compare b with INFINITY
+ bhi Ldivdf$inop | if b is NaN return NaN
+ beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d4 and d5 and normalize the numbers to
+| ensure that the ratio of the fractions is around 1. We do this by
+| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
+| set, even if they were denormalized to start with.
+| Thus, the result will satisfy: 2 > result > 1/2.
+ andl d7,d4 | and isolate exponent in d4
+ beq Ldivdf$a$den | if exponent is zero we have a denormalized
+ andl d6,d0 | and isolate fraction
+ orl IMM (0x00100000),d0 | and put hidden bit back
+ swap d4 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (4),d4 |
+#else
+ lsrl IMM (4),d4 |
+#endif
+Ldivdf$1: |
+ andl d7,d5 |
+ beq Ldivdf$b$den |
+ andl d6,d2 |
+ orl IMM (0x00100000),d2
+ swap d5 |
+#ifndef __mcoldfire__
+ lsrw IMM (4),d5 |
+#else
+ lsrl IMM (4),d5 |
+#endif
+Ldivdf$2: |
+#ifndef __mcoldfire__
+ subw d5,d4 | subtract exponents
+ addw IMM (D_BIAS),d4 | and add bias
+#else
+ subl d5,d4 | subtract exponents
+ addl IMM (D_BIAS),d4 | and add bias
+#endif
+
+| We are now ready to do the division. We have prepared things in such a way
+| that the ratio of the fractions will be less than 2 but greater than 1/2.
+| At this point the registers in use are:
+| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
+| DBL_MANT_DIG-1-32=1)
+| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
+| d4 holds the difference of the exponents, corrected by the bias
+| a0 holds the sign of the ratio
+
+| To do the rounding correctly we need to keep information about the
+| nonsignificant bits. One way to do this would be to do the division
+| using four registers; another is to use two registers (as I originally
+| did), but use a sticky bit to preserve information about the
+| fractional part. Note that we can keep that info in a1, which is not
+| used.
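+| Each quotient bit comes from one compare/subtract/shift step of a
+| restoring division.  As a rough C sketch (a and b are the 64-bit
+| fractions; names illustrative):
+|
+|   for (i = bits - 1; i >= 0; i--) {
+|     if (a >= b) {
+|       a -= b;
+|       q |= 1UL << i;          /* record a quotient bit   */
+|     }
+|     a <<= 1;
+|   }
+|   sticky = (a != 0);          /* leftover remainder bits */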
+ movel IMM (0),d6 | d6-d7 will hold the result
+ movel d6,d7 |
+ movel IMM (0),a1 | and a1 will hold the sticky bit
+
+ movel IMM (DBL_MANT_DIG-32+1),d5
+
+1: cmpl d0,d2 | is a < b?
+ bhi 3f | if b > a skip the following
+ beq 4f | if d0==d2 check d1 and d3
+2: subl d3,d1 |
+ subxl d2,d0 | a <-- a - b
+ bset d5,d6 | set the corresponding bit in d6
+3: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ bra 5f
+4: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 3b | if d3 > d1 (so b > a) skip the subtraction
+ bra 2b | else go do it
+5:
+| Here we have to start setting the bits in the second long.
+ movel IMM (31),d5 | again d5 is counter
+
+1: cmpl d0,d2 | is a < b?
+ bhi 3f | if b > a skip the following
+ beq 4f | if d0==d2 check d1 and d3
+2: subl d3,d1 |
+ subxl d2,d0 | a <-- a - b
+ bset d5,d7 | set the corresponding bit in d7
+3: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ bra 5f
+4: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 3b | if d3 > d1 (so b > a) skip the subtraction
+ bra 2b | else go do it
+5:
+| Now go ahead checking until we hit a one, which we store in d2.
+ movel IMM (DBL_MANT_DIG),d5
+1: cmpl d2,d0 | is a < b?
+ bhi 4f | if b < a, exit
+ beq 3f | if d0==d2 check d1 and d3
+2: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ movel IMM (0),d2 | here no sticky bit was found
+ movel d2,d3
+ bra 5f
+3: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 2b | if d3 > d1 (so b > a) go back
+4:
+| Here put the sticky bit in d2-d3 (in the position which actually corresponds
+| to it; if you don't do this the rounding goes wrong in some cases). '
+ movel IMM (0),d2
+ movel d2,d3
+#ifndef __mcoldfire__
+ subw IMM (DBL_MANT_DIG),d5
+ addw IMM (63),d5
+ cmpw IMM (31),d5
+#else
+ subl IMM (DBL_MANT_DIG),d5
+ addl IMM (63),d5
+ cmpl IMM (31),d5
+#endif
+ bhi 2f
+1: bset d5,d3
+ bra 5f
+#ifndef __mcoldfire__
+ subw IMM (32),d5
+#else
+ subl IMM (32),d5
+#endif
+2: bset d5,d2
+5:
+| Finally we are finished! Move the quotient from d6-d7 into its final
+| destination, d0-d1:
+ movel d6,d0
+ movel d7,d1
+ movel IMM (0),d3
+
+| Here we have finished the division, with the result in d0-d1-d2-d3, with
+| 2^21 <= d0 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
+| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
+| not set:
+ btst IMM (DBL_MANT_DIG-32+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+| Now round, check for over- and underflow, and exit.
+ movel a0,d7 | restore sign bit to d7
+ moveq IMM (DIVIDE),d5
+ bra Lround$exit
+
+Ldivdf$inop:
+ moveq IMM (DIVIDE),d5
+ bra Ld$inop
+
+Ldivdf$a$0:
+| If a is zero check to see whether b is zero also. In that case return
+| NaN; then check if b is NaN, and return NaN also in that case. Else
+| return a properly signed zero.
+ moveq IMM (DIVIDE),d5
+ bclr IMM (31),d2 |
+ movel d2,d4 |
+ orl d3,d4 |
+ beq Ld$inop | if b is also zero return NaN
+ cmpl IMM (0x7ff00000),d2 | check for NaN
+ bhi Ld$inop |
+ blt 1f |
+ tstl d3 |
+ bne Ld$inop |
+1: movel a0,d0 | else return signed zero
+ moveq IMM(0),d1 |
+ PICLEA SYM (_fpCCR),a0 | clear exception flags
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7 |
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+Ldivdf$b$0:
+ moveq IMM (DIVIDE),d5
+| If we got here a is not zero. Check if a is NaN; in that case return NaN,
+| else return +/-INFINITY. Remember that a is in d0 with the sign bit
+| cleared already.
+ movel a0,d7 | put a's sign bit back in d7 '
+ cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
+ bhi Ld$inop | if larger it is NaN
+ tstl d1 |
+ bne Ld$inop |
+ bra Ld$div$0 | else signal DIVIDE_BY_ZERO
+
+Ldivdf$b$nf:
+ moveq IMM (DIVIDE),d5
+| If d2 == 0x7ff00000 we have to check d3.
+ tstl d3 |
+ bne Ld$inop | if d3 <> 0, b is NaN
+ bra Ld$underflow | else b is +/-INFINITY, so signal underflow
+
+Ldivdf$a$nf:
+ moveq IMM (DIVIDE),d5
+| If d0 == 0x7ff00000 we have to check d1.
+ tstl d1 |
+ bne Ld$inop | if d1 <> 0, a is NaN
+| If a is INFINITY we have to check b
+ cmpl d7,d2 | compare b with INFINITY
+ bge Ld$inop | if b is NaN or INFINITY return NaN
+ tstl d3 |
+ bne Ld$inop |
+ bra Ld$overflow | else return overflow
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| bit back into the fraction.
+Ldivdf$a$den:
+ movel IMM (1),d4
+ andl d6,d0
+1: addl d1,d1 | shift a left until bit 20 is set
+ addxl d0,d0
+#ifndef __mcoldfire__
+ subw IMM (1),d4 | and adjust exponent
+#else
+ subl IMM (1),d4 | and adjust exponent
+#endif
+ btst IMM (DBL_MANT_DIG-32-1),d0
+ bne Ldivdf$1
+ bra 1b
+
+Ldivdf$b$den:
+ movel IMM (1),d5
+ andl d6,d2
+1: addl d3,d3 | shift b left until bit 20 is set
+ addxl d2,d2
+#ifndef __mcoldfire__
+ subw IMM (1),d5 | and adjust exponent
+#else
+ subql IMM (1),d5 | and adjust exponent
+#endif
+ btst IMM (DBL_MANT_DIG-32-1),d2
+ bne Ldivdf$2
+ bra 1b
+
+Lround$exit:
+| This is a common exit point for __muldf3 and __divdf3. When they enter
+| this point the sign of the result is in d7, the result in d0-d1, normalized
+| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
+
+| First check for underflow in the exponent:
+#ifndef __mcoldfire__
+ cmpw IMM (-DBL_MANT_DIG-1),d4
+#else
+ cmpl IMM (-DBL_MANT_DIG-1),d4
+#endif
+ blt Ld$underflow
+| It could happen that the exponent is less than 1, in which case the
+| number is denormalized. In this case we shift right and adjust the
+| exponent until it becomes 1 or the fraction is zero (in the latter case
+| we signal underflow and return zero).
+ movel d7,a0 |
+ movel IMM (0),d6 | use d6-d7 to collect bits flushed right
+ movel d6,d7 | use d6-d7 to collect bits flushed right
+#ifndef __mcoldfire__
+ cmpw IMM (1),d4 | if the exponent is less than 1 we
+#else
+ cmpl IMM (1),d4 | if the exponent is less than 1 we
+#endif
+ bge 2f | have to shift right (denormalize)
+1:
+#ifndef __mcoldfire__
+ addw IMM (1),d4 | adjust the exponent
+ lsrl IMM (1),d0 | shift right once
+ roxrl IMM (1),d1 |
+ roxrl IMM (1),d2 |
+ roxrl IMM (1),d3 |
+ roxrl IMM (1),d6 |
+ roxrl IMM (1),d7 |
+ cmpw IMM (1),d4 | is the exponent 1 already?
+#else
+ addl IMM (1),d4 | adjust the exponent
+ lsrl IMM (1),d7
+ btst IMM (0),d6
+ beq 13f
+ bset IMM (31),d7
+13: lsrl IMM (1),d6
+ btst IMM (0),d3
+ beq 14f
+ bset IMM (31),d6
+14: lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ cmpl IMM (1),d4 | is the exponent 1 already?
+#endif
+ beq 2f | if not loop back
+ bra 1b |
+ bra Ld$underflow | safety check, shouldn't execute '
+2: orl d6,d2 | this is a trick so we don't lose '
+ orl d7,d3 | the bits which were flushed right
+ movel a0,d7 | get back sign bit into d7
+| Now call the rounding routine (which takes care of denormalized numbers):
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lround$0:
+| Here we have a correctly rounded result (either normalized or denormalized).
+
+| Here we should have either a normalized number or a denormalized one, and
+| the exponent is necessarily larger or equal to 1 (so we don't have to '
+| check again for underflow!). We have to check for overflow or for a
+| denormalized number (which also signals underflow).
+| Check for overflow (i.e., exponent >= 0x7ff).
+#ifndef __mcoldfire__
+ cmpw IMM (0x07ff),d4
+#else
+ cmpl IMM (0x07ff),d4
+#endif
+ bge Ld$overflow
+| Now check for a denormalized number (exponent==0):
+ movew d4,d4
+ beq Ld$den
+1:
+| Put back the exponents and sign and return.
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | exponent back to fourth byte
+#else
+ lsll IMM (4),d4 | exponent back to fourth byte
+#endif
+ bclr IMM (DBL_MANT_DIG-32-1),d0
+ swap d0 | and put back exponent
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ orl d7,d0 | and sign also
+
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+|=============================================================================
+| __negdf2
+|=============================================================================
+
+| double __negdf2(double);
+ FUNC(__negdf2)
+SYM (__negdf2):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (NEGATE),d5
+ movel a6@(8),d0 | get number to negate in d0-d1
+ movel a6@(12),d1 |
+ bchg IMM (31),d0 | negate
+ movel d0,d2 | make a positive copy (for the tests)
+ bclr IMM (31),d2 |
+ movel d2,d4 | check for zero
+ orl d1,d4 |
+ beq 2f | if zero (either sign) return +zero
+ cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
+ blt 1f | if finite, return
+ bhi Ld$inop | if larger (fraction not zero) is NaN
+ tstl d1 | if d2 == 0x7ff00000 check d1
+ bne Ld$inop |
+ movel d0,d7 | else get sign and return INFINITY
+ andl IMM (0x80000000),d7
+ bra Ld$infty
+1: PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+2: bclr IMM (31),d0
+ bra 1b
+
+|=============================================================================
+| __cmpdf2
+|=============================================================================
+
+GREATER = 1
+LESS = -1
+EQUAL = 0
+
+| int __cmpdf2_internal(double, double, int);
+SYM (__cmpdf2_internal):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@- | save registers
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (COMPARE),d5
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | get second operand
+ movel a6@(20),d3 |
+| First check if a and/or b are (+/-) zero and in that case clear
+| the sign bit.
+ movel d0,d6 | copy signs into d6 (a) and d7(b)
+ bclr IMM (31),d0 | and clear signs in d0 and d2
+ movel d2,d7 |
+ bclr IMM (31),d2 |
+ cmpl IMM (0x7ff00000),d0 | check for a == NaN
+ bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
+ beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
+ movel d0,d4 | copy into d4 to test for zero
+ orl d1,d4 |
+ beq Lcmpdf$a$0 |
+Lcmpdf$0:
+ cmpl IMM (0x7ff00000),d2 | check for b == NaN
+ bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
+ beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
+ movel d2,d4 |
+ orl d3,d4 |
+ beq Lcmpdf$b$0 |
+Lcmpdf$1:
+| Check the signs
+ eorl d6,d7
+ bpl 1f
+| If the signs are not equal check if a >= 0
+ tstl d6
+ bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
+ bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
+1:
+| If the signs are equal check for < 0
+ tstl d6
+ bpl 1f
+| If both are negative exchange them
+#ifndef __mcoldfire__
+ exg d0,d2
+ exg d1,d3
+#else
+ movel d0,d7
+ movel d2,d0
+ movel d7,d2
+ movel d1,d7
+ movel d3,d1
+ movel d7,d3
+#endif
+1:
+| Now that they are positive we just compare them as longs (this also
+| works for denormalized numbers, since the encoding orders them correctly).
+ cmpl d0,d2
+ bhi Lcmpdf$b$gt$a | |b| > |a|
+ bne Lcmpdf$a$gt$b | |b| < |a|
+| If we got here d0 == d2, so we compare d1 and d3.
+ cmpl d1,d3
+ bhi Lcmpdf$b$gt$a | |b| > |a|
+ bne Lcmpdf$a$gt$b | |b| < |a|
+| If we got here a == b.
+ movel IMM (EQUAL),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpdf$a$gt$b:
+ movel IMM (GREATER),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpdf$b$gt$a:
+ movel IMM (LESS),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Lcmpdf$a$0:
+ bclr IMM (31),d6
+ bra Lcmpdf$0
+Lcmpdf$b$0:
+ bclr IMM (31),d7
+ bra Lcmpdf$1
+
+Lcmpdf$a$nf:
+ tstl d1
+ bne Ld$inop
+ bra Lcmpdf$0
+
+Lcmpdf$b$nf:
+ tstl d3
+ bne Ld$inop
+ bra Lcmpdf$1
+
+Lcmpd$inop:
+ movl a6@(24),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
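+| The public comparison entry point simply re-calls the internal routine
+| with 1 as the value to return for unordered (NaN) operands, i.e. roughly:
+|
+|   int __cmpdf2 (double a, double b)
+|   {
+|     return __cmpdf2_internal (a, b, 1);
+|   }
+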
+| int __cmpdf2(double, double);
+ FUNC(__cmpdf2)
+SYM (__cmpdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+
+|=============================================================================
+| rounding routines
+|=============================================================================
+
+| The rounding routines expect the number to be normalized in registers
+| d0-d1-d2-d3, with the exponent in register d4. They assume that the
+| exponent is larger or equal to 1. They return a properly normalized number
+| if possible, and a denormalized number otherwise. The exponent is returned
+| in d4.
+
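+| For round-to-nearest the bottom (guard) bit and the sticky bits decide,
+| with ties broken to even.  A condensed C sketch of the policy coded
+| below (frac carries one extra guard bit at the bottom):
+|
+|   if ((frac & 1) && (sticky || (frac & 2)))
+|     frac += 2;                /* round up; ties go to even */
+|   frac >>= 1;                 /* drop the guard bit        */
+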
+Lround$to$nearest:
+| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
+| Here we assume that the exponent is not too small (this should be checked
+| before entering the rounding routine), but the number could be denormalized.
+
+| Check for denormalized numbers:
+1: btst IMM (DBL_MANT_DIG-32),d0
+ bne 2f | if set the number is normalized
+| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
+| is one (remember that a denormalized number corresponds to an
+| exponent of -D_BIAS+1).
+#ifndef __mcoldfire__
+ cmpw IMM (1),d4 | remember that the exponent is at least one
+#else
+ cmpl IMM (1),d4 | remember that the exponent is at least one
+#endif
+ beq 2f | an exponent of one means denormalized
+ addl d3,d3 | else shift and adjust the exponent
+ addxl d2,d2 |
+ addxl d1,d1 |
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d4,1b |
+#else
+ subql IMM (1), d4
+ bpl 1b
+#endif
+2:
+| Now round: we do it as follows: after the shifting we can write the
+| fraction part as f + delta, where 1 < f < 2^(DBL_MANT_DIG+1), and 0 <= delta <= 2.
+| If delta < 1, do nothing. If delta > 1, add 1 to f.
+| If delta == 1, we make sure the rounded number will be even
+| (after shifting).
+ btst IMM (0),d1 | is delta < 1?
+ beq 2f | if so, do not do anything
+ orl d2,d3 | is delta == 1?
+ bne 1f | if so round to even
+ movel d1,d3 |
+ andl IMM (2),d3 | bit 1 is the last significant bit
+ movel IMM (0),d2 |
+ addl d3,d1 |
+ addxl d2,d0 |
+ bra 2f |
+1: movel IMM (1),d3 | else add 1
+ movel IMM (0),d2 |
+ addl d3,d1 |
+ addxl d2,d0
+| Shift right once (because we used bit #DBL_MANT_DIG-32!).
+2:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+
+| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
+| 'fraction overflow' ...).
+ btst IMM (DBL_MANT_DIG-32),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
+| have to put the exponent to zero and return a denormalized number.
+ btst IMM (DBL_MANT_DIG-32-1),d0
+ beq 1f
+ jmp a0@
+1: movel IMM (0),d4
+ jmp a0@
+
+Lround$to$zero:
+Lround$to$plus:
+Lround$to$minus:
+ jmp a0@
+#endif /* L_double */
+
+#ifdef L_float
+
+ .globl SYM (_fpCCR)
+ .globl $_exception_handler
+
+QUIET_NaN = 0xffffffff
+SIGNL_NaN = 0x7f800001
+INFINITY = 0x7f800000
+
+F_MAX_EXP = 0xff
+F_BIAS = 126
+FLT_MAX_EXP = F_MAX_EXP - F_BIAS
+FLT_MIN_EXP = 1 - F_BIAS
+FLT_MANT_DIG = 24
+
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+SINGLE_FLOAT = 1
+
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
+
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| Entry points:
+
+ .globl SYM (__addsf3)
+ .globl SYM (__subsf3)
+ .globl SYM (__mulsf3)
+ .globl SYM (__divsf3)
+ .globl SYM (__negsf2)
+ .globl SYM (__cmpsf2)
+ .globl SYM (__cmpsf2_internal)
+ .hidden SYM (__cmpsf2_internal)
+
+| These are common routines to return and signal exceptions.
+
+ .text
+ .even
+
+Lf$den:
+| Return and signal a denormalized number
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$infty:
+Lf$overflow:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (INFINITY),d0
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+OVERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$underflow:
+| Return 0 and set the exception flags
+ moveq IMM (0),d0
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$inop:
+| Return a quiet NaN and set the exception flags
+ movel IMM (QUIET_NaN),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$div$0:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (INFINITY),d0
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+|=============================================================================
+|=============================================================================
+| single precision routines
+|=============================================================================
+|=============================================================================
+
+| A single precision floating point number (float) has the format:
+|
+| struct _float {
+| unsigned int sign : 1; /* sign bit */
+| unsigned int exponent : 8; /* exponent, shifted by 126 */
+| unsigned int fraction : 23; /* fraction */
+| } float;
+|
+| Thus sizeof(float) = 4 (32 bits).
+|
+| All the routines are callable from C programs, and return the result
+| in the single register d0. They also preserve all registers except
+| d0-d1 and a0-a1.
+
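+| Packing a float back together from its parts, as a small illustrative
+| C sketch (the names are not part of this file):
+|
+|   unsigned long bits = (sign << 31)
+|                      | ((exp & 0xff) << 23)
+|                      | (frac & 0x007fffff);   /* returned in d0 */
+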
+|=============================================================================
+| __subsf3
+|=============================================================================
+
+| float __subsf3(float, float);
+ FUNC(__subsf3)
+SYM (__subsf3):
+ bchg IMM (31),sp@(8) | change sign of second operand
+ | and fall through
+|=============================================================================
+| __addsf3
+|=============================================================================
+
+| float __addsf3(float, float);
+ FUNC(__addsf3)
+SYM (__addsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0) | everything will be done in registers
+ moveml d2-d7,sp@- | save all data registers but d0-d1
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 | get second operand
+ movel d0,a0 | get d0's sign bit '
+ addl d0,d0 | check and clear sign bit of a
+ beq Laddsf$b | if zero return second operand
+ movel d1,a1 | save b's sign bit '
+ addl d1,d1 | get rid of sign bit
+ beq Laddsf$a | if zero return first operand
+
+| Get the exponents and check for denormalized and/or infinity.
+
+ movel IMM (0x00ffffff),d4 | mask to get fraction
+ movel IMM (0x01000000),d5 | mask to put hidden bit back
+
+ movel d0,d6 | save a to get exponent
+ andl d4,d0 | get fraction in d0
+ notl d4 | make d4 into a mask for the exponent
+ andl d4,d6 | get exponent in d6
+ beq Laddsf$a$den | branch if a is denormalized
+ cmpl d4,d6 | check for INFINITY or NaN
+ beq Laddsf$nf
+ swap d6 | put exponent into first word
+ orl d5,d0 | and put hidden bit back
+Laddsf$1:
+| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
+ movel d1,d7 | get exponent in d7
+ andl d4,d7 |
+ beq Laddsf$b$den | branch if b is denormalized
+ cmpl d4,d7 | check for INFINITY or NaN
+ beq Laddsf$nf
+ swap d7 | put exponent into first word
+ notl d4 | make d4 into a mask for the fraction
+ andl d4,d1 | get fraction in d1
+ orl d5,d1 | and put hidden bit back
+Laddsf$2:
+| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
+
+| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
+| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
+| bit).
+
+ movel d1,d2 | move b to d2, since we want to use
+ | two registers to do the sum
+ movel IMM (0),d1 | and clear the new ones
+ movel d1,d3 |
+
+| Here we shift the numbers in registers d0 and d1 so the exponents are the
+| same, and put the largest exponent in d6. Note that we are using two
+| registers for each number (see the discussion by D. Knuth in "Seminumerical
+| Algorithms").
+#ifndef __mcoldfire__
+ cmpw d6,d7 | compare exponents
+#else
+ cmpl d6,d7 | compare exponents
+#endif
+ beq Laddsf$3 | if equal don't shift '
+ bhi 5f | branch if second exponent is larger
+1:
+ subl d6,d7 | keep the largest exponent
+ negl d7
+#ifndef __mcoldfire__
+ lsrw IMM (8),d7 | put difference in lower byte
+#else
+ lsrl IMM (8),d7 | put difference in lower byte
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (FLT_MANT_DIG+2),d7
+#else
+ cmpl IMM (FLT_MANT_DIG+2),d7
+#endif
+ bge Laddsf$b$small
+#ifndef __mcoldfire__
+ cmpw IMM (16),d7 | if difference >= 16 swap
+#else
+ cmpl IMM (16),d7 | if difference >= 16 swap
+#endif
+ bge 4f
+2:
+#ifndef __mcoldfire__
+ subw IMM (1),d7
+#else
+ subql IMM (1), d7
+#endif
+3:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d2 | shift right second operand
+ roxrl IMM (1),d3
+ dbra d7,3b
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ subql IMM (1), d7
+ bpl 3b
+#endif
+ bra Laddsf$3
+4:
+ movew d2,d3
+ swap d3
+ movew d3,d2
+ swap d2
+#ifndef __mcoldfire__
+ subw IMM (16),d7
+#else
+ subl IMM (16),d7
+#endif
+ bne 2b | if still more bits, go back to normal case
+ bra Laddsf$3
+5:
+#ifndef __mcoldfire__
+ exg d6,d7 | exchange the exponents
+#else
+ eorl d6,d7
+ eorl d7,d6
+ eorl d6,d7
+#endif
+ subl d6,d7 | keep the largest exponent
+ negl d7 |
+#ifndef __mcoldfire__
+ lsrw IMM (8),d7 | put difference in lower byte
+#else
+ lsrl IMM (8),d7 | put difference in lower byte
+#endif
+| if difference is too large we don't shift (and exit!) '
+#ifndef __mcoldfire__
+ cmpw IMM (FLT_MANT_DIG+2),d7
+#else
+ cmpl IMM (FLT_MANT_DIG+2),d7
+#endif
+ bge Laddsf$a$small
+#ifndef __mcoldfire__
+ cmpw IMM (16),d7 | if difference >= 16 swap
+#else
+ cmpl IMM (16),d7 | if difference >= 16 swap
+#endif
+ bge 8f
+6:
+#ifndef __mcoldfire__
+ subw IMM (1),d7
+#else
+ subl IMM (1),d7
+#endif
+7:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0 | shift right first operand
+ roxrl IMM (1),d1
+ dbra d7,7b
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ subql IMM (1),d7
+ bpl 7b
+#endif
+ bra Laddsf$3
+8:
+ movew d0,d1
+ swap d1
+ movew d1,d0
+ swap d0
+#ifndef __mcoldfire__
+ subw IMM (16),d7
+#else
+ subl IMM (16),d7
+#endif
+ bne 6b | if still more bits, go back to normal case
+ | otherwise we fall through
+
+| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
+| signs are stored in a0 and a1).
+
+Laddsf$3:
+| Here we have to decide whether to add or subtract the numbers
+#ifndef __mcoldfire__
+ exg d6,a0 | get signs back
+ exg d7,a1 | and save the exponents
+#else
+ movel d6,d4
+ movel a0,d6
+ movel d4,a0
+ movel d7,d4
+ movel a1,d7
+ movel d4,a1
+#endif
+ eorl d6,d7 | combine sign bits
+ bmi Lsubsf$0 | if negative a and b have opposite
+ | sign so we actually subtract the
+ | numbers
+
+| Here we have both positive or both negative
+#ifndef __mcoldfire__
+ exg d6,a0 | now we have the exponent in d6
+#else
+ movel d6,d4
+ movel a0,d6
+ movel d4,a0
+#endif
+ movel a0,d7 | and sign in d7
+ andl IMM (0x80000000),d7
+| Here we do the addition.
+ addl d3,d1
+ addxl d2,d0
+| Note: now we have d2, d3, d4 and d5 to play with!
+
+| Put the exponent (in the first byte) into d2, to use the "standard" rounding
+| routines:
+ movel d6,d2
+#ifndef __mcoldfire__
+ lsrw IMM (8),d2
+#else
+ lsrl IMM (8),d2
+#endif
+
+| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| Since in the addition (not in the subtraction!) we could have set
+| one more bit, we check for this:
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+ addl IMM (1),d2
+1:
+ lea pc@(Laddsf$4),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Laddsf$4:
+| Put back the exponent, but check for overflow.
+#ifndef __mcoldfire__
+ cmpw IMM (0xff),d2
+#else
+ cmpl IMM (0xff),d2
+#endif
+ bhi 1f
+ bclr IMM (FLT_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (7),d2
+#else
+ lsll IMM (7),d2
+#endif
+ swap d2
+ orl d2,d0
+ bra Laddsf$ret
+1:
+ moveq IMM (ADD),d5
+ bra Lf$overflow
+
+Lsubsf$0:
+| We are here if a > 0 and b < 0 (sign bits cleared).
+| Here we do the subtraction.
+ movel d6,d7 | put sign in d7
+ andl IMM (0x80000000),d7
+
+ subl d3,d1 | result in d0-d1
+ subxl d2,d0 |
+ beq Laddsf$ret | if zero just exit
+ bpl 1f | if positive skip the following
+ bchg IMM (31),d7 | change sign bit in d7
+ negl d1
+ negxl d0
+1:
+#ifndef __mcoldfire__
+ exg d2,a0 | now we have the exponent in d2
+ lsrw IMM (8),d2 | put it in the first byte
+#else
+ movel d2,d4
+ movel a0,d2
+ movel d4,a0
+ lsrl IMM (8),d2 | put it in the first byte
+#endif
+
+| Now d0-d1 is positive and the sign bit is in d7.
+
+| Note that we do not have to normalize, since in the subtraction bit
+| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
+| the rounding routines themselves.
+ lea pc@(Lsubsf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lsubsf$1:
+| Put back the exponent (we can't have overflow!). '
+ bclr IMM (FLT_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (7),d2
+#else
+ lsll IMM (7),d2
+#endif
+ swap d2
+ orl d2,d0
+ bra Laddsf$ret
+
+| If one of the numbers was too small (difference of exponents >=
+| FLT_MANT_DIG+2) we return the other (and now we don't have to '
+| check for finiteness or zero).
+Laddsf$a$small:
+ movel a6@(12),d0
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Laddsf$b$small:
+ movel a6@(8),d0
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+| If the numbers are denormalized remember to put exponent equal to 1.
+
+Laddsf$a$den:
+ movel d5,d6 | d5 contains 0x01000000
+ swap d6
+ bra Laddsf$1
+
+Laddsf$b$den:
+ movel d5,d7
+ swap d7
+ notl d4 | make d4 into a mask for the fraction
+ | (this was not executed after the jump)
+ bra Laddsf$2
+
+| The rest is mainly code for the different results which can be
+| returned (checking always for +/-INFINITY and NaN).
+
+Laddsf$b:
+| Return b (if a is zero).
+ movel a6@(12),d0
+ cmpl IMM (0x80000000),d0 | Check if b is -0
+ bne 1f
+ movel a0,d7
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Laddsf$ret
+Laddsf$a:
+| Return a (if b is zero).
+ movel a6@(8),d0
+1:
+ moveq IMM (ADD),d5
+| We have to check for NaN and +/-infty.
+ movel d0,d7
+ andl IMM (0x80000000),d7 | put sign in d7
+ bclr IMM (31),d0 | clear sign
+ cmpl IMM (INFINITY),d0 | check for infty or NaN
+ bge 2f
+ movel d0,d0 | check for zero (we do this because we don't '
+ bne Laddsf$ret | want to return -0 by mistake)
+ bclr IMM (31),d7 | if zero be sure to clear sign
+ bra Laddsf$ret | if everything OK just return
+2:
+| The value to be returned is either +/-infty or NaN
+ andl IMM (0x007fffff),d0 | check for NaN
+ bne Lf$inop | if mantissa not zero is NaN
+ bra Lf$infty
+
+Laddsf$ret:
+| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
+| We have to clear the exception flags (just the exception type).
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+ orl d7,d0 | put sign bit
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Laddsf$ret$den:
+| Return a denormalized number (for addition we don't signal underflow) '
+ lsrl IMM (1),d0 | remember to shift right back once
+ bra Laddsf$ret | and return
+
+| Note: when adding two floats of the same sign, if either one is
+| NaN we return NaN without regard to whether the other is finite or
+| not. When subtracting them (i.e., when adding two numbers of
+| opposite signs) things are more complicated: if both are INFINITY
+| we return NaN, if only one is INFINITY and the other is NaN we return
+| NaN, but if it is finite we return INFINITY with the corresponding sign.
+
+Laddsf$nf:
+ moveq IMM (ADD),d5
+| This could be faster but it is not worth the effort, since it is not
+| executed very often. We sacrifice speed for clarity here.
+ movel a6@(8),d0 | get the numbers back (remember that we
+ movel a6@(12),d1 | did some processing already)
+ movel IMM (INFINITY),d4 | useful constant (INFINITY)
+ movel d0,d2 | save sign bits
+ movel d1,d3
+ bclr IMM (31),d0 | clear sign bits
+ bclr IMM (31),d1
+| We know that one of them is either NaN or +/-INFINITY.
+| Check for NaN (if either one is NaN return NaN)
+ cmpl d4,d0 | check first a (d0)
+ bhi Lf$inop
+ cmpl d4,d1 | check now b (d1)
+ bhi Lf$inop
+| Now comes the check for +/-INFINITY. We know that at least one of them
+| is infinite, but whether we return NaN or INFINITY depends on whether
+| both are infinite and on whether we are adding or subtracting them.
+ eorl d3,d2 | to check sign bits
+ bmi 1f
+ movel d0,d7
+ andl IMM (0x80000000),d7 | get (common) sign bit
+ bra Lf$infty
+1:
+| We know one (or both) are infinite, so we test for equality between the
+| two numbers (if they are equal they have to be infinite both, so we
+| return NaN).
+ cmpl d1,d0 | are both infinite?
+ beq Lf$inop | if so return NaN
+
+ movel d0,d7
+ andl IMM (0x80000000),d7 | get a's sign bit '
+ cmpl d4,d0 | test now for infinity
+ beq Lf$infty | if a is INFINITY return with this sign
+ bchg IMM (31),d7 | else we know b is INFINITY and has
+ bra Lf$infty | the opposite sign
+
+|=============================================================================
+| __mulsf3
+|=============================================================================
+
+| float __mulsf3(float, float);
+ FUNC(__mulsf3)
+SYM (__mulsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0
+ movel a6@(12),d1 | and b into d1
+ movel d0,d7 | d7 will hold the sign of the product
+ eorl d1,d7 |
+ andl IMM (0x80000000),d7
+ movel IMM (INFINITY),d6 | useful constant (+INFINITY)
+ movel d6,d5 | another (mask for fraction)
+ notl d5 |
+ movel IMM (0x00800000),d4 | this is to put hidden bit back
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d2 |
+ beq Lmulsf$a$0 | branch if a is zero
+ bclr IMM (31),d1 | get rid of b's sign bit '
+ movel d1,d3 |
+ beq Lmulsf$b$0 | branch if b is zero
+ cmpl d6,d0 | is a big?
+ bhi Lmulsf$inop | if a is NaN return NaN
+ beq Lmulsf$inf | if a is INFINITY we have to check b
+ cmpl d6,d1 | now compare b with INFINITY
+ bhi Lmulsf$inop | is b NaN?
+ beq Lmulsf$overflow | is b INFINITY?
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d2 and d3.
+ andl d6,d2 | and isolate exponent in d2
+ beq Lmulsf$a$den | if exponent is zero we have a denormalized
+ andl d5,d0 | and isolate fraction
+ orl d4,d0 | and put hidden bit back
+ swap d2 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (7),d2 |
+#else
+ lsrl IMM (7),d2 |
+#endif
+Lmulsf$1: | number
+ andl d6,d3 |
+ beq Lmulsf$b$den |
+ andl d5,d1 |
+ orl d4,d1 |
+ swap d3 |
+#ifndef __mcoldfire__
+ lsrw IMM (7),d3 |
+#else
+ lsrl IMM (7),d3 |
+#endif
+Lmulsf$2: |
+#ifndef __mcoldfire__
+ addw d3,d2 | add exponents
+ subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
+#else
+ addl d3,d2 | add exponents
+ subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
+#endif
+
+| We are now ready to do the multiplication. The situation is as follows:
+| both a and b have bit FLT_MANT_DIG-1 set (even if they were
+| denormalized to start with!), which means that in the product
+| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
+| high long) is set.
+
+| To do the multiplication let us move the numbers around a little ...
+ movel d1,d6 | second operand in d6
+ movel d0,d5 | first operand in d4-d5
+ movel IMM (0),d4
+ movel d4,d1 | the sums will go in d0-d1
+ movel d4,d0
+
+| now bit FLT_MANT_DIG-1 becomes bit 31:
+ lsll IMM (31-FLT_MANT_DIG+1),d6
+
+| Start the loop (we loop #FLT_MANT_DIG times):
+ moveq IMM (FLT_MANT_DIG-1),d3
+1: addl d1,d1 | shift sum
+ addxl d0,d0
+ lsll IMM (1),d6 | get bit bn
+ bcc 2f | if not set skip sum
+ addl d5,d1 | add a
+ addxl d4,d0
+2:
+#ifndef __mcoldfire__
+ dbf d3,1b | loop back
+#else
+ subql IMM (1),d3
+ bpl 1b
+#endif
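+
+| In C terms the loop above is roughly the following shift-and-add
+| multiply (a hedged sketch of the algorithm, not of the exact
+| register allocation; the 64-bit variable models the d0-d1 pair):
+|
+|   unsigned long long mulfrac (unsigned long a, unsigned long b)
+|   {
+|     unsigned long long sum = 0;
+|     int i;
+|     for (i = FLT_MANT_DIG - 1; i >= 0; i--)
+|       {
+|         sum <<= 1;                /* shift sum */
+|         if (b & (1UL << i))       /* get bit b_i */
+|           sum += a;               /* if set, add a */
+|       }
+|     return sum;                   /* the 2*FLT_MANT_DIG-bit product */
+|   }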
+
+| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
+| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
+| FLT_MANT_DIG is set (to do the rounding).
+#ifndef __mcoldfire__
+ rorl IMM (6),d1
+ swap d1
+ movew d1,d3
+ andw IMM (0x03ff),d3
+ andw IMM (0xfd00),d1
+#else
+ movel d1,d3
+ lsll IMM (8),d1
+ addl d1,d1
+ addl d1,d1
+ moveq IMM (22),d5
+ lsrl d5,d3
+ orl d3,d1
+ andl IMM (0xfffffd00),d1
+#endif
+ lsll IMM (8),d0
+ addl d0,d0
+ addl d0,d0
+#ifndef __mcoldfire__
+ orw d3,d0
+#else
+ orl d3,d0
+#endif
+
+ moveq IMM (MULTIPLY),d5
+
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq Lround$exit
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d2
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addql IMM (1),d2
+#endif
+ bra Lround$exit
+
+Lmulsf$inop:
+ moveq IMM (MULTIPLY),d5
+ bra Lf$inop
+
+Lmulsf$overflow:
+ moveq IMM (MULTIPLY),d5
+ bra Lf$overflow
+
+Lmulsf$inf:
+ moveq IMM (MULTIPLY),d5
+| If either is NaN return NaN; else both are (maybe infinite) numbers, so
+| return INFINITY with the correct sign (which is in d7).
+ cmpl d6,d1 | is b NaN?
+ bhi Lf$inop | if so return NaN
+ bra Lf$overflow | else return +/-INFINITY
+
+| If either number is zero return zero, unless the other is +/-INFINITY,
+| or NaN, in which case we return NaN.
+Lmulsf$b$0:
+| Here d1 (==b) is zero.
+ movel a6@(8),d1 | get a again to check for non-finiteness
+ bra 1f
+Lmulsf$a$0:
+ movel a6@(12),d1 | get b again to check for non-finiteness
+1: bclr IMM (31),d1 | clear sign bit
+ cmpl IMM (INFINITY),d1 | and check for a large exponent
+ bge Lf$inop | if b is +/-INFINITY or NaN return NaN
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| hidden bit back into the fraction; instead we shift left until bit 23
+| (the hidden bit) is set, adjusting the exponent accordingly. We do this
+| to ensure that the product of the fractions is close to 1.
+Lmulsf$a$den:
+ movel IMM (1),d2
+ andl d5,d0
+1: addl d0,d0 | shift a left (until bit 23 is set)
+#ifndef __mcoldfire__
+ subw IMM (1),d2 | and adjust exponent
+#else
+ subql IMM (1),d2 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d0
+ bne Lmulsf$1 |
+ bra 1b | else loop back
+
+Lmulsf$b$den:
+ movel IMM (1),d3
+ andl d5,d1
+1: addl d1,d1 | shift b left until bit 23 is set
+#ifndef __mcoldfire__
+ subw IMM (1),d3 | and adjust exponent
+#else
+ subql IMM (1),d3 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d1
+ bne Lmulsf$2 |
+ bra 1b | else loop back
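+
+| In C, each of the two normalization loops above amounts to the
+| following (hedged sketch):
+|
+|   exp = 1;                        /* denormals get exponent 1 */
+|   frac &= 0x007fffff;             /* keep the fraction bits only */
+|   do { frac <<= 1; exp--; }       /* shift left, adjust exponent */
+|   while (!(frac & 0x00800000));   /* until bit 23 (hidden bit) is set */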
+
+|=============================================================================
+| __divsf3
+|=============================================================================
+
+| float __divsf3(float, float);
+ FUNC(__divsf3)
+SYM (__divsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0
+ movel a6@(12),d1 | and b into d1
+ movel d0,d7 | d7 will hold the sign of the result
+ eorl d1,d7 |
+ andl IMM (0x80000000),d7 |
+ movel IMM (INFINITY),d6 | useful constant (+INFINITY)
+ movel d6,d5 | another (mask for fraction)
+ notl d5 |
+ movel IMM (0x00800000),d4 | this is to put hidden bit back
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d2 |
+ beq Ldivsf$a$0 | branch if a is zero
+ bclr IMM (31),d1 | get rid of b's sign bit '
+ movel d1,d3 |
+ beq Ldivsf$b$0 | branch if b is zero
+ cmpl d6,d0 | is a big?
+ bhi Ldivsf$inop | if a is NaN return NaN
+ beq Ldivsf$inf | if a is INFINITY we have to check b
+ cmpl d6,d1 | now compare b with INFINITY
+ bhi Ldivsf$inop | if b is NaN return NaN
+ beq Ldivsf$underflow
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d2 and d3 and normalize the numbers to
+| ensure that the ratio of the fractions is close to 1. We do this by
+| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
+ andl d6,d2 | and isolate exponent in d2
+ beq Ldivsf$a$den | if exponent is zero we have a denormalized
+ andl d5,d0 | and isolate fraction
+ orl d4,d0 | and put hidden bit back
+ swap d2 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (7),d2 |
+#else
+ lsrl IMM (7),d2 |
+#endif
+Ldivsf$1: |
+ andl d6,d3 |
+ beq Ldivsf$b$den |
+ andl d5,d1 |
+ orl d4,d1 |
+ swap d3 |
+#ifndef __mcoldfire__
+ lsrw IMM (7),d3 |
+#else
+ lsrl IMM (7),d3 |
+#endif
+Ldivsf$2: |
+#ifndef __mcoldfire__
+ subw d3,d2 | subtract exponents
+ addw IMM (F_BIAS),d2 | and add bias
+#else
+ subl d3,d2 | subtract exponents
+ addl IMM (F_BIAS),d2 | and add bias
+#endif
+
+| We are now ready to do the division. We have prepared things in such a way
+| that the ratio of the fractions will be less than 2 but greater than 1/2.
+| At this point the registers in use are:
+| d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
+| d1 holds b (second operand, bit FLT_MANT_DIG=1)
+| d2 holds the difference of the exponents, corrected by the bias
+| d7 holds the sign of the ratio
+| d4, d5, d6 hold some constants
+ movel d7,a0 | d6-d7 will hold the ratio of the fractions
+ movel IMM (0),d6 |
+ movel d6,d7
+
+ moveq IMM (FLT_MANT_DIG+1),d3
+1: cmpl d0,d1 | is a < b?
+ bhi 2f |
+ bset d3,d6 | set a bit in d6
+ subl d1,d0 | if a >= b a <-- a-b
+ beq 3f | if a is zero, exit
+2: addl d0,d0 | multiply a by 2
+#ifndef __mcoldfire__
+ dbra d3,1b
+#else
+ subql IMM (1),d3
+ bpl 1b
+#endif
+
+| Now we keep going to set the sticky bit ...
+ moveq IMM (FLT_MANT_DIG),d3
+1: cmpl d0,d1
+ ble 2f
+ addl d0,d0
+#ifndef __mcoldfire__
+ dbra d3,1b
+#else
+ subql IMM(1),d3
+ bpl 1b
+#endif
+ movel IMM (0),d1
+ bra 3f
+2: movel IMM (0),d1
+#ifndef __mcoldfire__
+ subw IMM (FLT_MANT_DIG),d3
+ addw IMM (31),d3
+#else
+ subl IMM (FLT_MANT_DIG),d3
+ addl IMM (31),d3
+#endif
+ bset d3,d1
+3:
+ movel d6,d0 | put the ratio in d0-d1
+ movel a0,d7 | get sign back
+
+| Because of the normalization we did before we are guaranteed that
+| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
+| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq 1f | if it is not set, then bit 24 is set
+ lsrl IMM (1),d0 |
+#ifndef __mcoldfire__
+ addw IMM (1),d2 |
+#else
+ addl IMM (1),d2 |
+#endif
+1:
+| Now round, check for over- and underflow, and exit.
+ moveq IMM (DIVIDE),d5
+ bra Lround$exit
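+
+| In C terms the quotient loop above is a restoring division of the
+| fractions (hedged sketch; the follow-up loop that computes the
+| sticky bit is omitted):
+|
+|   unsigned long divfrac (unsigned long a, unsigned long b)
+|   {
+|     unsigned long q = 0;
+|     int i;
+|     for (i = FLT_MANT_DIG + 1; i >= 0; i--)
+|       {
+|         if (a >= b)               /* if a >= b, a <-- a-b */
+|           {
+|             q |= 1UL << i;        /* and set quotient bit i */
+|             a -= b;
+|             if (a == 0)           /* exact result: stop early */
+|               break;
+|           }
+|         a <<= 1;                  /* multiply a by 2 */
+|       }
+|     return q;                     /* FLT_MANT_DIG+2 quotient bits */
+|   }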
+
+Ldivsf$inop:
+ moveq IMM (DIVIDE),d5
+ bra Lf$inop
+
+Ldivsf$overflow:
+ moveq IMM (DIVIDE),d5
+ bra Lf$overflow
+
+Ldivsf$underflow:
+ moveq IMM (DIVIDE),d5
+ bra Lf$underflow
+
+Ldivsf$a$0:
+ moveq IMM (DIVIDE),d5
+| If a is zero check to see whether b is zero also. In that case return
+| NaN; then check if b is NaN, and return NaN also in that case. Else
+| return a properly signed zero.
+ andl IMM (0x7fffffff),d1 | clear sign bit and test b
+ beq Lf$inop | if b is also zero return NaN
+ cmpl IMM (INFINITY),d1 | check for NaN
+ bhi Lf$inop |
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7 |
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+Ldivsf$b$0:
+ moveq IMM (DIVIDE),d5
+| If we got here a is not zero. Check if a is NaN; in that case return NaN,
+| else return +/-INFINITY. Remember that a is in d0 with the sign bit
+| cleared already.
+ cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
+ bhi Lf$inop | if larger it is NaN
+ bra Lf$div$0 | else signal DIVIDE_BY_ZERO
+
+Ldivsf$inf:
+ moveq IMM (DIVIDE),d5
+| If a is INFINITY we have to check b
+ cmpl IMM (INFINITY),d1 | compare b with INFINITY
+ bge Lf$inop | if b is NaN or INFINITY return NaN
+ bra Lf$overflow | else return overflow
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| bit back into the fraction.
+Ldivsf$a$den:
+ movel IMM (1),d2
+ andl d5,d0
+1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
+#ifndef __mcoldfire__
+ subw IMM (1),d2 | and adjust exponent
+#else
+ subl IMM (1),d2 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d0
+ bne Ldivsf$1
+ bra 1b
+
+Ldivsf$b$den:
+ movel IMM (1),d3
+ andl d5,d1
+1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
+#ifndef __mcoldfire__
+ subw IMM (1),d3 | and adjust exponent
+#else
+ subl IMM (1),d3 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d1
+ bne Ldivsf$2
+ bra 1b
+
+Lround$exit:
+| This is a common exit point for __mulsf3 and __divsf3.
+
+| First check for underflow in the exponent:
+#ifndef __mcoldfire__
+ cmpw IMM (-FLT_MANT_DIG-1),d2
+#else
+ cmpl IMM (-FLT_MANT_DIG-1),d2
+#endif
+ blt Lf$underflow
+| It could happen that the exponent is less than 1, in which case the
+| number is denormalized. In this case we shift right and adjust the
+| exponent until it becomes 1 or the fraction is zero (in the latter case
+| we signal underflow and return zero).
+ movel IMM (0),d6 | d6 is used temporarily
+#ifndef __mcoldfire__
+ cmpw IMM (1),d2 | if the exponent is less than 1 we
+#else
+ cmpl IMM (1),d2 | if the exponent is less than 1 we
+#endif
+ bge 2f | have to shift right (denormalize)
+1:
+#ifndef __mcoldfire__
+ addw IMM (1),d2 | adjust the exponent
+ lsrl IMM (1),d0 | shift right once
+ roxrl IMM (1),d1 |
+ roxrl IMM (1),d6 | d6 collect bits we would lose otherwise
+ cmpw IMM (1),d2 | is the exponent 1 already?
+#else
+ addql IMM (1),d2 | adjust the exponent
+ lsrl IMM (1),d6
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d6
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ cmpl IMM (1),d2 | is the exponent 1 already?
+#endif
+ beq 2f | if not loop back
+ bra 1b |
+ bra Lf$underflow | safety check, shouldn't execute '
+2: orl d6,d1 | this is a trick so we don't lose '
+ | the extra bits which were flushed right
+| Now call the rounding routine (which takes care of denormalized numbers):
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lround$0:
+| Here we have a correctly rounded result (either normalized or denormalized).
+
+| Here we should have either a normalized number or a denormalized one, and
+| the exponent is necessarily greater than or equal to 1 (so we don't have to '
+| check again for underflow!). We have to check for overflow or for a
+| denormalized number (which also signals underflow).
+| Check for overflow (i.e., exponent >= 255).
+#ifndef __mcoldfire__
+ cmpw IMM (0x00ff),d2
+#else
+ cmpl IMM (0x00ff),d2
+#endif
+ bge Lf$overflow
+| Now check for a denormalized number (exponent==0).
+ movew d2,d2
+ beq Lf$den
+1:
+| Put back the exponents and sign and return.
+#ifndef __mcoldfire__
+ lslw IMM (7),d2 | exponent back to fourth byte
+#else
+ lsll IMM (7),d2 | exponent back to fourth byte
+#endif
+ bclr IMM (FLT_MANT_DIG-1),d0
+ swap d0 | and put back exponent
+#ifndef __mcoldfire__
+ orw d2,d0 |
+#else
+ orl d2,d0
+#endif
+ swap d0 |
+ orl d7,d0 | and sign also
+
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+|=============================================================================
+| __negsf2
+|=============================================================================
+
+| This is trivial and could be shorter if we didn't bother checking for NaN '
+| and +/-INFINITY.
+
+| float __negsf2(float);
+ FUNC(__negsf2)
+SYM (__negsf2):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (NEGATE),d5
+ movel a6@(8),d0 | get number to negate in d0
+ bchg IMM (31),d0 | negate
+ movel d0,d1 | make a positive copy
+ bclr IMM (31),d1 |
+ tstl d1 | check for zero
+ beq 2f | if zero (either sign) return +zero
+ cmpl IMM (INFINITY),d1 | compare to +INFINITY
+ blt 1f |
+ bhi Lf$inop | if larger (fraction not zero) is NaN
+ movel d0,d7 | else get sign and return INFINITY
+ andl IMM (0x80000000),d7
+ bra Lf$infty
+1: PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+2: bclr IMM (31),d0
+ bra 1b
+
+|=============================================================================
+| __cmpsf2
+|=============================================================================
+
+GREATER = 1
+LESS = -1
+EQUAL = 0
+
+| int __cmpsf2_internal(float, float, int);
+SYM (__cmpsf2_internal):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@- | save registers
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (COMPARE),d5
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 | get second operand
+| Check if either is NaN, and in that case return garbage and signal
+| INVALID_OPERATION. Check also if either is zero, and clear the signs
+| if necessary.
+ movel d0,d6
+ andl IMM (0x7fffffff),d0
+ beq Lcmpsf$a$0
+ cmpl IMM (0x7f800000),d0
+ bhi Lcmpf$inop
+Lcmpsf$1:
+ movel d1,d7
+ andl IMM (0x7fffffff),d1
+ beq Lcmpsf$b$0
+ cmpl IMM (0x7f800000),d1
+ bhi Lcmpf$inop
+Lcmpsf$2:
+| Check the signs
+ eorl d6,d7
+ bpl 1f
+| If the signs are not equal check if a >= 0
+ tstl d6
+ bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
+ bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
+1:
+| If the signs are equal check for < 0
+ tstl d6
+ bpl 1f
+| If both are negative exchange them
+#ifndef __mcoldfire__
+ exg d0,d1
+#else
+ movel d0,d7
+ movel d1,d0
+ movel d7,d1
+#endif
+1:
+| Now that they are positive we just compare them as longs (this also
+| works for denormalized numbers, since same-sign IEEE values order
+| like their bit patterns).
+ cmpl d0,d1
+ bhi Lcmpsf$b$gt$a | |b| > |a|
+ bne Lcmpsf$a$gt$b | |b| < |a|
+| If we got here a == b.
+ movel IMM (EQUAL),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+#endif
+ unlk a6
+ rts
+Lcmpsf$a$gt$b:
+ movel IMM (GREATER),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpsf$b$gt$a:
+ movel IMM (LESS),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Lcmpsf$a$0:
+ bclr IMM (31),d6
+ bra Lcmpsf$1
+Lcmpsf$b$0:
+ bclr IMM (31),d7
+ bra Lcmpsf$2
+
+Lcmpf$inop:
+ movl a6@(16),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+| int __cmpsf2(float, float);
+ FUNC(__cmpsf2)
+SYM (__cmpsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
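+
+| In C terms __cmpsf2_internal behaves roughly as follows once NaNs
+| have been diverted to the exception path (hedged sketch operating
+| on the raw bit patterns):
+|
+|   int cmpsf (unsigned long a, unsigned long b)
+|   {
+|     if (((a | b) << 1) == 0)      /* both zero: +0 == -0 */
+|       return EQUAL;
+|     if ((a ^ b) & 0x80000000)     /* opposite signs decide it */
+|       return (a & 0x80000000) ? LESS : GREATER;
+|     if (a & 0x80000000)           /* both negative: swap them */
+|       { unsigned long t = a; a = b; b = t; }
+|     return a == b ? EQUAL : (a < b ? LESS : GREATER);
+|   }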
+
+|=============================================================================
+| rounding routines
+|=============================================================================
+
+| The rounding routines expect the number to be normalized in registers
+| d0-d1, with the exponent in register d2. They assume that the
+| exponent is greater than or equal to 1. They return a properly normalized number
+| if possible, and a denormalized number otherwise. The exponent is returned
+| in d2.
+
+Lround$to$nearest:
+| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
+| Here we assume that the exponent is not too small (this should be checked
+| before entering the rounding routine), but the number could be denormalized.
+
+| Check for denormalized numbers:
+1: btst IMM (FLT_MANT_DIG),d0
+ bne 2f | if set the number is normalized
+| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
+| is one (remember that a denormalized number corresponds to an
+| exponent of -F_BIAS+1).
+#ifndef __mcoldfire__
+ cmpw IMM (1),d2 | remember that the exponent is at least one
+#else
+ cmpl IMM (1),d2 | remember that the exponent is at least one
+#endif
+ beq 2f | an exponent of one means denormalized
+ addl d1,d1 | else shift and adjust the exponent
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d2,1b |
+#else
+ subql IMM (1),d2
+ bpl 1b
+#endif
+2:
+| Now round: we do it as follows: after the shifting we can write the
+| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
+| If delta < 1, do nothing. If delta > 1, add 1 to f.
+| If delta == 1, we make sure the rounded number will be even
+| (after shifting).
+ btst IMM (0),d0 | is delta < 1?
+ beq 2f | if so, do not do anything
+ tstl d1 | is delta == 1?
+ bne 1f | if so round to even
+ movel d0,d1 |
+ andl IMM (2),d1 | bit 1 is the last significant bit
+ addl d1,d0 |
+ bra 2f |
+1: movel IMM (1),d1 | else add 1
+ addl d1,d0 |
+| Shift right once (because we used bit #FLT_MANT_DIG!).
+2: lsrl IMM (1),d0
+| Now check again bit #FLT_MANT_DIG (rounding could have produced a
+| 'fraction overflow' ...).
+ btst IMM (FLT_MANT_DIG),d0
+ beq 1f
+ lsrl IMM (1),d0
+#ifndef __mcoldfire__
+ addw IMM (1),d2
+#else
+ addql IMM (1),d2
+#endif
+1:
+| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
+| have to put the exponent to zero and return a denormalized number.
+ btst IMM (FLT_MANT_DIG-1),d0
+ beq 1f
+ jmp a0@
+1: movel IMM (0),d2
+ jmp a0@
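+
+| In C terms the rounding decision above amounts to the following
+| (hedged sketch; frac holds the guard bit in bit 0 and sticky
+| collects all the bits below it):
+|
+|   if (frac & 1)                   /* delta >= 1 */
+|     {
+|       if (sticky)                 /* delta > 1: round up */
+|         frac += 1;
+|       else                        /* delta == 1: tie, go to even */
+|         frac += frac & 2;
+|     }
+|   frac >>= 1;                     /* drop the guard bit */
+|
+| (The fraction-overflow recheck and the denormal handling above are
+| extra steps around this core.)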
+
+Lround$to$zero:
+Lround$to$plus:
+Lround$to$minus:
+ jmp a0@
+#endif /* L_float */
+
+| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
+| __ledf2, __ltdf2 to all return the same value as a direct call to
+| __cmpdf2 would. In this implementation, each of these routines
+| simply calls __cmpdf2. It would be more efficient to give the
+| __cmpdf2 routine several names, but separating them out will make it
+| easier to write efficient versions of these routines someday.
+| If the operands compare unordered, __gtdf2 and __gedf2 return -1;
+| the other routines return 1.
+
+#ifdef L_eqdf2
+ .text
+ FUNC(__eqdf2)
+ .globl SYM (__eqdf2)
+SYM (__eqdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_eqdf2 */
+
+#ifdef L_nedf2
+ .text
+ FUNC(__nedf2)
+ .globl SYM (__nedf2)
+SYM (__nedf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_nedf2 */
+
+#ifdef L_gtdf2
+ .text
+ FUNC(__gtdf2)
+ .globl SYM (__gtdf2)
+SYM (__gtdf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_gtdf2 */
+
+#ifdef L_gedf2
+ .text
+ FUNC(__gedf2)
+ .globl SYM (__gedf2)
+SYM (__gedf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_gedf2 */
+
+#ifdef L_ltdf2
+ .text
+ FUNC(__ltdf2)
+ .globl SYM (__ltdf2)
+SYM (__ltdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_ltdf2 */
+
+#ifdef L_ledf2
+ .text
+ FUNC(__ledf2)
+ .globl SYM (__ledf2)
+SYM (__ledf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_ledf2 */
+
+| The comments above about __eqdf2, et al., also apply to __eqsf2,
+| et al., except that the latter call __cmpsf2 rather than __cmpdf2.
+
+#ifdef L_eqsf2
+ .text
+ FUNC(__eqsf2)
+ .globl SYM (__eqsf2)
+SYM (__eqsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_eqsf2 */
+
+#ifdef L_nesf2
+ .text
+ FUNC(__nesf2)
+ .globl SYM (__nesf2)
+SYM (__nesf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_nesf2 */
+
+#ifdef L_gtsf2
+ .text
+ FUNC(__gtsf2)
+ .globl SYM (__gtsf2)
+SYM (__gtsf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_gtsf2 */
+
+#ifdef L_gesf2
+ .text
+ FUNC(__gesf2)
+ .globl SYM (__gesf2)
+SYM (__gesf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_gesf2 */
+
+#ifdef L_ltsf2
+ .text
+ FUNC(__ltsf2)
+ .globl SYM (__ltsf2)
+SYM (__ltsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_ltsf2 */
+
+#ifdef L_lesf2
+ .text
+ FUNC(__lesf2)
+ .globl SYM (__lesf2)
+SYM (__lesf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_lesf2 */
+
+#if defined (__ELF__) && defined (__linux__)
+ /* Make stack non-executable for ELF linux targets. */
+ .section .note.GNU-stack,"",@progbits
+#endif
diff --git a/gcc/config/m68k/linux-unwind.h b/gcc/config/m68k/linux-unwind.h
new file mode 100644
index 000000000..053c15558
--- /dev/null
+++ b/gcc/config/m68k/linux-unwind.h
@@ -0,0 +1,158 @@
+/* DWARF2 EH unwinding support for Linux/m68k.
+ Copyright (C) 2006, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs.
+ Don't use this at all if inhibit_libc is used. */
+
+#ifndef inhibit_libc
+
+#include <signal.h>
+
+/* <sys/ucontext.h> is unfortunately broken right now. */
+struct uw_ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ unsigned long uc_filler[80];
+ __sigset_t uc_sigmask;
+};
+
+#define MD_FALLBACK_FRAME_STATE_FOR m68k_fallback_frame_state
+
+#ifdef __mcoldfire__
+#define M68K_FP_SIZE 8
+#else
+#define M68K_FP_SIZE 12
+#endif
+
+static _Unwind_Reason_Code
+m68k_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ unsigned short *pc = context->ra;
+ long cfa;
+
+ /* moveq #__NR_sigreturn,%d0; trap #0 */
+ if (pc[0] == 0x7077 && pc[1] == 0x4e40)
+ {
+ struct sigcontext *sc;
+
+ /* Context is passed as the 3rd argument. */
+ sc = *(struct sigcontext **) (context->cfa + 8);
+
+ cfa = sc->sc_usp;
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = 15;
+ fs->regs.cfa_offset = cfa - (long) context->cfa;
+
+ fs->regs.reg[0].how = REG_SAVED_OFFSET;
+ fs->regs.reg[0].loc.offset = (long) &sc->sc_d0 - cfa;
+ fs->regs.reg[1].how = REG_SAVED_OFFSET;
+ fs->regs.reg[1].loc.offset = (long) &sc->sc_d1 - cfa;
+ fs->regs.reg[8].how = REG_SAVED_OFFSET;
+ fs->regs.reg[8].loc.offset = (long) &sc->sc_a0 - cfa;
+ fs->regs.reg[9].how = REG_SAVED_OFFSET;
+ fs->regs.reg[9].loc.offset = (long) &sc->sc_a1 - cfa;
+
+#ifdef __uClinux__
+ fs->regs.reg[13].how = REG_SAVED_OFFSET;
+ fs->regs.reg[13].loc.offset = (long) &sc->sc_a5 - cfa;
+#endif
+
+ fs->regs.reg[24].how = REG_SAVED_OFFSET;
+ fs->regs.reg[24].loc.offset = (long) &sc->sc_pc - cfa;
+
+#ifndef __uClinux__
+ if (*(int *) sc->sc_fpstate)
+ {
+ int *fpregs = (int *) sc->sc_fpregs;
+
+ fs->regs.reg[16].how = REG_SAVED_OFFSET;
+ fs->regs.reg[16].loc.offset = (long) &fpregs[0] - cfa;
+ fs->regs.reg[17].how = REG_SAVED_OFFSET;
+ fs->regs.reg[17].loc.offset = (long) &fpregs[M68K_FP_SIZE/4] - cfa;
+ }
+#elif defined __mcffpu__
+# error Implement this when uClinux kernel is ported to an FPU architecture
+#endif
+ }
+#ifdef __mcoldfire__
+ /* move.l #__NR_rt_sigreturn,%d0; trap #0 */
+ else if (pc[0] == 0x203c && pc[1] == 0x0000 &&
+ pc[2] == 0x00ad && pc[3] == 0x4e40)
+#else
+ /* moveq #~__NR_rt_sigreturn,%d0; not.b %d0; trap #0 */
+ else if (pc[0] == 0x7052 && pc[1] == 0x4600 && pc[2] == 0x4e40)
+#endif
+ {
+ struct uw_ucontext *uc;
+ greg_t *gregs;
+ int i;
+
+ /* Context is passed as the 3rd argument. */
+ uc = *(struct uw_ucontext **) (context->cfa + 8);
+
+ gregs = uc->uc_mcontext.gregs;
+ cfa = gregs[15];
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = 15;
+ fs->regs.cfa_offset = cfa - (long) context->cfa;
+
+ /* register %d0-%d7/%a0-%a6 */
+ for (i = 0; i <= 14; i++)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = (long) &gregs[i] - cfa;
+ }
+
+ /* return address */
+ fs->regs.reg[24].how = REG_SAVED_OFFSET;
+ fs->regs.reg[24].loc.offset = (long) &gregs[16] - cfa;
+
+#define uc_fpstate uc_filler[0]
+
+ if (uc->uc_fpstate)
+ {
+ long fpregs = (long) uc->uc_mcontext.fpregs.f_fpregs;
+
+ /* register %fp0-%fp7 */
+ for (i = 16; i <= 23; i++)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = fpregs - cfa;
+ fpregs += M68K_FP_SIZE;
+ }
+ }
+ }
+ else
+ return _URC_END_OF_STACK;
+
+ fs->retaddr_column = 24;
+ fs->signal_frame = 1;
+
+ return _URC_NO_REASON;
+}
+#endif /* ifndef inhibit_libc */
diff --git a/gcc/config/m68k/linux.h b/gcc/config/m68k/linux.h
new file mode 100644
index 000000000..82417b477
--- /dev/null
+++ b/gcc/config/m68k/linux.h
@@ -0,0 +1,242 @@
+/* Definitions for Motorola 68k running Linux-based GNU systems with
+ ELF format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2006,
+ 2007, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (68k GNU/Linux with ELF)");
+
+/* Add %(asm_cpu_spec) to a generic definition of ASM_SPEC. */
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu_spec) %(asm_pcrel_spec)"
+
+#undef PREFERRED_STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY 32
+
+/* For 68k machines this only needs to be TRUE for the 68000. */
+
+#undef STRICT_ALIGNMENT
+#define STRICT_ALIGNMENT 0
+#undef M68K_HONOR_TARGET_STRICT_ALIGNMENT
+#define M68K_HONOR_TARGET_STRICT_ALIGNMENT 0
+
+/* Here are four prefixes that are used by asm_fprintf to
+ facilitate customization for alternate assembler syntaxes.
+ Machines with no likelihood of an alternate syntax need not
+ define these and need not use asm_fprintf. */
+
+/* The prefix for register names. Note that REGISTER_NAMES
+ is supposed to include this prefix. Also note that this is NOT an
+ fprintf format string, it is a literal string */
+
+#undef REGISTER_PREFIX
+#define REGISTER_PREFIX "%"
+
+/* The prefix for local (compiler generated) labels.
+ These labels will not appear in the symbol table. */
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+#define ASM_COMMENT_START "|"
+
+/* Target OS builtins. */
+#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+/* Provide a LINK_SPEC appropriate for GNU/Linux. Here we provide support
+ for the special GCC options -static and -shared, which allow us to
+ link things in one of these three modes by applying the appropriate
+ combinations of options at link-time.
+
+ When the -shared link option is used a final link is not being
+ done. */
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m m68kelf %{shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " LINUX_DYNAMIC_LINKER "} \
+ %{static}}"
+
+/* For compatibility with linux/a.out */
+
+#undef PCC_BITFIELD_TYPE_MATTERS
+
+/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
+ keep switch tables in the text section. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Use the default action for outputting the case label. */
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_RETURN_CASE_JUMP \
+ do { \
+ if (TARGET_COLDFIRE) \
+ { \
+ if (ADDRESS_REG_P (operands[0])) \
+ return "jmp %%pc@(2,%0:l)"; \
+ else \
+ return "ext%.l %0\n\tjmp %%pc@(2,%0:l)"; \
+ } \
+ else \
+ return "jmp %%pc@(2,%0:w)"; \
+ } while (0)
+
+/* This is how to output an assembler line that says to advance the
+ location counter to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) > 0) \
+ fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG));
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as uninitialized global
+ data. */
+
+#define BSS_SECTION_ASM_OP "\t.section\t.bss"
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#define NO_PROFILE_COUNTERS 1
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ if (flag_pic) \
+ fprintf (FILE, "\tbsr.l _mcount@PLTPC\n"); \
+ else \
+ fprintf (FILE, "\tjbsr _mcount\n"); \
+}
+
+/* Do not break .stabs pseudos into continuations. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* 1 if N is a possible register number for a function value. For
+ m68k/SVR4 allow d0, a0, or fp0 as return registers, for integral,
+ pointer, or floating types, respectively. Reject fp0 if not using
+ a 68881 coprocessor. */
+
+#undef FUNCTION_VALUE_REGNO_P
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == D0_REG || (N) == A0_REG || (TARGET_68881 && (N) == FP0_REG))
+
+/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
+ more than one register. */
+
+#undef NEEDS_UNTYPED_CALL
+#define NEEDS_UNTYPED_CALL 1
+
+/* Define how to generate (in the callee) the output value of a
+ function and how to find (in the caller) the value returned by a
+ function. VALTYPE is the data type of the value (as a tree). If
+ the precise function being called is known, FUNC is its
+ FUNCTION_DECL; otherwise, FUNC is 0. For m68k/SVR4 generate the
+ result in d0, a0, or fp0 as appropriate. */
+
+#undef FUNCTION_VALUE
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ m68k_function_value (VALTYPE, FUNC)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE.
+ For m68k/SVR4 look for integer values in d0, pointer values in d0
+ (returned in both d0 and a0), and floating values in fp0. */
+
+#undef LIBCALL_VALUE
+#define LIBCALL_VALUE(MODE) \
+ m68k_libcall_value (MODE)
+
+/* For m68k SVR4, structures are returned using the reentrant
+ technique. */
+#undef PCC_STATIC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Finalize the trampoline by flushing the insn cache. */
+
+#undef FINALIZE_TRAMPOLINE
+#define FINALIZE_TRAMPOLINE(TRAMP) \
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), \
+ LCT_NORMAL, VOIDmode, 2, TRAMP, Pmode, \
+ plus_constant (TRAMP, TRAMPOLINE_SIZE), Pmode);
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. The arguments are as
+ follows:
+
+ cacheflush (addr, scope, cache, len)
+
+ addr - the start address for the flush
+ scope - the scope of the flush (see the cpush insn)
+ cache - which cache to flush (see the cpush insn)
+ len - a factor relating to the number of flushes to perform:
+ len/16 lines, or len/4096 pages. */
+
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("%d1") = (unsigned long) (BEG); \
+ unsigned long _end = (unsigned long) (END); \
+ register unsigned long _len __asm ("%d4") = (_end - _beg + 32); \
+ __asm __volatile \
+ ("move%.l #123, %/d0\n\t" /* system call nr */ \
+ "move%.l #1, %/d2\n\t" /* clear lines */ \
+ "move%.l #3, %/d3\n\t" /* insn+data caches */ \
+ "trap #0" \
+ : /* no outputs */ \
+ : "d" (_beg), "d" (_len) \
+ : "%d0", "%d2", "%d3"); \
+}
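+
+/* A hedged C-level sketch of the same operation through syscall(2).
+   The numeric constants mirror the assembly above (123 is the
+   cacheflush system call number on m68k Linux, scope 1 flushes
+   lines, cache 3 flushes both caches) and are shown only for
+   illustration:
+
+     #include <unistd.h>
+     #include <sys/syscall.h>
+
+     static int flush_caches (void *beg, unsigned long len)
+     {
+       return syscall (123, beg, 1, 3, len);
+     }
+*/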
+
+#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define MD_UNWIND_SUPPORT "config/m68k/linux-unwind.h"
diff --git a/gcc/config/m68k/m68020-elf.h b/gcc/config/m68k/m68020-elf.h
new file mode 100644
index 000000000..299657cdc
--- /dev/null
+++ b/gcc/config/m68k/m68020-elf.h
@@ -0,0 +1,30 @@
+/* Definitions of target machine for GNU compiler. "naked" 68020,
+ elf object files and debugging, version.
+ Copyright (C) 1987, 1988, 1992, 1995, 1996, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This comment is here to see if it will keep Sun's cpp from dying. */
+
+/* We need to override the default specs from elfos.h. This suppresses the
+ loading of crt0.o by gcc's default linker spec. For embedded targets crt0
+ now comes from the linker script. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin.o%s"
+
+/* end of m68020-elf.h */
diff --git a/gcc/config/m68k/m68k-devices.def b/gcc/config/m68k/m68k-devices.def
new file mode 100644
index 000000000..4838fb062
--- /dev/null
+++ b/gcc/config/m68k/m68k-devices.def
@@ -0,0 +1,189 @@
+/* m68k device names -*- C -*-
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ Written by CodeSourcery
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file lists each target device that we support. It is used by
+ both C code and build scripts.
+
+ Following Freescale's lead, we group devices into families that share
+ the same core and extension units. Devices in these families differ
+ only in the set of peripherals they provide. We pick one device to
+ act as the representative of each family.
+
+ We further group device families into multilibs, again picking one
+ family (and its representative device) to represent each multilib.
+
+ Devices are declared using the construct:
+
+ M68K_DEVICE (NAME, ENUM_VALUE, FAMILY, MULTILIB, MICROARCH, ISA, FLAGS)
+
+ where the arguments are as follows:
+
+ NAME
+ The name of the device as a string. This string acts as the
+ device's -mcpu argument and is guaranteed to be unique.
+
+ ENUM_VALUE
+ The associated value in the target_device enumeration.
+ This value is also guaranteed to be unique.
+
+ FAMILY
+ The NAME field of the family's representative device.
+
+ MULTILIB
+ The NAME field of the multilib's representative device.
+
+ MICROARCH
+ The class of core used by devices in this family. The field
+ is a uarch enumeration value without the leading "u".
+
+ ISA
+ The ISA implemented by this family. The field is
+ an m68k_isa enumeration value.
+
+ FLAGS
+ The FL_* flags that apply to this family, excluding FL_FOR_isa_*.
+ See m68k.h for the full list.
+
+ There is a bit of duplication between devices in the same family,
+ but this approach makes scripting easier. We keep each entry on
+ a single line for the same reason.
+
+ As the compiler does not (currently) generate MAC or EMAC commands,
+ we do not need separate multilibs for cores that only differ in
+ their MAC functionality. */
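+
+/* As a hedged illustration of how a .def file in this style is
+   typically consumed (the usual X-macro pattern; the surrounding
+   names below are examples, not necessarily the exact ones GCC
+   uses):
+
+     #define M68K_DEVICE(NAME, ENUM, FAM, MLIB, UARCH, ISA, FLAGS) \
+       ENUM,
+     enum target_device {
+     #include "m68k-devices.def"
+       unk_device
+     };
+     #undef M68K_DEVICE
+*/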
+
+/* 680x0 series processors. */
+M68K_DEVICE ("68000", m68000, "68000", "68000", 68000, isa_00, 0)
+M68K_DEVICE ("68010", m68010, "68010", "68000", 68010, isa_10, 0)
+M68K_DEVICE ("68020", m68020, "68020", "68020", 68020, isa_20, FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("68030", m68030, "68030", "68020", 68030, isa_20, FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("68040", m68040, "68040", "68040", 68040, isa_40, FL_MMU)
+M68K_DEVICE ("68060", m68060, "68060", "68060", 68060, isa_40, FL_MMU)
+M68K_DEVICE ("68302", m68302, "68302", "68000", 68000, isa_00, FL_MMU)
+M68K_DEVICE ("68332", m68332, "68332", "cpu32", cpu32, isa_cpu32, FL_MMU)
+M68K_DEVICE ("cpu32", cpu32, "cpu32", "cpu32", cpu32, isa_cpu32, FL_MMU)
+
+/* ColdFire CFV1 processor. */
+/* For historical reasons, the 51 multilib is named 51qe. */
+M68K_DEVICE ("51", mcf51, "51", "51qe", cfv1, isa_c, FL_CF_USP)
+M68K_DEVICE ("51ac", mcf51ac, "51", "51qe", cfv1, isa_c, FL_CF_USP)
+M68K_DEVICE ("51cn", mcf51cn, "51", "51qe", cfv1, isa_c, FL_CF_USP)
+M68K_DEVICE ("51em", mcf51em, "51", "51qe", cfv1, isa_c, FL_CF_USP | FL_CF_MAC)
+M68K_DEVICE ("51jm", mcf51jm, "51", "51qe", cfv1, isa_c, FL_CF_USP)
+M68K_DEVICE ("51qe", mcf51qe, "51", "51qe", cfv1, isa_c, FL_CF_USP)
+
+/* ColdFire CFV2 processors. */
+M68K_DEVICE ("5202", mcf5202, "5206", "5206", cfv2, isa_a, 0)
+M68K_DEVICE ("5204", mcf5204, "5206", "5206", cfv2, isa_a, 0)
+M68K_DEVICE ("5206", mcf5206, "5206", "5206", cfv2, isa_a, 0)
+M68K_DEVICE ("5206e", mcf5206e, "5206e", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5207", mcf5207, "5208", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5208", mcf5208, "5208", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5210a", mcf5210a, "5211a", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5211a", mcf5211a, "5211a", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5211", mcf5211, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5212", mcf5212, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5213", mcf5213, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5214", mcf5214, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5216", mcf5216, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5221x", mcf5221x, "5221x", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("52221", mcf52221, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("52223", mcf52223, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("52230", mcf52230, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52231", mcf52231, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52232", mcf52232, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52233", mcf52233, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52234", mcf52234, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52235", mcf52235, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5224", mcf5224, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5225", mcf5225, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("52252", mcf52252, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52254", mcf52254, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52255", mcf52255, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52256", mcf52256, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52258", mcf52258, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52259", mcf52259, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52274", mcf52274, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("52277", mcf52277, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5232", mcf5232, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5233", mcf5233, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5234", mcf5234, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5235", mcf5235, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("523x", mcf523x, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5249", mcf5249, "5249", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5250", mcf5250, "5250", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5253", mcf5253, "5253", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5270", mcf5270, "5271", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5271", mcf5271, "5271", "5208", cfv2, isa_aplus, FL_CF_HWDIV)
+M68K_DEVICE ("5272", mcf5272, "5272", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5274", mcf5274, "5275", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5275", mcf5275, "5275", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5280", mcf5280, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5281", mcf5281, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5282", mcf5282, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("528x", mcf528x, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+
+/* CFV3 processors. */
+M68K_DEVICE ("53011", mcf53011, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53012", mcf53012, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53013", mcf53013, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53014", mcf53014, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53015", mcf53015, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53016", mcf53016, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("53017", mcf53017, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5307", mcf5307, "5307", "5307", cfv3, isa_a, FL_CF_HWDIV | FL_CF_MAC)
+M68K_DEVICE ("5327", mcf5327, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5328", mcf5328, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5329", mcf5329, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("532x", mcf532x, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5372", mcf5372, "5373", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("5373", mcf5373, "5373", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+M68K_DEVICE ("537x", mcf537x, "5373", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
+
+/* CFV4/CFV4e processors. */
+M68K_DEVICE ("5407", mcf5407, "5407", "5407", cfv4, isa_b, FL_CF_MAC)
+M68K_DEVICE ("54410", mcf54410, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54415", mcf54415, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54416", mcf54416, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54417", mcf54417, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54418", mcf54418, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54450", mcf54450, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54451", mcf54451, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54452", mcf54452, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54453", mcf54453, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54454", mcf54454, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("54455", mcf54455, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
+M68K_DEVICE ("5470", mcf5470, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5471", mcf5471, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5472", mcf5472, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5473", mcf5473, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5474", mcf5474, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5475", mcf5475, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("547x", mcf547x, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5480", mcf5480, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5481", mcf5481, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5482", mcf5482, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5483", mcf5483, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5484", mcf5484, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("5485", mcf5485, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+M68K_DEVICE ("548x", mcf548x, "5485", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+
+/* Fido processor. */
+M68K_DEVICE ("fidoa", fidoa, "cpu32", "fidoa", cpu32, isa_cpu32, FL_FIDOA | FL_MMU)
diff --git a/gcc/config/m68k/m68k-modes.def b/gcc/config/m68k/m68k-modes.def
new file mode 100644
index 000000000..06297cad9
--- /dev/null
+++ b/gcc/config/m68k/m68k-modes.def
@@ -0,0 +1,25 @@
+/* M68k extra machine modes.
+ Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* These differ in the representation of the canonical NaN. */
+RESET_FLOAT_FORMAT (SF, motorola_single_format);
+RESET_FLOAT_FORMAT (DF, motorola_double_format);
+
+/* 80-bit floating point (IEEE extended, in a 96-bit field) */
+FRACTIONAL_FLOAT_MODE (XF, 80, 12, ieee_extended_motorola_format);
diff --git a/gcc/config/m68k/m68k-none.h b/gcc/config/m68k/m68k-none.h
new file mode 100644
index 000000000..8e7652885
--- /dev/null
+++ b/gcc/config/m68k/m68k-none.h
@@ -0,0 +1,19 @@
+/* Definitions of target machine for GNU compiler. "naked" 68020.
+ Copyright (C) 1994, 1996, 2003, 2006, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
diff --git a/gcc/config/m68k/m68k-protos.h b/gcc/config/m68k/m68k-protos.h
new file mode 100644
index 000000000..ad0202630
--- /dev/null
+++ b/gcc/config/m68k/m68k-protos.h
@@ -0,0 +1,102 @@
+/* Definitions of target machine for GNU compiler. Sun 68000/68020 version.
+ Copyright (C) 2000, 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Define functions defined in m68k.c and used in templates.  */
+
+#ifdef RTX_CODE
+extern enum m68k_function_kind m68k_get_function_kind (tree);
+extern HOST_WIDE_INT m68k_initial_elimination_offset (int from, int to);
+
+extern void split_di (rtx[], int, rtx[], rtx[]);
+
+extern bool valid_mov3q_const (HOST_WIDE_INT);
+extern const char *output_move_simode (rtx *);
+extern const char *output_move_himode (rtx *);
+extern const char *output_move_qimode (rtx *);
+extern const char *output_move_stricthi (rtx *);
+extern const char *output_move_strictqi (rtx *);
+extern const char *output_move_double (rtx *);
+extern const char *output_move_const_single (rtx *);
+extern const char *output_move_const_double (rtx *);
+extern const char *output_btst (rtx *, rtx, rtx, rtx, int);
+extern const char *output_scc_di (rtx, rtx, rtx, rtx);
+extern const char *output_addsi3 (rtx *);
+extern const char *output_andsi3 (rtx *);
+extern const char *output_iorsi3 (rtx *);
+extern const char *output_xorsi3 (rtx *);
+extern const char *output_call (rtx);
+extern const char *output_sibcall (rtx);
+extern void output_dbcc_and_branch (rtx *);
+extern int floating_exact_log2 (rtx);
+extern bool strict_low_part_peephole_ok (enum machine_mode mode,
+					 rtx first_insn, rtx target);
+
+/* Functions from m68k.c used in macros. */
+extern int standard_68881_constant_p (rtx);
+extern void print_operand_address (FILE *, rtx);
+extern void print_operand (FILE *, rtx, int);
+extern bool m68k_output_addr_const_extra (FILE *, rtx);
+extern void notice_update_cc (rtx, rtx);
+extern bool m68k_legitimate_base_reg_p (rtx, bool);
+extern bool m68k_legitimate_index_reg_p (rtx, bool);
+extern bool m68k_illegitimate_symbolic_constant_p (rtx);
+extern bool m68k_matches_q_p (rtx);
+extern bool m68k_matches_u_p (rtx);
+extern rtx legitimize_pic_address (rtx, enum machine_mode, rtx);
+extern rtx m68k_legitimize_tls_address (rtx);
+extern bool m68k_tls_reference_p (rtx, bool);
+extern int valid_dbcc_comparison_p_2 (rtx, enum machine_mode);
+extern rtx m68k_libcall_value (enum machine_mode);
+extern rtx m68k_function_value (const_tree, const_tree);
+extern int emit_move_sequence (rtx *, enum machine_mode, rtx);
+extern bool m68k_movem_pattern_p (rtx, rtx, HOST_WIDE_INT, bool);
+extern const char *m68k_output_movem (rtx *, rtx, HOST_WIDE_INT, bool);
+extern void m68k_final_prescan_insn (rtx, rtx *, int);
+
+/* Functions from m68k.c used in constraints.md. */
+extern rtx m68k_unwrap_symbol (rtx, bool);
+
+/* Functions from m68k.c used in genattrtab. */
+#ifdef HAVE_ATTR_cpu
+extern enum attr_cpu m68k_sched_cpu;
+extern enum attr_mac m68k_sched_mac;
+
+extern enum attr_opx_type m68k_sched_attr_opx_type (rtx, int);
+extern enum attr_opy_type m68k_sched_attr_opy_type (rtx, int);
+extern enum attr_size m68k_sched_attr_size (rtx);
+extern enum attr_op_mem m68k_sched_attr_op_mem (rtx);
+extern enum attr_type m68k_sched_branch_type (rtx);
+#endif /* HAVE_ATTR_cpu */
+
+#endif /* RTX_CODE */
+
+extern bool m68k_regno_mode_ok (int, enum machine_mode);
+extern enum reg_class m68k_secondary_reload_class (enum reg_class,
+ enum machine_mode, rtx);
+extern enum reg_class m68k_preferred_reload_class (rtx, enum reg_class);
+extern int flags_in_68881 (void);
+extern void m68k_expand_prologue (void);
+extern bool m68k_use_return_insn (void);
+extern void m68k_expand_epilogue (bool);
+extern const char *m68k_cpp_cpu_ident (const char *);
+extern const char *m68k_cpp_cpu_family (const char *);
+extern void init_68881_table (void);
+extern rtx m68k_legitimize_call_address (rtx);
+extern rtx m68k_legitimize_sibcall_address (rtx);
+extern int m68k_hard_regno_rename_ok (unsigned int, unsigned int);
diff --git a/gcc/config/m68k/m68k.c b/gcc/config/m68k/m68k.c
new file mode 100644
index 000000000..e5bd0119a
--- /dev/null
+++ b/gcc/config/m68k/m68k.c
@@ -0,0 +1,6615 @@
+/* Subroutines for insn-output.c for Motorola 68000 family.
+ Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "function.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "diagnostic-core.h"
+#include "expr.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "debug.h"
+#include "flags.h"
+#include "df.h"
+/* ??? Need to add a dependency between m68k.o and sched-int.h. */
+#include "sched-int.h"
+#include "insn-codes.h"
+#include "ggc.h"
+
+enum reg_class regno_reg_class[] =
+{
+ DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
+ DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
+ ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
+ ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ ADDR_REGS
+};
+
+
+/* The minimum number of integer registers that we want to save with the
+ movem instruction. Using two movel instructions instead of a single
+ moveml is about 15% faster for the 68020 and 68030 at no expense in
+ code size. */
+#define MIN_MOVEM_REGS 3
+
+/* The minimum number of floating point registers that we want to save
+ with the fmovem instruction. */
+#define MIN_FMOVEM_REGS 1
+
+/* Structure describing stack frame layout. */
+struct m68k_frame
+{
+ /* Stack pointer to frame pointer offset. */
+ HOST_WIDE_INT offset;
+
+ /* Offset of FPU registers. */
+ HOST_WIDE_INT foffset;
+
+ /* Frame size in bytes (rounded up). */
+ HOST_WIDE_INT size;
+
+ /* Data and address register. */
+ int reg_no;
+ unsigned int reg_mask;
+
+ /* FPU registers. */
+ int fpu_no;
+ unsigned int fpu_mask;
+
+ /* Offsets relative to ARG_POINTER. */
+ HOST_WIDE_INT frame_pointer_offset;
+ HOST_WIDE_INT stack_pointer_offset;
+
+ /* Function which the above information refers to. */
+ int funcdef_no;
+};
+
+/* Current frame information calculated by m68k_compute_frame_layout(). */
+static struct m68k_frame current_frame;
+
+/* Structure describing an m68k address.
+
+ If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
+ with null fields evaluating to 0. Here:
+
+ - BASE satisfies m68k_legitimate_base_reg_p
+ - INDEX satisfies m68k_legitimate_index_reg_p
+ - OFFSET satisfies m68k_legitimate_constant_address_p
+
+ INDEX is either HImode or SImode. The other fields are SImode.
+
+ If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
+ the address is (BASE)+. */
+struct m68k_address {
+ enum rtx_code code;
+ rtx base;
+ rtx index;
+ rtx offset;
+ int scale;
+};
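+
+/* For instance, the 68020 indexed address (8,%a0,%d1.l*4) decomposes to
+   CODE == UNKNOWN, BASE == %a0, INDEX == %d1, SCALE == 4 and OFFSET == 8,
+   per the BASE + INDEX * SCALE + OFFSET description above.  (Illustrative
+   example only.)  */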
+
+static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
+static int m68k_sched_issue_rate (void);
+static int m68k_sched_variable_issue (FILE *, int, rtx, int);
+static void m68k_sched_md_init_global (FILE *, int, int);
+static void m68k_sched_md_finish_global (FILE *, int);
+static void m68k_sched_md_init (FILE *, int, int);
+static void m68k_sched_dfa_pre_advance_cycle (void);
+static void m68k_sched_dfa_post_advance_cycle (void);
+static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
+
+static bool m68k_can_eliminate (const int, const int);
+static void m68k_conditional_register_usage (void);
+static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
+static bool m68k_handle_option (size_t, const char *, int);
+static void m68k_option_override (void);
+static void m68k_override_options_after_change (void);
+static rtx find_addr_reg (rtx);
+static const char *singlemove_string (rtx *);
+static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, tree);
+static rtx m68k_struct_value_rtx (tree, int);
+static tree m68k_handle_fndecl_attribute (tree *node, tree name,
+ tree args, int flags,
+ bool *no_add_attrs);
+static void m68k_compute_frame_layout (void);
+static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
+static bool m68k_ok_for_sibcall_p (tree, tree);
+static bool m68k_tls_symbol_p (rtx);
+static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
+static bool m68k_rtx_costs (rtx, int, int, int *, bool);
+#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
+static bool m68k_return_in_memory (const_tree, const_tree);
+#endif
+static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
+static void m68k_trampoline_init (rtx, tree, rtx);
+static int m68k_return_pops_args (tree, tree, int);
+static rtx m68k_delegitimize_address (rtx);
+static void m68k_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ const_tree, bool);
+static rtx m68k_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
+ const_tree, bool);
+
+
+/* Specify the identification number of the library being built.  */
+const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
+
+/* Initialize the GCC target structure. */
+
+#if INT_OP_GROUP == INT_OP_DOT_WORD
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+#endif
+
+#if INT_OP_GROUP == INT_OP_NO_DOT
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\tbyte\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
+#endif
+
+#if INT_OP_GROUP == INT_OP_DC
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\tdc.b\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
+#endif
+
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_ASM_FILE_START_APP_OFF
+#define TARGET_ASM_FILE_START_APP_OFF true
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
+
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
+
+#undef TARGET_SCHED_INIT_GLOBAL
+#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
+
+#undef TARGET_SCHED_FINISH_GLOBAL
+#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
+
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT m68k_sched_md_init
+
+#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
+#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
+
+#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
+#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
+
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ m68k_sched_first_cycle_multipass_dfa_lookahead
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION m68k_handle_option
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE m68k_option_override
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS m68k_rtx_costs
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
+
+#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
+#endif
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS (true)
+
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
+#endif
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE m68k_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
+
+#undef TARGET_RETURN_POPS_ARGS
+#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
+
+#undef TARGET_DELEGITIMIZE_ADDRESS
+#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG m68k_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
+
+static const struct attribute_spec m68k_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
+ { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
+ { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Base flags for 68k ISAs. */
+#define FL_FOR_isa_00 FL_ISA_68000
+#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
+/* FL_68881 controls the default setting of -m68881. gcc has traditionally
+ generated 68881 code for 68020 and 68030 targets unless explicitly told
+ not to. */
+#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
+ | FL_BITFIELD | FL_68881)
+#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
+#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
+
+/* Base flags for ColdFire ISAs. */
+#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
+#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
+/* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
+#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
+/* ISA_C is not upwardly compatible with ISA_B. */
+#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
+
+enum m68k_isa
+{
+ /* Traditional 68000 instruction sets. */
+ isa_00,
+ isa_10,
+ isa_20,
+ isa_40,
+ isa_cpu32,
+ /* ColdFire instruction set variants. */
+ isa_a,
+ isa_aplus,
+ isa_b,
+ isa_c,
+ isa_max
+};
+
+/* Information about one of the -march, -mcpu or -mtune arguments. */
+struct m68k_target_selection
+{
+ /* The argument being described. */
+ const char *name;
+
+ /* For -mcpu, this is the device selected by the option.
+ For -mtune and -march, it is a representative device
+ for the microarchitecture or ISA respectively. */
+ enum target_device device;
+
+ /* The M68K_DEVICE fields associated with DEVICE. See the comment
+ in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
+ const char *family;
+ enum uarch_type microarch;
+ enum m68k_isa isa;
+ unsigned long flags;
+};
+
+/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
+static const struct m68k_target_selection all_devices[] =
+{
+#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
+ { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
+#include "m68k-devices.def"
+#undef M68K_DEVICE
+ { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
+};
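+
+/* For example, the m68k-devices.def entry
+     M68K_DEVICE ("5475", mcf5475, "5475", "5475", cfv4e, isa_b,
+		  FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
+   expands to the element
+     { "5475", mcf5475, "5475", ucfv4e, isa_b,
+       FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU | FL_FOR_isa_b }
+   here; the MULTILIB field is unused in this table.  */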
+
+/* A list of all ISAs, mapping each one to a representative device.
+ Used for -march selection. */
+static const struct m68k_target_selection all_isas[] =
+{
+ { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
+ { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
+ { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
+ { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
+ { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
+ { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
+ { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
+ { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
+ | FL_CF_HWDIV) },
+ { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
+ | FL_CF_HWDIV) },
+ { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
+ { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
+ | FL_CF_HWDIV) },
+ { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
+};
+
+/* A list of all microarchitectures, mapping each one to a representative
+ device. Used for -mtune selection. */
+static const struct m68k_target_selection all_microarchs[] =
+{
+ { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
+ { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
+ { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
+ { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
+ { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
+ { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
+ { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
+ { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
+ { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
+ { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
+ { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
+ { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
+ | FL_CF_HWDIV) },
+ { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
+ { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
+ | FL_CF_USP
+ | FL_CF_EMAC
+ | FL_CF_FPU) },
+ { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
+};
+
+/* The entries associated with the -mcpu, -march and -mtune settings,
+ or null for options that have not been used. */
+const struct m68k_target_selection *m68k_cpu_entry;
+const struct m68k_target_selection *m68k_arch_entry;
+const struct m68k_target_selection *m68k_tune_entry;
+
+/* Which CPU we are generating code for. */
+enum target_device m68k_cpu;
+
+/* Which microarchitecture to tune for. */
+enum uarch_type m68k_tune;
+
+/* Which FPU to use. */
+enum fpu_type m68k_fpu;
+
+/* The set of FL_* flags that apply to the target processor. */
+unsigned int m68k_cpu_flags;
+
+/* The set of FL_* flags that apply to the processor to be tuned for. */
+unsigned int m68k_tune_flags;
+
+/* Asm templates for calling or jumping to an arbitrary symbolic address,
+ or NULL if such calls or jumps are not supported. The address is held
+ in operand 0. */
+const char *m68k_symbolic_call;
+const char *m68k_symbolic_jump;
+
+/* Enum variable that corresponds to m68k_symbolic_call values. */
+enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
+
+
+/* See whether TABLE has an entry with name NAME. Return true and
+ store the entry in *ENTRY if so, otherwise return false and
+ leave *ENTRY alone. */
+
+static bool
+m68k_find_selection (const struct m68k_target_selection **entry,
+ const struct m68k_target_selection *table,
+ const char *name)
+{
+ size_t i;
+
+ for (i = 0; table[i].name; i++)
+ if (strcmp (table[i].name, name) == 0)
+ {
+ *entry = table + i;
+ return true;
+ }
+ return false;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+m68k_handle_option (size_t code, const char *arg, int value)
+{
+ switch (code)
+ {
+ case OPT_march_:
+ return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
+
+ case OPT_mcpu_:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
+
+ case OPT_mtune_:
+ return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
+
+ case OPT_m5200:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
+
+ case OPT_m5206e:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
+
+ case OPT_m528x:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
+
+ case OPT_m5307:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
+
+ case OPT_m5407:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
+
+ case OPT_mcfv4e:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
+
+ case OPT_m68000:
+ case OPT_mc68000:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
+
+ case OPT_m68010:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
+
+ case OPT_m68020:
+ case OPT_mc68020:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
+
+ case OPT_m68020_40:
+ return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
+ "68020-40")
+ && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
+
+ case OPT_m68020_60:
+ return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
+ "68020-60")
+ && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
+
+ case OPT_m68030:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
+
+ case OPT_m68040:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
+
+ case OPT_m68060:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
+
+ case OPT_m68302:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
+
+ case OPT_m68332:
+ case OPT_mcpu32:
+ return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
+
+ case OPT_mshared_library_id_:
+ if (value > MAX_LIBRARY_ID)
+ error ("-mshared-library-id=%s is not between 0 and %d",
+ arg, MAX_LIBRARY_ID);
+ else
+ {
+ char *tmp;
+ asprintf (&tmp, "%d", (value * -4) - 4);
+ m68k_library_id_string = tmp;
+ }
+ return true;
+
+ default:
+ return true;
+ }
+}
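+
+/* Note that the two-part options above map to two table lookups:
+   -m68020-40, for example, selects both -mtune=68020-40 and
+   -mcpu=68020.  */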
+
+/* Implement TARGET_OPTION_OVERRIDE. */
+
+static void
+m68k_option_override (void)
+{
+ const struct m68k_target_selection *entry;
+ unsigned long target_mask;
+
+ /* User can choose:
+
+ -mcpu=
+ -march=
+ -mtune=
+
+ -march=ARCH should generate code that runs any processor
+ implementing architecture ARCH. -mcpu=CPU should override -march
+ and should generate code that runs on processor CPU, making free
+ use of any instructions that CPU understands. -mtune=UARCH applies
+ on top of -mcpu or -march and optimizes the code for UARCH. It does
+ not change the target architecture. */
+ if (m68k_cpu_entry)
+ {
+ /* Complain if the -march setting is for a different microarchitecture,
+ or includes flags that the -mcpu setting doesn't. */
+ if (m68k_arch_entry
+ && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
+ || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
+ warning (0, "-mcpu=%s conflicts with -march=%s",
+ m68k_cpu_entry->name, m68k_arch_entry->name);
+
+ entry = m68k_cpu_entry;
+ }
+ else
+ entry = m68k_arch_entry;
+
+ if (!entry)
+ entry = all_devices + TARGET_CPU_DEFAULT;
+
+ m68k_cpu_flags = entry->flags;
+
+ /* Use the architecture setting to derive default values for
+ certain flags. */
+ target_mask = 0;
+
+ /* ColdFire is lenient about alignment. */
+ if (!TARGET_COLDFIRE)
+ target_mask |= MASK_STRICT_ALIGNMENT;
+
+ if ((m68k_cpu_flags & FL_BITFIELD) != 0)
+ target_mask |= MASK_BITFIELD;
+ if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
+ target_mask |= MASK_CF_HWDIV;
+ if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
+ target_mask |= MASK_HARD_FLOAT;
+ target_flags |= target_mask & ~target_flags_explicit;
+
+ /* Set the directly-usable versions of the -mcpu and -mtune settings. */
+ m68k_cpu = entry->device;
+ if (m68k_tune_entry)
+ {
+ m68k_tune = m68k_tune_entry->microarch;
+ m68k_tune_flags = m68k_tune_entry->flags;
+ }
+#ifdef M68K_DEFAULT_TUNE
+ else if (!m68k_cpu_entry && !m68k_arch_entry)
+ {
+      enum target_device dev;
+      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
+      m68k_tune = all_microarchs[M68K_DEFAULT_TUNE].microarch;
+      m68k_tune_flags = all_devices[dev].flags;
+ }
+#endif
+ else
+ {
+ m68k_tune = entry->microarch;
+ m68k_tune_flags = entry->flags;
+ }
+
+ /* Set the type of FPU. */
+ m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
+ : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
+ : FPUTYPE_68881);
+
+  /* Sanity check to ensure that -msep-data and -mid-shared-library are
+   * not specified together.  Doing so simply doesn't make sense.
+   */
+ if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
+ error ("cannot specify both -msep-data and -mid-shared-library");
+
+ /* If we're generating code for a separate A5 relative data segment,
+ * we've got to enable -fPIC as well. This might be relaxable to
+ * -fpic but it hasn't been tested properly.
+ */
+ if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
+ flag_pic = 2;
+
+ /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
+ error if the target does not support them. */
+ if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
+ error ("-mpcrel -fPIC is not currently supported on selected cpu");
+
+ /* ??? A historic way of turning on pic, or is this intended to
+ be an embedded thing that doesn't have the same name binding
+ significance that it does on hosted ELF systems? */
+ if (TARGET_PCREL && flag_pic == 0)
+ flag_pic = 1;
+
+ if (!flag_pic)
+ {
+ m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
+
+ m68k_symbolic_jump = "jra %a0";
+ }
+ else if (TARGET_ID_SHARED_LIBRARY)
+ /* All addresses must be loaded from the GOT. */
+ ;
+ else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
+ {
+ if (TARGET_PCREL)
+ m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
+ else
+ m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
+
+ if (TARGET_ISAC)
+ /* No unconditional long branch */;
+ else if (TARGET_PCREL)
+ m68k_symbolic_jump = "bra%.l %c0";
+ else
+ m68k_symbolic_jump = "bra%.l %p0";
+      /* Turn off function cse if we are doing PIC.  We always want
+	 function calls to be done as `bsr foo@PLTPC'.  */
+ /* ??? It's traditional to do this for -mpcrel too, but it isn't
+ clear how intentional that is. */
+ flag_no_function_cse = 1;
+ }
+
+ switch (m68k_symbolic_call_var)
+ {
+ case M68K_SYMBOLIC_CALL_JSR:
+ m68k_symbolic_call = "jsr %a0";
+ break;
+
+ case M68K_SYMBOLIC_CALL_BSR_C:
+ m68k_symbolic_call = "bsr%.l %c0";
+ break;
+
+ case M68K_SYMBOLIC_CALL_BSR_P:
+ m68k_symbolic_call = "bsr%.l %p0";
+ break;
+
+ case M68K_SYMBOLIC_CALL_NONE:
+ gcc_assert (m68k_symbolic_call == NULL);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
+ if (align_labels > 2)
+ {
+ warning (0, "-falign-labels=%d is not supported", align_labels);
+ align_labels = 0;
+ }
+ if (align_loops > 2)
+ {
+ warning (0, "-falign-loops=%d is not supported", align_loops);
+ align_loops = 0;
+ }
+#endif
+
+ SUBTARGET_OVERRIDE_OPTIONS;
+
+  /* Set up scheduling options.  */
+ if (TUNE_CFV1)
+ m68k_sched_cpu = CPU_CFV1;
+ else if (TUNE_CFV2)
+ m68k_sched_cpu = CPU_CFV2;
+ else if (TUNE_CFV3)
+ m68k_sched_cpu = CPU_CFV3;
+ else if (TUNE_CFV4)
+ m68k_sched_cpu = CPU_CFV4;
+ else
+ {
+ m68k_sched_cpu = CPU_UNKNOWN;
+ flag_schedule_insns = 0;
+ flag_schedule_insns_after_reload = 0;
+ flag_modulo_sched = 0;
+ }
+
+ if (m68k_sched_cpu != CPU_UNKNOWN)
+ {
+ if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
+ m68k_sched_mac = MAC_CF_EMAC;
+ else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
+ m68k_sched_mac = MAC_CF_MAC;
+ else
+ m68k_sched_mac = MAC_NO;
+ }
+}
+
+/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
+
+static void
+m68k_override_options_after_change (void)
+{
+ if (m68k_sched_cpu == CPU_UNKNOWN)
+ {
+ flag_schedule_insns = 0;
+ flag_schedule_insns_after_reload = 0;
+ flag_modulo_sched = 0;
+ }
+}
+
+/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
+ given argument and NAME is the argument passed to -mcpu. Return NULL
+ if -mcpu was not passed. */
+
+const char *
+m68k_cpp_cpu_ident (const char *prefix)
+{
+ if (!m68k_cpu_entry)
+ return NULL;
+ return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
+}
+
+/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
+ given argument and NAME is the name of the representative device for
+ the -mcpu argument's family. Return NULL if -mcpu was not passed. */
+
+const char *
+m68k_cpp_cpu_family (const char *prefix)
+{
+ if (!m68k_cpu_entry)
+ return NULL;
+ return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
+}
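+
+/* For example, with -mcpu=5475, m68k_cpp_cpu_ident ("cf") returns
+   "__mcf_cpu_5475" and m68k_cpp_cpu_family ("cf") returns
+   "__mcf_family_5475", the family of the 5475 being "5475" in
+   m68k-devices.def.  */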
+
+/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
+ "interrupt_handler" attribute and interrupt_thread if FUNC has an
+ "interrupt_thread" attribute. Otherwise, return
+ m68k_fk_normal_function. */
+
+enum m68k_function_kind
+m68k_get_function_kind (tree func)
+{
+ tree a;
+
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
+
+ a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
+ if (a != NULL_TREE)
+ return m68k_fk_interrupt_handler;
+
+ a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
+ if (a != NULL_TREE)
+ return m68k_fk_interrupt_handler;
+
+ a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
+ if (a != NULL_TREE)
+ return m68k_fk_interrupt_thread;
+
+ return m68k_fk_normal_function;
+}
+
+/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
+ struct attribute_spec.handler. */
+static tree
+m68k_handle_fndecl_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
+ {
+ error ("multiple interrupt attributes not allowed");
+ *no_add_attrs = true;
+ }
+
+ if (!TARGET_FIDOA
+ && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
+ {
+ error ("interrupt_thread is available only on fido");
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static void
+m68k_compute_frame_layout (void)
+{
+ int regno, saved;
+ unsigned int mask;
+ enum m68k_function_kind func_kind =
+ m68k_get_function_kind (current_function_decl);
+ bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
+ bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
+
+ /* Only compute the frame once per function.
+ Don't cache information until reload has been completed. */
+ if (current_frame.funcdef_no == current_function_funcdef_no
+ && reload_completed)
+ return;
+
+ current_frame.size = (get_frame_size () + 3) & -4;
+
+ mask = saved = 0;
+
+  /* An interrupt thread does not need to save any registers.  */
+ if (!interrupt_thread)
+ for (regno = 0; regno < 16; regno++)
+ if (m68k_save_reg (regno, interrupt_handler))
+ {
+ mask |= 1 << (regno - D0_REG);
+ saved++;
+ }
+ current_frame.offset = saved * 4;
+ current_frame.reg_no = saved;
+ current_frame.reg_mask = mask;
+
+ current_frame.foffset = 0;
+ mask = saved = 0;
+ if (TARGET_HARD_FLOAT)
+ {
+      /* An interrupt thread does not need to save any registers.  */
+ if (!interrupt_thread)
+ for (regno = 16; regno < 24; regno++)
+ if (m68k_save_reg (regno, interrupt_handler))
+ {
+ mask |= 1 << (regno - FP0_REG);
+ saved++;
+ }
+ current_frame.foffset = saved * TARGET_FP_REG_SIZE;
+ current_frame.offset += current_frame.foffset;
+ }
+ current_frame.fpu_no = saved;
+ current_frame.fpu_mask = mask;
+
+ /* Remember what function this frame refers to. */
+ current_frame.funcdef_no = current_function_funcdef_no;
+}
+
+/* Worker function for TARGET_CAN_ELIMINATE. */
+
+bool
+m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
+}
+
+HOST_WIDE_INT
+m68k_initial_elimination_offset (int from, int to)
+{
+ int argptr_offset;
+ /* The arg pointer points 8 bytes before the start of the arguments,
+ as defined by FIRST_PARM_OFFSET. This makes it coincident with the
+ frame pointer in most frames. */
+ argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
+ if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+ return argptr_offset;
+
+ m68k_compute_frame_layout ();
+
+ gcc_assert (to == STACK_POINTER_REGNUM);
+ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ return current_frame.offset + current_frame.size - argptr_offset;
+ case FRAME_POINTER_REGNUM:
+ return current_frame.offset + current_frame.size;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This function is responsible for
+ knowing which registers should not be saved even if used.
+ Return true if we need to save REGNO. */
+
+static bool
+m68k_save_reg (unsigned int regno, bool interrupt_handler)
+{
+ if (flag_pic && regno == PIC_REG)
+ {
+ if (crtl->saves_all_registers)
+ return true;
+ if (crtl->uses_pic_offset_table)
+ return true;
+      /* Reload may introduce constant pool references into a function
+	 that hitherto didn't need a PIC register.  Note that the test
+ above will not catch that case because we will only set
+ crtl->uses_pic_offset_table when emitting
+ the address reloads. */
+ if (crtl->uses_const_pool)
+ return true;
+ }
+
+ if (crtl->calls_eh_return)
+ {
+ unsigned int i;
+ for (i = 0; ; i++)
+ {
+ unsigned int test = EH_RETURN_DATA_REGNO (i);
+ if (test == INVALID_REGNUM)
+ break;
+ if (test == regno)
+ return true;
+ }
+ }
+
+ /* Fixed regs we never touch. */
+ if (fixed_regs[regno])
+ return false;
+
+ /* The frame pointer (if it is such) is handled specially. */
+ if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
+ return false;
+
+ /* Interrupt handlers must also save call_used_regs
+ if they are live or when calling nested functions. */
+ if (interrupt_handler)
+ {
+ if (df_regs_ever_live_p (regno))
+ return true;
+
+ if (!current_function_is_leaf && call_used_regs[regno])
+ return true;
+ }
+
+ /* Never need to save registers that aren't touched. */
+ if (!df_regs_ever_live_p (regno))
+ return false;
+
+ /* Otherwise save everything that isn't call-clobbered. */
+ return !call_used_regs[regno];
+}
+
+/* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
+ the lowest memory address. COUNT is the number of registers to be
+ moved, with register REGNO + I being moved if bit I of MASK is set.
+ STORE_P specifies the direction of the move and ADJUST_STACK_P says
+   whether this is a pre-decrement (if STORE_P) or post-increment
+   (if !STORE_P) operation.  */
+
+static rtx
+m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
+ unsigned int count, unsigned int regno,
+ unsigned int mask, bool store_p, bool adjust_stack_p)
+{
+ int i;
+ rtx body, addr, src, operands[2];
+ enum machine_mode mode;
+
+ body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
+ mode = reg_raw_mode[regno];
+ i = 0;
+
+ if (adjust_stack_p)
+ {
+ src = plus_constant (base, (count
+ * GET_MODE_SIZE (mode)
+ * (HOST_WIDE_INT) (store_p ? -1 : 1)));
+ XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
+ }
+
+ for (; mask != 0; mask >>= 1, regno++)
+ if (mask & 1)
+ {
+ addr = plus_constant (base, offset);
+ operands[!store_p] = gen_frame_mem (mode, addr);
+ operands[store_p] = gen_rtx_REG (mode, regno);
+ XVECEXP (body, 0, i++)
+ = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
+ offset += GET_MODE_SIZE (mode);
+ }
+ gcc_assert (i == XVECLEN (body, 0));
+
+ return emit_insn (body);
+}
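+
+/* As an illustrative sketch, the prologue call
+     m68k_emit_movem (stack_pointer_rtx, -8, 2, D0_REG, 0x0c, true, true)
+   (saving d2 and d3) emits the single PARALLEL
+     [(set sp (plus sp (const_int -8)))
+      (set (mem (plus sp (const_int -8))) (reg:SI d2))
+      (set (mem (plus sp (const_int -4))) (reg:SI d3))]
+   so the stack adjustment and the stores form one insn that the movem
+   patterns can match.  */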
+
+/* Make INSN a frame-related instruction. */
+
+static void
+m68k_set_frame_related (rtx insn)
+{
+ rtx body;
+ int i;
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ body = PATTERN (insn);
+ if (GET_CODE (body) == PARALLEL)
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
+}
+
+/* Emit RTL for the "prologue" define_expand. */
+
+void
+m68k_expand_prologue (void)
+{
+ HOST_WIDE_INT fsize_with_regs;
+ rtx limit, src, dest;
+
+ m68k_compute_frame_layout ();
+
+ /* If the stack limit is a symbol, we can check it here,
+ before actually allocating the space. */
+ if (crtl->limit_stack
+ && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
+ {
+ limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
+ if (!LEGITIMATE_CONSTANT_P (limit))
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
+ limit = gen_rtx_REG (Pmode, D0_REG);
+ }
+ emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
+ stack_pointer_rtx, limit),
+ stack_pointer_rtx, limit,
+ const1_rtx));
+ }
+
+ fsize_with_regs = current_frame.size;
+ if (TARGET_COLDFIRE)
+ {
+ /* ColdFire's move multiple instructions do not allow pre-decrement
+ addressing. Add the size of movem saves to the initial stack
+ allocation instead. */
+ if (current_frame.reg_no >= MIN_MOVEM_REGS)
+ fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
+ if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
+ fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (fsize_with_regs == 0 && TUNE_68040)
+ {
+ /* On the 68040, two separate moves are faster than link.w 0. */
+ dest = gen_frame_mem (Pmode,
+ gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
+ m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
+ m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ else if (fsize_with_regs < 0x8000 || TARGET_68020)
+ m68k_set_frame_related
+ (emit_insn (gen_link (frame_pointer_rtx,
+ GEN_INT (-4 - fsize_with_regs))));
+ else
+ {
+ m68k_set_frame_related
+ (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
+ m68k_set_frame_related
+ (emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-fsize_with_regs))));
+ }
+
+ /* If the frame pointer is needed, emit a special barrier that
+ will prevent the scheduler from moving stores to the frame
+ before the stack adjustment. */
+ emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
+ }
+ else if (fsize_with_regs != 0)
+ m68k_set_frame_related
+ (emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-fsize_with_regs))));
+
+ if (current_frame.fpu_mask)
+ {
+ gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
+ if (TARGET_68881)
+ m68k_set_frame_related
+ (m68k_emit_movem (stack_pointer_rtx,
+ current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, true, true));
+ else
+ {
+ int offset;
+
+ /* If we're using moveml to save the integer registers,
+ the stack pointer will point to the bottom of the moveml
+ save area. Find the stack offset of the first FP register. */
+ if (current_frame.reg_no < MIN_MOVEM_REGS)
+ offset = 0;
+ else
+ offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
+ m68k_set_frame_related
+ (m68k_emit_movem (stack_pointer_rtx, offset,
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, true, false));
+ }
+ }
+
+ /* If the stack limit is not a symbol, check it here.
+ This has the disadvantage that it may be too late... */
+ if (crtl->limit_stack)
+ {
+ if (REG_P (stack_limit_rtx))
+ emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
+ stack_limit_rtx),
+ stack_pointer_rtx, stack_limit_rtx,
+ const1_rtx));
+
+ else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
+ warning (0, "stack limit expression is not supported");
+ }
+
+ if (current_frame.reg_no < MIN_MOVEM_REGS)
+ {
+ /* Store each register separately in the same order moveml does. */
+ int i;
+
+ for (i = 16; i-- > 0; )
+ if (current_frame.reg_mask & (1 << i))
+ {
+ src = gen_rtx_REG (SImode, D0_REG + i);
+ dest = gen_frame_mem (SImode,
+ gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
+ m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
+ }
+ }
+ else
+ {
+ if (TARGET_COLDFIRE)
+ /* The required register save space has already been allocated.
+ The first register should be stored at (%sp). */
+ m68k_set_frame_related
+ (m68k_emit_movem (stack_pointer_rtx, 0,
+ current_frame.reg_no, D0_REG,
+ current_frame.reg_mask, true, false));
+ else
+ m68k_set_frame_related
+ (m68k_emit_movem (stack_pointer_rtx,
+ current_frame.reg_no * -GET_MODE_SIZE (SImode),
+ current_frame.reg_no, D0_REG,
+ current_frame.reg_mask, true, true));
+ }
+
+ if (!TARGET_SEP_DATA
+ && crtl->uses_pic_offset_table)
+ emit_insn (gen_load_got (pic_offset_table_rtx));
+}
+
+/* Return true if a simple (return) instruction is sufficient for this
+   function (i.e. if no epilogue is needed).  */
+
+bool
+m68k_use_return_insn (void)
+{
+ if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
+ return false;
+
+ m68k_compute_frame_layout ();
+ return current_frame.offset == 0;
+}
+
+/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
+ SIBCALL_P says which.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only, if there is a frame pointer.
+ This is mandatory because of alloca; we also take advantage of it to
+ omit stack adjustments before returning. */
+
+void
+m68k_expand_epilogue (bool sibcall_p)
+{
+ HOST_WIDE_INT fsize, fsize_with_regs;
+ bool big, restore_from_sp;
+
+ m68k_compute_frame_layout ();
+
+ fsize = current_frame.size;
+ big = false;
+ restore_from_sp = false;
+
+  /* FIXME: current_function_is_leaf below is too strong.
+     What we really need to know is whether there could be a pending
+     stack adjustment needed at that point.  */
+ restore_from_sp = (!frame_pointer_needed
+ || (!cfun->calls_alloca
+ && current_function_is_leaf));
+
+ /* fsize_with_regs is the size we need to adjust the sp when
+ popping the frame. */
+ fsize_with_regs = fsize;
+ if (TARGET_COLDFIRE && restore_from_sp)
+ {
+ /* ColdFire's move multiple instructions do not allow post-increment
+ addressing. Add the size of movem loads to the final deallocation
+ instead. */
+ if (current_frame.reg_no >= MIN_MOVEM_REGS)
+ fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
+ if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
+ fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
+ }
+
+ if (current_frame.offset + fsize >= 0x8000
+ && !restore_from_sp
+ && (current_frame.reg_mask || current_frame.fpu_mask))
+ {
+ if (TARGET_COLDFIRE
+ && (current_frame.reg_no >= MIN_MOVEM_REGS
+ || current_frame.fpu_no >= MIN_FMOVEM_REGS))
+ {
+	     /* ColdFire's move multiple instructions do not support the
+		(d8,Ax,Xi) addressing mode, so we might as well use a normal
+		stack-based restore.  */
+ emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
+ GEN_INT (-(current_frame.offset + fsize)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, A1_REG),
+ frame_pointer_rtx));
+ restore_from_sp = true;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
+ fsize = 0;
+ big = true;
+ }
+ }
+
+ if (current_frame.reg_no < MIN_MOVEM_REGS)
+ {
+ /* Restore each register separately in the same order moveml does. */
+ int i;
+ HOST_WIDE_INT offset;
+
+ offset = current_frame.offset + fsize;
+ for (i = 0; i < 16; i++)
+ if (current_frame.reg_mask & (1 << i))
+ {
+ rtx addr;
+
+ if (big)
+ {
+ /* Generate the address -OFFSET(%fp,%a1.l). */
+ addr = gen_rtx_REG (Pmode, A1_REG);
+ addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
+ addr = plus_constant (addr, -offset);
+ }
+ else if (restore_from_sp)
+ addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
+ else
+ addr = plus_constant (frame_pointer_rtx, -offset);
+ emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
+ gen_frame_mem (SImode, addr));
+ offset -= GET_MODE_SIZE (SImode);
+ }
+ }
+ else if (current_frame.reg_mask)
+ {
+ if (big)
+ m68k_emit_movem (gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, A1_REG),
+ frame_pointer_rtx),
+ -(current_frame.offset + fsize),
+ current_frame.reg_no, D0_REG,
+ current_frame.reg_mask, false, false);
+ else if (restore_from_sp)
+ m68k_emit_movem (stack_pointer_rtx, 0,
+ current_frame.reg_no, D0_REG,
+ current_frame.reg_mask, false,
+ !TARGET_COLDFIRE);
+ else
+ m68k_emit_movem (frame_pointer_rtx,
+ -(current_frame.offset + fsize),
+ current_frame.reg_no, D0_REG,
+ current_frame.reg_mask, false, false);
+ }
+
+ if (current_frame.fpu_no > 0)
+ {
+ if (big)
+ m68k_emit_movem (gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, A1_REG),
+ frame_pointer_rtx),
+ -(current_frame.foffset + fsize),
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, false, false);
+ else if (restore_from_sp)
+ {
+ if (TARGET_COLDFIRE)
+ {
+ int offset;
+
+ /* If we used moveml to restore the integer registers, the
+ stack pointer will still point to the bottom of the moveml
+ save area. Find the stack offset of the first FP
+ register. */
+ if (current_frame.reg_no < MIN_MOVEM_REGS)
+ offset = 0;
+ else
+ offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
+ m68k_emit_movem (stack_pointer_rtx, offset,
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, false, false);
+ }
+ else
+ m68k_emit_movem (stack_pointer_rtx, 0,
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, false, true);
+ }
+ else
+ m68k_emit_movem (frame_pointer_rtx,
+ -(current_frame.foffset + fsize),
+ current_frame.fpu_no, FP0_REG,
+ current_frame.fpu_mask, false, false);
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_unlink (frame_pointer_rtx));
+ else if (fsize_with_regs)
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (fsize_with_regs)));
+
+ if (crtl->calls_eh_return)
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ EH_RETURN_STACKADJ_RTX));
+
+ if (!sibcall_p)
+ emit_jump_insn (gen_rtx_RETURN (VOIDmode));
+}
+
+/* Return true if X is a valid comparison operator for the dbcc
+ instruction.
+
+ Note it rejects floating point comparison operators.
+ (In the future we could use Fdbcc).
+
+ It also rejects some comparisons when CC_NO_OVERFLOW is set. */
+
+int
+valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ switch (GET_CODE (x))
+ {
+ case EQ: case NE: case GTU: case LTU:
+ case GEU: case LEU:
+ return 1;
+
+    /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
+       conservative.  */
+ case GT: case LT: case GE: case LE:
+ return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
+ default:
+ return 0;
+ }
+}
+
+/* Return nonzero if flags are currently in the 68881 flag register. */
+int
+flags_in_68881 (void)
+{
+  /* We could add support for these in the future.  */
+ return cc_status.flags & CC_IN_68881;
+}
+
+/* Return true if PARALLEL contains register REGNO. */
+static bool
+m68k_reg_present_p (const_rtx parallel, unsigned int regno)
+{
+ int i;
+
+ if (REG_P (parallel) && REGNO (parallel) == regno)
+ return true;
+
+ if (GET_CODE (parallel) != PARALLEL)
+ return false;
+
+ for (i = 0; i < XVECLEN (parallel, 0); ++i)
+ {
+ const_rtx x;
+
+ x = XEXP (XVECEXP (parallel, 0, i), 0);
+ if (REG_P (x) && REGNO (x) == regno)
+ return true;
+ }
+
+ return false;
+}
+
+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
+
+static bool
+m68k_ok_for_sibcall_p (tree decl, tree exp)
+{
+ enum m68k_function_kind kind;
+
+ /* We cannot use sibcalls for nested functions because we use the
+ static chain register for indirect calls. */
+ if (CALL_EXPR_STATIC_CHAIN (exp))
+ return false;
+
+ if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
+ {
+      /* Check that the return value locations are the same.  For
+	 example, that we aren't returning a value from the sibling in
+	 a D0 register but then need to transfer it to an A0 register.  */
+ rtx cfun_value;
+ rtx call_value;
+
+ cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
+ cfun->decl);
+ call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
+
+      /* Check that the values are equal or that the result the callee
+	 function returns is a superset of what the current function
+	 returns.  */
+ if (!(rtx_equal_p (cfun_value, call_value)
+ || (REG_P (cfun_value)
+ && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
+ return false;
+ }
+
+ kind = m68k_get_function_kind (current_function_decl);
+ if (kind == m68k_fk_normal_function)
+ /* We can always sibcall from a normal function, because it's
+ undefined if it is calling an interrupt function. */
+ return true;
+
+ /* Otherwise we can only sibcall if the function kind is known to be
+ the same. */
+ if (decl && m68k_get_function_kind (decl) == kind)
+ return true;
+
+ return false;
+}
+
+/* On the m68k all args are always pushed. */
+
+static rtx
+m68k_function_arg (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+static void
+m68k_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ *cum += (mode != BLKmode
+ ? (GET_MODE_SIZE (mode) + 3) & ~3
+ : (int_size_in_bytes (type) + 3) & ~3);
+}
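+
+/* For example, a 6-byte BLKmode argument advances *CUM by 8; argument
+   sizes are rounded up to a multiple of 4 bytes.  */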
+
+/* Convert X to a legitimate function call memory reference and return the
+ result. */
+
+rtx
+m68k_legitimize_call_address (rtx x)
+{
+ gcc_assert (MEM_P (x));
+ if (call_operand (XEXP (x, 0), VOIDmode))
+ return x;
+ return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
+}
+
+/* Likewise for sibling calls. */
+
+rtx
+m68k_legitimize_sibcall_address (rtx x)
+{
+ gcc_assert (MEM_P (x));
+ if (sibcall_operand (XEXP (x, 0), VOIDmode))
+ return x;
+
+ emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
+ return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
+}
+
+/* Convert X to a legitimate address and return it if successful. Otherwise
+ return X.
+
+ For the 68000, we handle X+REG by loading X into a register R and
+ using R+REG. R will go in an address reg and indexing will be used.
+ However, if REG is a broken-out memory address or multiplication,
+ nothing needs to be done because REG can certainly go in an address reg. */
+
+static rtx
+m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
+{
+ if (m68k_tls_symbol_p (x))
+ return m68k_legitimize_tls_address (x);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ int ch = (x) != (oldx);
+ int copied = 0;
+
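+/* Copy Y at most once, the first time it is about to be modified, so
+   that the (possibly shared) rtx we were passed is never changed in
+   place; also record that X has changed.  */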
+#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
+
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ COPY_ONCE (x);
+ XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
+ }
+ if (GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ COPY_ONCE (x);
+ XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
+ }
+ if (ch)
+ {
+ if (GET_CODE (XEXP (x, 1)) == REG
+ && GET_CODE (XEXP (x, 0)) == REG)
+ {
+ if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ COPY_ONCE (x);
+ x = force_operand (x, 0);
+ }
+ return x;
+ }
+ if (memory_address_p (mode, x))
+ return x;
+ }
+ if (GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
+ {
+ rtx temp = gen_reg_rtx (Pmode);
+ rtx val = force_operand (XEXP (x, 1), 0);
+ emit_move_insn (temp, val);
+ COPY_ONCE (x);
+ XEXP (x, 1) = temp;
+ if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_CODE (XEXP (x, 0)) == REG)
+ x = force_operand (x, 0);
+ }
+ else if (GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
+ && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
+ && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
+ {
+ rtx temp = gen_reg_rtx (Pmode);
+ rtx val = force_operand (XEXP (x, 0), 0);
+ emit_move_insn (temp, val);
+ COPY_ONCE (x);
+ XEXP (x, 0) = temp;
+ if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_CODE (XEXP (x, 1)) == REG)
+ x = force_operand (x, 0);
+ }
+ }
+
+ return x;
+}
+
+
+/* Output a dbCC; jCC sequence. Note we do not handle the
+ floating point version of this sequence (Fdbcc). We also
+ do not handle alternative conditions when CC_NO_OVERFLOW is
+ set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
+ kick those out before we get here. */
+
+void
+output_dbcc_and_branch (rtx *operands)
+{
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ:
+ output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
+ break;
+
+ case NE:
+ output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
+ break;
+
+ case GT:
+ output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
+ break;
+
+ case GTU:
+ output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
+ break;
+
+ case LT:
+ output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
+ break;
+
+ case LTU:
+ output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
+ break;
+
+ case GE:
+ output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
+ break;
+
+ case GEU:
+ output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
+ break;
+
+ case LE:
+ output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
+ break;
+
+ case LEU:
+ output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If the decrement is to be done in SImode, then we have
+ to compensate for the fact that dbcc decrements in HImode. */
+ switch (GET_MODE (operands[0]))
+ {
+ case SImode:
+ output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
+ break;
+
+ case HImode:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
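+
+/* For example, an EQ test with a SImode counter in d0 and labels L1 and
+   L2 produces
+	dbeq d0,L1
+	jeq L2
+	clr.w d0
+	subq.l #1,d0
+	jpl L1
+   where the last three instructions extend dbcc's HImode decrement to
+   the full SImode counter.  */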
+
+const char *
+output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
+{
+ rtx loperands[7];
+ enum rtx_code op_code = GET_CODE (op);
+
+ /* This does not produce a useful cc. */
+ CC_STATUS_INIT;
+
+ /* The m68k cmp.l instruction requires operand1 to be a reg as used
+ below. Swap the operands and change the op if these requirements
+ are not fulfilled. */
+ if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
+ {
+ rtx tmp = operand1;
+
+ operand1 = operand2;
+ operand2 = tmp;
+ op_code = swap_condition (op_code);
+ }
+ loperands[0] = operand1;
+ if (GET_CODE (operand1) == REG)
+ loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
+ else
+ loperands[1] = adjust_address (operand1, SImode, 4);
+ if (operand2 != const0_rtx)
+ {
+ loperands[2] = operand2;
+ if (GET_CODE (operand2) == REG)
+ loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
+ else
+ loperands[3] = adjust_address (operand2, SImode, 4);
+ }
+ loperands[4] = gen_label_rtx ();
+ if (operand2 != const0_rtx)
+ output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
+ else
+ {
+ if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
+ output_asm_insn ("tst%.l %0", loperands);
+ else
+ output_asm_insn ("cmp%.w #0,%0", loperands);
+
+ output_asm_insn ("jne %l4", loperands);
+
+ if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
+ output_asm_insn ("tst%.l %1", loperands);
+ else
+ output_asm_insn ("cmp%.w #0,%1", loperands);
+ }
+
+ loperands[5] = dest;
+
+ switch (op_code)
+ {
+ case EQ:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("seq %5", loperands);
+ break;
+
+ case NE:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("sne %5", loperands);
+ break;
+
+ case GT:
+ loperands[6] = gen_label_rtx ();
+ output_asm_insn ("shi %5\n\tjra %l6", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("sgt %5", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[6]));
+ break;
+
+ case GTU:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("shi %5", loperands);
+ break;
+
+ case LT:
+ loperands[6] = gen_label_rtx ();
+ output_asm_insn ("scs %5\n\tjra %l6", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("slt %5", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[6]));
+ break;
+
+ case LTU:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("scs %5", loperands);
+ break;
+
+ case GE:
+ loperands[6] = gen_label_rtx ();
+ output_asm_insn ("scc %5\n\tjra %l6", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("sge %5", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[6]));
+ break;
+
+ case GEU:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("scc %5", loperands);
+ break;
+
+ case LE:
+ loperands[6] = gen_label_rtx ();
+ output_asm_insn ("sls %5\n\tjra %l6", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("sle %5", loperands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[6]));
+ break;
+
+ case LEU:
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (loperands[4]));
+ output_asm_insn ("sls %5", loperands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return "";
+}
+
+const char *
+output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
+{
+ operands[0] = countop;
+ operands[1] = dataop;
+
+ if (GET_CODE (countop) == CONST_INT)
+ {
+ register int count = INTVAL (countop);
+ /* If COUNT is bigger than size of storage unit in use,
+ advance to the containing unit of same size. */
+ if (count > signpos)
+ {
+ int offset = (count & ~signpos) / 8;
+ count = count & signpos;
+ operands[1] = dataop = adjust_address (dataop, QImode, offset);
+ }
+ if (count == signpos)
+ cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
+ else
+ cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
+
+ /* These three statements used to use next_insns_test_no...
+ but it appears that this should do the same job. */
+ if (count == 31
+ && next_insn_tests_no_inequality (insn))
+ return "tst%.l %1";
+ if (count == 15
+ && next_insn_tests_no_inequality (insn))
+ return "tst%.w %1";
+ if (count == 7
+ && next_insn_tests_no_inequality (insn))
+ return "tst%.b %1";
+ /* Try to use `movew to ccr' followed by the appropriate branch insn.
+ On some m68k variants unfortunately that's slower than btst.
+ On 68000 and higher, that should also work for all HImode operands. */
+ if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
+ {
+ if (count == 3 && DATA_REG_P (operands[1])
+ && next_insn_tests_no_inequality (insn))
+ {
+ cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
+ return "move%.w %1,%%ccr";
+ }
+ if (count == 2 && DATA_REG_P (operands[1])
+ && next_insn_tests_no_inequality (insn))
+ {
+ cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
+ return "move%.w %1,%%ccr";
+ }
+ /* count == 1 followed by bvc/bvs and
+ count == 0 followed by bcc/bcs are also possible, but need
+ m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
+ }
+
+ cc_status.flags = CC_NOT_NEGATIVE;
+ }
+ return "btst %0,%1";
+}
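+
+/* A worked example of the adjustment in output_btst above (hypothetical
+ values): with a QImode storage unit SIGNPOS is 7, so COUNT == 12 yields
+ offset = (12 & ~7) / 8 == 1 and count = 12 & 7 == 4, i.e. the btst
+ tests bit 4 of the byte one past the original address. */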
+
+/* Return true if X is a legitimate base register. STRICT_P says
+ whether we need strict checking. */
+
+bool
+m68k_legitimate_base_reg_p (rtx x, bool strict_p)
+{
+ /* Allow SUBREG everywhere we allow REG. This results in better code. */
+ if (!strict_p && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x)
+ && (strict_p
+ ? REGNO_OK_FOR_BASE_P (REGNO (x))
+ : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
+}
+
+/* Return true if X is a legitimate index register. STRICT_P says
+ whether we need strict checking. */
+
+bool
+m68k_legitimate_index_reg_p (rtx x, bool strict_p)
+{
+ if (!strict_p && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x)
+ && (strict_p
+ ? REGNO_OK_FOR_INDEX_P (REGNO (x))
+ : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
+}
+
+/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
+ (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
+ ADDRESS if so. STRICT_P says whether we need strict checking. */
+
+static bool
+m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
+{
+ int scale;
+
+ /* Check for a scale factor. */
+ scale = 1;
+ if ((TARGET_68020 || TARGET_COLDFIRE)
+ && GET_CODE (x) == MULT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) == 2
+ || INTVAL (XEXP (x, 1)) == 4
+ || (INTVAL (XEXP (x, 1)) == 8
+ && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
+ {
+ scale = INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+ }
+
+ /* Check for a word extension. */
+ if (!TARGET_COLDFIRE
+ && GET_CODE (x) == SIGN_EXTEND
+ && GET_MODE (XEXP (x, 0)) == HImode)
+ x = XEXP (x, 0);
+
+ if (m68k_legitimate_index_reg_p (x, strict_p))
+ {
+ address->scale = scale;
+ address->index = x;
+ return true;
+ }
+
+ return false;
+}
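+
+/* Two illustrative index expressions accepted above (hypothetical RTL):
+ on a 68020, (mult:SI (reg:SI %d1) (const_int 4)) decomposes into index
+ %d1 with scale 4, and on non-ColdFire targets
+ (sign_extend:SI (reg:HI %d1)) is accepted as a word-sized index. */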
+
+/* Return true if X is an illegitimate symbolic constant. */
+
+bool
+m68k_illegitimate_symbolic_constant_p (rtx x)
+{
+ rtx base, offset;
+
+ if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
+ {
+ split_const (x, &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF
+ && !offset_within_block_p (base, INTVAL (offset)))
+ return true;
+ }
+ return m68k_tls_reference_p (x, false);
+}
+
+/* Return true if X is a legitimate constant address that can reach
+ bytes in the range [X, X + REACH). STRICT_P says whether we need
+ strict checking. */
+
+static bool
+m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
+{
+ rtx base, offset;
+
+ if (!CONSTANT_ADDRESS_P (x))
+ return false;
+
+ if (flag_pic
+ && !(strict_p && TARGET_PCREL)
+ && symbolic_operand (x, VOIDmode))
+ return false;
+
+ if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
+ {
+ split_const (x, &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF
+ && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
+ return false;
+ }
+
+ return !m68k_tls_reference_p (x, false);
+}
+
+/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
+ labels will become jump tables. */
+
+static bool
+m68k_jump_table_ref_p (rtx x)
+{
+ if (GET_CODE (x) != LABEL_REF)
+ return false;
+
+ x = XEXP (x, 0);
+ if (!NEXT_INSN (x) && !PREV_INSN (x))
+ return true;
+
+ x = next_nonnote_insn (x);
+ return x && JUMP_TABLE_DATA_P (x);
+}
+
+/* Return true if X is a legitimate address for values of mode MODE.
+ STRICT_P says whether strict checking is needed. If the address
+ is valid, describe its components in *ADDRESS. */
+
+static bool
+m68k_decompose_address (enum machine_mode mode, rtx x,
+ bool strict_p, struct m68k_address *address)
+{
+ unsigned int reach;
+
+ memset (address, 0, sizeof (*address));
+
+ if (mode == BLKmode)
+ reach = 1;
+ else
+ reach = GET_MODE_SIZE (mode);
+
+ /* Check for (An) (mode 2). */
+ if (m68k_legitimate_base_reg_p (x, strict_p))
+ {
+ address->base = x;
+ return true;
+ }
+
+ /* Check for -(An) and (An)+ (modes 3 and 4). */
+ if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
+ && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
+ {
+ address->code = GET_CODE (x);
+ address->base = XEXP (x, 0);
+ return true;
+ }
+
+ /* Check for (d16,An) (mode 5). */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
+ && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
+ {
+ address->base = XEXP (x, 0);
+ address->offset = XEXP (x, 1);
+ return true;
+ }
+
+ /* Check for GOT loads. These are (bd,An,Xn) addresses if
+ TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
+ addresses. */
+ if (GET_CODE (x) == PLUS
+ && XEXP (x, 0) == pic_offset_table_rtx)
+ {
+ /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
+ they are invalid in this context. */
+ if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
+ {
+ address->base = XEXP (x, 0);
+ address->offset = XEXP (x, 1);
+ return true;
+ }
+ }
+
+ /* The ColdFire FPU only accepts addressing modes 2-5. */
+ if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return false;
+
+ /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
+ check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
+ All these modes are variations of mode 7. */
+ if (m68k_legitimate_constant_address_p (x, reach, strict_p))
+ {
+ address->offset = x;
+ return true;
+ }
+
+ /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
+ tablejumps.
+
+ ??? do_tablejump creates these addresses before placing the target
+ label, so we have to assume that unplaced labels are jump table
+ references. It seems unlikely that we would ever generate indexed
+ accesses to unplaced labels in other cases. */
+ if (GET_CODE (x) == PLUS
+ && m68k_jump_table_ref_p (XEXP (x, 1))
+ && m68k_decompose_index (XEXP (x, 0), strict_p, address))
+ {
+ address->offset = XEXP (x, 1);
+ return true;
+ }
+
+ /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
+ (bd,An,Xn.SIZE*SCALE) addresses. */
+
+ if (TARGET_68020)
+ {
+ /* Check for a nonzero base displacement. */
+ if (GET_CODE (x) == PLUS
+ && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
+ {
+ address->offset = XEXP (x, 1);
+ x = XEXP (x, 0);
+ }
+
+ /* Check for a suppressed index register. */
+ if (m68k_legitimate_base_reg_p (x, strict_p))
+ {
+ address->base = x;
+ return true;
+ }
+
+ /* Check for a suppressed base register. Do not allow this case
+ for non-symbolic offsets as it effectively gives gcc freedom
+ to treat data registers as base registers, which can generate
+ worse code. */
+ if (address->offset
+ && symbolic_operand (address->offset, VOIDmode)
+ && m68k_decompose_index (x, strict_p, address))
+ return true;
+ }
+ else
+ {
+ /* Check for a nonzero base displacement. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
+ {
+ address->offset = XEXP (x, 1);
+ x = XEXP (x, 0);
+ }
+ }
+
+ /* We now expect the sum of a base and an index. */
+ if (GET_CODE (x) == PLUS)
+ {
+ if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
+ && m68k_decompose_index (XEXP (x, 1), strict_p, address))
+ {
+ address->base = XEXP (x, 0);
+ return true;
+ }
+
+ if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
+ && m68k_decompose_index (XEXP (x, 0), strict_p, address))
+ {
+ address->base = XEXP (x, 1);
+ return true;
+ }
+ }
+ return false;
+}
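+
+/* Two illustrative decompositions (hypothetical RTL): for SImode,
+ (plus:SI (reg:SI %a0) (const_int 100)) matches the (d16,An) case with
+ base %a0 and offset 100, while on a 68020
+ (plus:SI (reg:SI %a0) (mult:SI (reg:SI %d1) (const_int 4))) reaches the
+ base-plus-index code at the end, giving base %a0, index %d1, scale 4. */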
+
+/* Return true if X is a legitimate address for values of mode MODE.
+ STRICT_P says whether strict checking is needed. */
+
+bool
+m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+ struct m68k_address address;
+
+ return m68k_decompose_address (mode, x, strict_p, &address);
+}
+
+/* Return true if X is a memory, describing its address in ADDRESS if so.
+ Apply strict checking if called during or after reload. */
+
+static bool
+m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
+{
+ return (MEM_P (x)
+ && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
+ reload_in_progress || reload_completed,
+ address));
+}
+
+/* Return true if X matches the 'Q' constraint. It must be a memory
+ with a base address and no constant offset or index. */
+
+bool
+m68k_matches_q_p (rtx x)
+{
+ struct m68k_address address;
+
+ return (m68k_legitimate_mem_p (x, &address)
+ && address.code == UNKNOWN
+ && address.base
+ && !address.offset
+ && !address.index);
+}
+
+/* Return true if X matches the 'U' constraint. It must be a memory
+ reference with a base address and a constant offset but no index. */
+
+bool
+m68k_matches_u_p (rtx x)
+{
+ struct m68k_address address;
+
+ return (m68k_legitimate_mem_p (x, &address)
+ && address.code == UNKNOWN
+ && address.base
+ && address.offset
+ && !address.index);
+}
+
+/* Return GOT pointer. */
+
+static rtx
+m68k_get_gp (void)
+{
+ if (pic_offset_table_rtx == NULL_RTX)
+ pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
+
+ crtl->uses_pic_offset_table = 1;
+
+ return pic_offset_table_rtx;
+}
+
+/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
+ wrappers. */
+enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
+ RELOC_TLSIE, RELOC_TLSLE };
+
+#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
+
+/* Wrap symbol X into unspec representing relocation RELOC.
+ BASE_REG - register that should be added to the result.
+ TEMP_REG - if non-null, temporary register. */
+
+static rtx
+m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
+{
+ bool use_x_p;
+
+ use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
+
+ if (TARGET_COLDFIRE && use_x_p)
+ /* When compiling with the -mxgot or -mxtls switch, the code will look like this:
+
+ move.l <X>@<RELOC>,<TEMP_REG>
+ add.l <BASE_REG>,<TEMP_REG> */
+ {
+ /* Wrap X in UNSPEC_??? to hint to m68k_output_addr_const_extra
+ that it should put @RELOC after the reference. */
+ x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
+ UNSPEC_RELOC32);
+ x = gen_rtx_CONST (Pmode, x);
+
+ if (temp_reg == NULL)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ temp_reg = gen_reg_rtx (Pmode);
+ }
+
+ emit_move_insn (temp_reg, x);
+ emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
+ x = temp_reg;
+ }
+ else
+ {
+ x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
+ UNSPEC_RELOC16);
+ x = gen_rtx_CONST (Pmode, x);
+
+ x = gen_rtx_PLUS (Pmode, base_reg, x);
+ }
+
+ return x;
+}
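+
+/* In the default (non -mxgot/-mxtls) case, m68k_wrap_symbol therefore
+ returns something of the shape
+ (plus BASE_REG (const (unspec [X RELOC] UNSPEC_RELOC16))), i.e. a
+ 16-bit displacement from BASE_REG whose value is supplied by the
+ @RELOC-decorated reference. */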
+
+/* Helper for m68k_unwrap_symbol.
+ Also, if unwrapping was successful (that is, if (ORIG != <return value>)),
+ sets *RELOC_PTR to the relocation type of the symbol. */
+
+static rtx
+m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
+ enum m68k_reloc *reloc_ptr)
+{
+ if (GET_CODE (orig) == CONST)
+ {
+ rtx x;
+ enum m68k_reloc dummy;
+
+ x = XEXP (orig, 0);
+
+ if (reloc_ptr == NULL)
+ reloc_ptr = &dummy;
+
+ /* Handle an addend. */
+ if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
+ && CONST_INT_P (XEXP (x, 1)))
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == UNSPEC)
+ {
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_RELOC16:
+ orig = XVECEXP (x, 0, 0);
+ *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
+ break;
+
+ case UNSPEC_RELOC32:
+ if (unwrap_reloc32_p)
+ {
+ orig = XVECEXP (x, 0, 0);
+ *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ return orig;
+}
+
+/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
+ UNSPEC_RELOC32 wrappers. */
+
+rtx
+m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
+{
+ return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
+}
+
+/* Helper for m68k_final_prescan_insn. */
+
+static int
+m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *x_ptr;
+
+ if (m68k_unwrap_symbol (x, true) != x)
+ /* For rationale of the below, see comment in m68k_final_prescan_insn. */
+ {
+ rtx plus;
+
+ gcc_assert (GET_CODE (x) == CONST);
+ plus = XEXP (x, 0);
+
+ if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
+ {
+ rtx unspec;
+ rtx addend;
+
+ unspec = XEXP (plus, 0);
+ gcc_assert (GET_CODE (unspec) == UNSPEC);
+ addend = XEXP (plus, 1);
+ gcc_assert (CONST_INT_P (addend));
+
+ /* We now have all the pieces, rearrange them. */
+
+ /* Move symbol to plus. */
+ XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
+
+ /* Move plus inside unspec. */
+ XVECEXP (unspec, 0, 0) = plus;
+
+ /* Move unspec to top level of const. */
+ XEXP (x, 0) = unspec;
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Prescan insn before outputting assembler for it. */
+
+void
+m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
+ rtx *operands, int n_operands)
+{
+ int i;
+
+ /* Combine and, possibly, other optimizations may do a good job
+ of converting
+ (const (unspec [(symbol)]))
+ into
+ (const (plus (unspec [(symbol)])
+ (const_int N))).
+ The problem with this is emitting @TLS or @GOT decorations.
+ The decoration is emitted when processing (unspec), so the
+ result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
+
+ It seems that the easiest solution to this is to convert such
+ operands to
+ (const (unspec [(plus (symbol)
+ (const_int N))])).
+ Note that the top level of the operand remains intact, so we don't have
+ to patch up anything outside of the operand. */
+
+ for (i = 0; i < n_operands; ++i)
+ {
+ rtx op;
+
+ op = operands[i];
+
+ for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
+ }
+}
+
+/* Move X to a register and add a REG_EQUAL note pointing to ORIG.
+ If REG is non-null, use it; otherwise generate a new pseudo. */
+
+static rtx
+m68k_move_to_reg (rtx x, rtx orig, rtx reg)
+{
+ rtx insn;
+
+ if (reg == NULL_RTX)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ insn = emit_move_insn (reg, x);
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by the loop pass. */
+ set_unique_reg_note (insn, REG_EQUAL, orig);
+
+ return reg;
+}
+
+/* Does the same as m68k_wrap_symbol, but returns a memory reference to
+ the GOT slot. */
+
+static rtx
+m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
+{
+ x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
+
+ x = gen_rtx_MEM (Pmode, x);
+ MEM_READONLY_P (x) = 1;
+
+ return x;
+}
+
+/* Legitimize PIC addresses. If the address is already
+ position-independent, we return ORIG. Newly generated
+ position-independent addresses go to REG. If we need more
+ than one register, we lose.
+
+ An address is legitimized by making an indirect reference
+ through the Global Offset Table with the name of the symbol
+ used as an offset.
+
+ The assembler and linker are responsible for placing the
+ address of the symbol in the GOT. The function prologue
+ is responsible for initializing a5 to the starting address
+ of the GOT.
+
+ The assembler is also responsible for translating a symbol name
+ into a constant displacement from the start of the GOT.
+
+ A quick example may make things a little clearer:
+
+ When not generating PIC code, storing the value 12345 into _foo
+ would generate the following code:
+
+ movel #12345, _foo
+
+ When generating PIC code, two transformations are made. First, the
+ compiler loads the address of foo into a register, so the first
+ transformation produces:
+
+ lea _foo, a0
+ movel #12345, a0@
+
+ The code in movsi will intercept the lea instruction and call this
+ routine which will transform the instructions into:
+
+ movel a5@(_foo:w), a0
+ movel #12345, a0@
+
+
+ That (in a nutshell) is how *all* symbol and label references are
+ handled. */
+
+rtx
+legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx reg)
+{
+ rtx pic_ref = orig;
+
+ /* First handle a simple SYMBOL_REF or LABEL_REF. */
+ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
+ {
+ gcc_assert (reg);
+
+ pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
+ pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base;
+
+ /* Make sure this has not already been legitimized. */
+ if (m68k_unwrap_symbol (orig, true) != orig)
+ return orig;
+
+ gcc_assert (reg);
+
+ /* Legitimize both operands of the PLUS. */
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
+
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+
+ if (GET_CODE (orig) == CONST_INT)
+ pic_ref = plus_constant (base, INTVAL (orig));
+ else
+ pic_ref = gen_rtx_PLUS (Pmode, base, orig);
+ }
+
+ return pic_ref;
+}
+
+/* The __tls_get_addr symbol. */
+static GTY(()) rtx m68k_tls_get_addr;
+
+/* Return SYMBOL_REF for __tls_get_addr. */
+
+static rtx
+m68k_get_tls_get_addr (void)
+{
+ if (m68k_tls_get_addr == NULL_RTX)
+ m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
+
+ return m68k_tls_get_addr;
+}
+
+/* Return the libcall result in A0 instead of the usual D0. */
+static bool m68k_libcall_value_in_a0_p = false;
+
+/* Emit instruction sequence that calls __tls_get_addr. X is
+ the TLS symbol we are referencing and RELOC is the symbol type to use
+ (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
+ emitted. A pseudo register holding the result of the __tls_get_addr
+ call is returned. */
+
+static rtx
+m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
+{
+ rtx a0;
+ rtx insns;
+ rtx dest;
+
+ /* Emit the call sequence. */
+ start_sequence ();
+
+ /* FIXME: Unfortunately, emit_library_call_value does not
+ consider (plus (%a5) (const (unspec))) to be a good enough
+ operand for push, so it forces it into a register. The bad
+ thing about this is that the combiner, due to copy propagation and other
+ optimizations, sometimes cannot later fix this. As a consequence, an
+ additional register may be allocated, resulting in a spill.
+ For reference, see args processing loops in
+ calls.c:emit_library_call_value_1.
+ For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
+ x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
+
+ /* __tls_get_addr() is not a libcall, but emitting a libcall_value
+ is the simplest way of generating a call. The difference between
+ __tls_get_addr() and a libcall is that the result is returned in A0
+ instead of the usual D0. To work around this, we use
+ m68k_libcall_value_in_a0_p, which temporarily switches to returning
+ the result in A0. */
+
+ m68k_libcall_value_in_a0_p = true;
+ a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
+ Pmode, 1, x, Pmode);
+ m68k_libcall_value_in_a0_p = false;
+
+ insns = get_insns ();
+ end_sequence ();
+
+ gcc_assert (can_create_pseudo_p ());
+ dest = gen_reg_rtx (Pmode);
+ emit_libcall_block (insns, dest, a0, eqv);
+
+ return dest;
+}
+
+/* The __m68k_read_tp symbol. */
+static GTY(()) rtx m68k_read_tp;
+
+/* Return SYMBOL_REF for __m68k_read_tp. */
+
+static rtx
+m68k_get_m68k_read_tp (void)
+{
+ if (m68k_read_tp == NULL_RTX)
+ m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
+
+ return m68k_read_tp;
+}
+
+/* Emit instruction sequence that calls __m68k_read_tp.
+ A pseudo register with result of __m68k_read_tp call is returned. */
+
+static rtx
+m68k_call_m68k_read_tp (void)
+{
+ rtx a0;
+ rtx eqv;
+ rtx insns;
+ rtx dest;
+
+ start_sequence ();
+
+ /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
+ is the simplest way of generating a call. The difference between
+ __m68k_read_tp() and a libcall is that the result is returned in A0
+ instead of the usual D0. To work around this, we use
+ m68k_libcall_value_in_a0_p, which temporarily switches to returning
+ the result in A0. */
+
+ /* Emit the call sequence. */
+ m68k_libcall_value_in_a0_p = true;
+ a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
+ Pmode, 0);
+ m68k_libcall_value_in_a0_p = false;
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
+ share the m68k_read_tp result with other IE/LE model accesses. */
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
+
+ gcc_assert (can_create_pseudo_p ());
+ dest = gen_reg_rtx (Pmode);
+ emit_libcall_block (insns, dest, a0, eqv);
+
+ return dest;
+}
+
+/* Return a legitimized address for accessing TLS SYMBOL_REF X.
+ For explanations of the instruction sequences, see the TLS/NPTL ABI for
+ m68k and ColdFire. */
+
+rtx
+m68k_legitimize_tls_address (rtx orig)
+{
+ switch (SYMBOL_REF_TLS_MODEL (orig))
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
+ break;
+
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ {
+ rtx eqv;
+ rtx a0;
+ rtx x;
+
+ /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
+ share the LDM result with other LD model accesses. */
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_RELOC32);
+
+ a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
+
+ x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
+
+ if (can_create_pseudo_p ())
+ x = m68k_move_to_reg (x, orig, NULL_RTX);
+
+ orig = x;
+ break;
+ }
+
+ case TLS_MODEL_INITIAL_EXEC:
+ {
+ rtx a0;
+ rtx x;
+
+ a0 = m68k_call_m68k_read_tp ();
+
+ x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
+ x = gen_rtx_PLUS (Pmode, x, a0);
+
+ if (can_create_pseudo_p ())
+ x = m68k_move_to_reg (x, orig, NULL_RTX);
+
+ orig = x;
+ break;
+ }
+
+ case TLS_MODEL_LOCAL_EXEC:
+ {
+ rtx a0;
+ rtx x;
+
+ a0 = m68k_call_m68k_read_tp ();
+
+ x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
+
+ if (can_create_pseudo_p ())
+ x = m68k_move_to_reg (x, orig, NULL_RTX);
+
+ orig = x;
+ break;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return orig;
+}
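+
+/* For instance (an outline, not verbatim output), the local-exec case in
+ m68k_legitimize_tls_address above emits a call to __m68k_read_tp ()
+ whose result is copied into a pseudo A0, then forms A0 + <sym>@TLSLE.
+ The other models differ only in the relocation used and in whether a
+ GOT slot is dereferenced (the initial-exec case loads the offset
+ through the GOT first). */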
+
+/* Return true if X is a TLS symbol. */
+
+static bool
+m68k_tls_symbol_p (rtx x)
+{
+ if (!TARGET_HAVE_TLS)
+ return false;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return false;
+
+ return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Helper for m68k_tls_referenced_p. */
+
+static int
+m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
+{
+ /* Note: this is not the same as m68k_tls_symbol_p. */
+ if (GET_CODE (*x_ptr) == SYMBOL_REF)
+ return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
+
+ /* Don't recurse into legitimate TLS references. */
+ if (m68k_tls_reference_p (*x_ptr, true))
+ return -1;
+
+ return 0;
+}
+
+/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
+ though an illegitimate one.
+ If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
+
+bool
+m68k_tls_reference_p (rtx x, bool legitimate_p)
+{
+ if (!TARGET_HAVE_TLS)
+ return false;
+
+ if (!legitimate_p)
+ return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
+ else
+ {
+ enum m68k_reloc reloc = RELOC_GOT;
+
+ return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
+ && TLS_RELOC_P (reloc));
+ }
+}
+
+
+
+#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
+
+/* Return the type of move that should be used for integer I. */
+
+M68K_CONST_METHOD
+m68k_const_method (HOST_WIDE_INT i)
+{
+ unsigned u;
+
+ if (USE_MOVQ (i))
+ return MOVQ;
+
+ /* The ColdFire doesn't have byte or word operations. */
+ /* FIXME: This may not be useful for the m68060 either. */
+ if (!TARGET_COLDFIRE)
+ {
+ /* If -256 < N < 256 but N is not in range for a moveq,
+ N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
+ if (USE_MOVQ (i ^ 0xff))
+ return NOTB;
+ /* Likewise, try with not.w */
+ if (USE_MOVQ (i ^ 0xffff))
+ return NOTW;
+ /* This is the only value where neg.w is useful. */
+ if (i == -65408)
+ return NEGW;
+ }
+
+ /* Try also with swap. */
+ u = i;
+ if (USE_MOVQ ((u >> 16) | (u << 16)))
+ return SWAP;
+
+ if (TARGET_ISAB)
+ {
+ /* Try using MVZ/MVS with an immediate value to load constants. */
+ if (i >= 0 && i <= 65535)
+ return MVZ;
+ if (i >= -32768 && i <= 32767)
+ return MVS;
+ }
+
+ /* Otherwise, use move.l */
+ return MOVL;
+}
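+
+/* A worked example of the SWAP case above (hypothetical constant): for
+ i = 0x002a0000, rotating by 16 bits gives 0x0000002a, which fits in a
+ moveq, so the value can be built as moveq #42,%d0; swap %d0. */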
+
+/* Return the cost of moving constant I into a data register. */
+
+static int
+const_int_cost (HOST_WIDE_INT i)
+{
+ switch (m68k_const_method (i))
+ {
+ case MOVQ:
+ /* Constants between -128 and 127 are cheap due to moveq. */
+ return 0;
+ case MVZ:
+ case MVS:
+ case NOTB:
+ case NOTW:
+ case NEGW:
+ case SWAP:
+ /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
+ return 1;
+ case MOVL:
+ return 2;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+static bool
+m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ case CONST_INT:
+ /* Constant zero is super cheap due to clr instruction. */
+ if (x == const0_rtx)
+ *total = 0;
+ else
+ *total = const_int_cost (INTVAL (x));
+ return true;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total = 3;
+ return true;
+
+ case CONST_DOUBLE:
+ /* Make 0.0 cheaper than other floating constants to
+ encourage creating tstsf and tstdf insns. */
+ if (outer_code == COMPARE
+ && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
+ *total = 4;
+ else
+ *total = 5;
+ return true;
+
+ /* These are vaguely right for a 68020. */
+ /* The costs for long multiply have been adjusted to work properly
+ in synth_mult on the 68020, relative to an average of the time
+ for add and the time for shift, taking away a little more because
+ sometimes move insns are needed. */
+ /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
+ terms. */
+#define MULL_COST \
+ (TUNE_68060 ? 2 \
+ : TUNE_68040 ? 5 \
+ : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
+ : (TUNE_CFV2 && TUNE_MAC) ? 4 \
+ : TUNE_CFV2 ? 8 \
+ : TARGET_COLDFIRE ? 3 : 13)
+
+#define MULW_COST \
+ (TUNE_68060 ? 2 \
+ : TUNE_68040 ? 3 \
+ : TUNE_68000_10 ? 5 \
+ : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
+ : (TUNE_CFV2 && TUNE_MAC) ? 2 \
+ : TUNE_CFV2 ? 8 \
+ : TARGET_COLDFIRE ? 2 : 8)
+
+#define DIVW_COST \
+ (TARGET_CF_HWDIV ? 11 \
+ : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
+
+ case PLUS:
+ /* An lea costs about three times as much as a simple add. */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (XEXP (x, 1)) == REG
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
+ || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
+ || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
+ {
+ /* lea an@(dx:l:i),am */
+ *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
+ return true;
+ }
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ if (TUNE_68060)
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ if (TUNE_68000_10)
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ if (INTVAL (XEXP (x, 1)) < 16)
+ *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
+ else
+ /* We're using clrw + swap for these cases. */
+ *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
+ }
+ else
+ *total = COSTS_N_INSNS (10); /* Worst case. */
+ return true;
+ }
+ /* A shift by a big integer takes an extra instruction. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) == 16))
+ {
+ *total = COSTS_N_INSNS (2); /* clrw;swap */
+ return true;
+ }
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && !(INTVAL (XEXP (x, 1)) > 0
+ && INTVAL (XEXP (x, 1)) <= 8))
+ {
+ *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
+ return true;
+ }
+ return false;
+
+ case MULT:
+ if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ && GET_MODE (x) == SImode)
+ *total = COSTS_N_INSNS (MULW_COST);
+ else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
+ *total = COSTS_N_INSNS (MULW_COST);
+ else
+ *total = COSTS_N_INSNS (MULL_COST);
+ return true;
+
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
+ *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
+ else if (TARGET_CF_HWDIV)
+ *total = COSTS_N_INSNS (18);
+ else
+ *total = COSTS_N_INSNS (43); /* div.l */
+ return true;
+
+ case ZERO_EXTRACT:
+ if (outer_code == COMPARE)
+ *total = 0;
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* Return an instruction to move CONST_INT OPERANDS[1] into data register
+ OPERANDS[0]. */
+
+static const char *
+output_move_const_into_data_reg (rtx *operands)
+{
+ HOST_WIDE_INT i;
+
+ i = INTVAL (operands[1]);
+ switch (m68k_const_method (i))
+ {
+ case MVZ:
+ return "mvzw %1,%0";
+ case MVS:
+ return "mvsw %1,%0";
+ case MOVQ:
+ return "moveq %1,%0";
+ case NOTB:
+ CC_STATUS_INIT;
+ operands[1] = GEN_INT (i ^ 0xff);
+ return "moveq %1,%0\n\tnot%.b %0";
+ case NOTW:
+ CC_STATUS_INIT;
+ operands[1] = GEN_INT (i ^ 0xffff);
+ return "moveq %1,%0\n\tnot%.w %0";
+ case NEGW:
+ CC_STATUS_INIT;
+ return "moveq #-128,%0\n\tneg%.w %0";
+ case SWAP:
+ {
+ unsigned u = i;
+
+ operands[1] = GEN_INT ((u << 16) | (u >> 16));
+ return "moveq %1,%0\n\tswap %0";
+ }
+ case MOVL:
+ return "move%.l %1,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
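+
+/* An illustration of the NOTB case above: on non-ColdFire targets,
+ loading -256 (0xffffff00) satisfies USE_MOVQ (-256 ^ 0xff), i.e.
+ USE_MOVQ (-1), so the template expands to moveq #-1,%d0; not.b %d0,
+ which restores 0xffffff00. */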
+
+/* Return true if I can be handled by ISA B's mov3q instruction. */
+
+bool
+valid_mov3q_const (HOST_WIDE_INT i)
+{
+ return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
+}
+
+/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0]. */
+
+static const char *
+output_move_simode_const (rtx *operands)
+{
+ rtx dest;
+ HOST_WIDE_INT src;
+
+ dest = operands[0];
+ src = INTVAL (operands[1]);
+ if (src == 0
+ && (DATA_REG_P (dest) || MEM_P (dest))
+ /* clr insns on 68000 read before writing. */
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
+ return "clr%.l %0";
+ else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
+ return "mov3q%.l %1,%0";
+ else if (src == 0 && ADDRESS_REG_P (dest))
+ return "sub%.l %0,%0";
+ else if (DATA_REG_P (dest))
+ return output_move_const_into_data_reg (operands);
+ else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
+ {
+ if (valid_mov3q_const (src))
+ return "mov3q%.l %1,%0";
+ return "move%.w %1,%0";
+ }
+ else if (MEM_P (dest)
+ && GET_CODE (XEXP (dest, 0)) == PRE_DEC
+ && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
+ && IN_RANGE (src, -0x8000, 0x7fff))
+ {
+ if (valid_mov3q_const (src))
+ return "mov3q%.l %1,%-";
+ return "pea %a1";
+ }
+ return "move%.l %1,%0";
+}
+
+const char *
+output_move_simode (rtx *operands)
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ return output_move_simode_const (operands);
+ else if ((GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)
+ && push_operand (operands[0], SImode))
+ return "pea %a1";
+ else if ((GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)
+ && ADDRESS_REG_P (operands[0]))
+ return "lea %a1,%0";
+ return "move%.l %1,%0";
+}
+
+const char *
+output_move_himode (rtx *operands)
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (operands[1] == const0_rtx
+ && (DATA_REG_P (operands[0])
+ || GET_CODE (operands[0]) == MEM)
+ /* clr insns on 68000 read before writing. */
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(GET_CODE (operands[0]) == MEM
+ && MEM_VOLATILE_P (operands[0]))))
+ return "clr%.w %0";
+ else if (operands[1] == const0_rtx
+ && ADDRESS_REG_P (operands[0]))
+ return "sub%.l %0,%0";
+ else if (DATA_REG_P (operands[0])
+ && INTVAL (operands[1]) < 128
+ && INTVAL (operands[1]) >= -128)
+ return "moveq %1,%0";
+ else if (INTVAL (operands[1]) < 0x8000
+ && INTVAL (operands[1]) >= -0x8000)
+ return "move%.w %1,%0";
+ }
+ else if (CONSTANT_P (operands[1]))
+ return "move%.l %1,%0";
+ return "move%.w %1,%0";
+}
+
+const char *
+output_move_qimode (rtx *operands)
+{
+ /* 68k family always modifies the stack pointer by at least 2, even for
+ byte pushes. The 5200 (ColdFire) does not do this. */
+
+ /* This case is generated by pushqi1 pattern now. */
+ gcc_assert (!(GET_CODE (operands[0]) == MEM
+ && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
+ && ! ADDRESS_REG_P (operands[1])
+ && ! TARGET_COLDFIRE));
+
+ /* clr and st insns on 68000 read before writing. */
+ if (!ADDRESS_REG_P (operands[0])
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
+ {
+ if (operands[1] == const0_rtx)
+ return "clr%.b %0";
+ if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
+ && GET_CODE (operands[1]) == CONST_INT
+ && (INTVAL (operands[1]) & 255) == 255)
+ {
+ CC_STATUS_INIT;
+ return "st %0";
+ }
+ }
+ if (GET_CODE (operands[1]) == CONST_INT
+ && DATA_REG_P (operands[0])
+ && INTVAL (operands[1]) < 128
+ && INTVAL (operands[1]) >= -128)
+ return "moveq %1,%0";
+ if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
+ return "sub%.l %0,%0";
+ if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
+ return "move%.l %1,%0";
+ /* The 68k family (including the 5200 ColdFire) does not support byte
+ moves to or from address registers. */
+ if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
+ return "move%.w %1,%0";
+ return "move%.b %1,%0";
+}
+
+const char *
+output_move_stricthi (rtx *operands)
+{
+ if (operands[1] == const0_rtx
+ /* clr insns on 68000 read before writing. */
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
+ return "clr%.w %0";
+ return "move%.w %1,%0";
+}
+
+const char *
+output_move_strictqi (rtx *operands)
+{
+ if (operands[1] == const0_rtx
+ /* clr insns on 68000 read before writing. */
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
+ return "clr%.b %0";
+ return "move%.b %1,%0";
+}
+
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as a fullword. */
+
+static const char *
+singlemove_string (rtx *operands)
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ return output_move_simode_const (operands);
+ return "move%.l %1,%0";
+}
+
+
+/* Output assembler or rtl code to perform a doubleword move insn
+ with operands OPERANDS.
+ Pointers to 3 helper functions should be specified:
+ HANDLE_REG_ADJUST to adjust a register by a small value,
+ HANDLE_COMPADR to compute an address and
+ HANDLE_MOVSI to move 4 bytes. */
+
+static void
+handle_move_double (rtx operands[2],
+ void (*handle_reg_adjust) (rtx, int),
+ void (*handle_compadr) (rtx [2]),
+ void (*handle_movsi) (rtx [2]))
+{
+ enum
+ {
+ REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
+ } optype0, optype1;
+ rtx latehalf[2];
+ rtx middlehalf[2];
+ rtx xops[2];
+ rtx addreg0 = 0, addreg1 = 0;
+ int dest_overlapped_low = 0;
+ int size = GET_MODE_SIZE (GET_MODE (operands[0]));
+
+ middlehalf[0] = 0;
+ middlehalf[1] = 0;
+
+ /* First classify both operands. */
+
+ if (REG_P (operands[0]))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (operands[0]))
+ optype0 = OFFSOP;
+ else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ optype0 = POPOP;
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ optype0 = PUSHOP;
+ else if (GET_CODE (operands[0]) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (operands[1]))
+ optype1 = REGOP;
+ else if (CONSTANT_P (operands[1]))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (operands[1]))
+ optype1 = OFFSOP;
+ else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
+ optype1 = POPOP;
+ else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
+ optype1 = PUSHOP;
+ else if (GET_CODE (operands[1]) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not supposed
+ to allow. Generating code for these cases would be
+ painful. */
+ gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
+
+ /* If one operand is decrementing and one is incrementing,
+ decrement the former register explicitly
+ and change that operand into ordinary indexing. */
+
+ if (optype0 == PUSHOP && optype1 == POPOP)
+ {
+ operands[0] = XEXP (XEXP (operands[0], 0), 0);
+
+ handle_reg_adjust (operands[0], -size);
+
+ if (GET_MODE (operands[1]) == XFmode)
+ operands[0] = gen_rtx_MEM (XFmode, operands[0]);
+ else if (GET_MODE (operands[0]) == DFmode)
+ operands[0] = gen_rtx_MEM (DFmode, operands[0]);
+ else
+ operands[0] = gen_rtx_MEM (DImode, operands[0]);
+ optype0 = OFFSOP;
+ }
+ if (optype0 == POPOP && optype1 == PUSHOP)
+ {
+ operands[1] = XEXP (XEXP (operands[1], 0), 0);
+
+ handle_reg_adjust (operands[1], -size);
+
+ if (GET_MODE (operands[1]) == XFmode)
+ operands[1] = gen_rtx_MEM (XFmode, operands[1]);
+ else if (GET_MODE (operands[1]) == DFmode)
+ operands[1] = gen_rtx_MEM (DFmode, operands[1]);
+ else
+ operands[1] = gen_rtx_MEM (DImode, operands[1]);
+ optype1 = OFFSOP;
+ }
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the second word. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (operands[0], 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (operands[1], 0));
+
+ /* Ok, we can do one word at a time.
+ Normally we do the low-numbered word first,
+ but if either operand is autodecrementing then we
+ do the high-numbered word first.
+
+ In either case, set up in LATEHALF the operands to use
+ for the high-numbered word and in some cases alter the
+ operands in OPERANDS to be suitable for the low-numbered word. */
+
+ if (size == 12)
+ {
+ if (optype0 == REGOP)
+ {
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
+ middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ }
+ else if (optype0 == OFFSOP)
+ {
+ middlehalf[0] = adjust_address (operands[0], SImode, 4);
+ latehalf[0] = adjust_address (operands[0], SImode, size - 4);
+ }
+ else
+ {
+ middlehalf[0] = adjust_address (operands[0], SImode, 0);
+ latehalf[0] = adjust_address (operands[0], SImode, 0);
+ }
+
+ if (optype1 == REGOP)
+ {
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
+ middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ }
+ else if (optype1 == OFFSOP)
+ {
+ middlehalf[1] = adjust_address (operands[1], SImode, 4);
+ latehalf[1] = adjust_address (operands[1], SImode, size - 4);
+ }
+ else if (optype1 == CNSTOP)
+ {
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r;
+ long l[3];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+ operands[1] = GEN_INT (l[0]);
+ middlehalf[1] = GEN_INT (l[1]);
+ latehalf[1] = GEN_INT (l[2]);
+ }
+ else
+ {
+ /* No non-CONST_DOUBLE constant should ever appear
+ here. */
+ gcc_assert (!CONSTANT_P (operands[1]));
+ }
+ }
+ else
+ {
+ middlehalf[1] = adjust_address (operands[1], SImode, 0);
+ latehalf[1] = adjust_address (operands[1], SImode, 0);
+ }
+ }
+ else
+ /* size is not 12: */
+ {
+ if (optype0 == REGOP)
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else if (optype0 == OFFSOP)
+ latehalf[0] = adjust_address (operands[0], SImode, size - 4);
+ else
+ latehalf[0] = adjust_address (operands[0], SImode, 0);
+
+ if (optype1 == REGOP)
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ else if (optype1 == OFFSOP)
+ latehalf[1] = adjust_address (operands[1], SImode, size - 4);
+ else if (optype1 == CNSTOP)
+ split_double (operands[1], &operands[1], &latehalf[1]);
+ else
+ latehalf[1] = adjust_address (operands[1], SImode, 0);
+ }
+
+ /* If insn is effectively movd N(sp),-(sp) then we will do the
+ high word first. We should use the adjusted operand 1 (which is N+4(sp))
+ for the low word as well, to compensate for the first decrement of sp. */
+ if (optype0 == PUSHOP
+ && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
+ && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
+ operands[1] = middlehalf[1] = latehalf[1];
+
+ /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
+ if the upper part of reg N does not appear in the MEM, arrange to
+ emit the move late-half first. Otherwise, compute the MEM address
+ into the upper part of N and use that as a pointer to the memory
+ operand. */
+ if (optype0 == REGOP
+ && (optype1 == OFFSOP || optype1 == MEMOP))
+ {
+ rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
+
+ if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
+ && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ {
+ /* If both halves of dest are used in the src memory address,
+ compute the address into latehalf of dest.
+ Note that this can't happen if the dest is two data regs. */
+ compadr:
+ xops[0] = latehalf[0];
+ xops[1] = XEXP (operands[1], 0);
+
+ handle_compadr (xops);
+ if (GET_MODE (operands[1]) == XFmode)
+ {
+ operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
+ middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
+ latehalf[1] = adjust_address (operands[1], DImode, size - 4);
+ }
+ else
+ {
+ operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
+ latehalf[1] = adjust_address (operands[1], DImode, size - 4);
+ }
+ }
+ else if (size == 12
+ && reg_overlap_mentioned_p (middlehalf[0],
+ XEXP (operands[1], 0)))
+ {
+ /* Check for two regs used by both source and dest.
+ Note that this can't happen if the dest is all data regs.
+ It can happen if the dest is d6, d7, a0.
+ But in that case, latehalf is an addr reg, so
+ the code at compadr does ok. */
+
+ if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
+ || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ goto compadr;
+
+ /* JRV says this can't happen: */
+ gcc_assert (!addreg0 && !addreg1);
+
+ /* Only the middle reg conflicts; simply put it last. */
+ handle_movsi (operands);
+ handle_movsi (latehalf);
+ handle_movsi (middlehalf);
+
+ return;
+ }
+ else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
+ /* If the low half of dest is mentioned in the source memory
+ address, arrange to emit the late half of the move first. */
+ dest_overlapped_low = 1;
+ }
+
+ /* If one or both operands are autodecrementing,
+ do the two words, high-numbered first. */
+
+ /* Likewise, if the first move would clobber the source of the second one,
+ do them in the other order. This happens only for registers;
+ such overlap can't happen in memory unless the user explicitly
+ sets it up, and that is an undefined circumstance. */
+
+ if (optype0 == PUSHOP || optype1 == PUSHOP
+ || (optype0 == REGOP && optype1 == REGOP
+ && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
+ || REGNO (operands[0]) == REGNO (latehalf[1])))
+ || dest_overlapped_low)
+ {
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ handle_reg_adjust (addreg0, size - 4);
+ if (addreg1)
+ handle_reg_adjust (addreg1, size - 4);
+
+ /* Do that word. */
+ handle_movsi (latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ handle_reg_adjust (addreg0, -4);
+ if (addreg1)
+ handle_reg_adjust (addreg1, -4);
+
+ if (size == 12)
+ {
+ handle_movsi (middlehalf);
+
+ if (addreg0)
+ handle_reg_adjust (addreg0, -4);
+ if (addreg1)
+ handle_reg_adjust (addreg1, -4);
+ }
+
+ /* Do low-numbered word. */
+
+ handle_movsi (operands);
+ return;
+ }
+
+ /* Normal case: do the two words, low-numbered first. */
+
+ m68k_final_prescan_insn (NULL, operands, 2);
+ handle_movsi (operands);
+
+ /* Do the middle one of the three words for long double. */
+ if (size == 12)
+ {
+ if (addreg0)
+ handle_reg_adjust (addreg0, 4);
+ if (addreg1)
+ handle_reg_adjust (addreg1, 4);
+
+ m68k_final_prescan_insn (NULL, middlehalf, 2);
+ handle_movsi (middlehalf);
+ }
+
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ handle_reg_adjust (addreg0, 4);
+ if (addreg1)
+ handle_reg_adjust (addreg1, 4);
+
+ /* Do that word. */
+ m68k_final_prescan_insn (NULL, latehalf, 2);
+ handle_movsi (latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ handle_reg_adjust (addreg0, -(size - 4));
+ if (addreg1)
+ handle_reg_adjust (addreg1, -(size - 4));
+
+ return;
+}
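+
+/* As a concrete illustration of handle_move_double (hypothetical
+ operands), a DImode store of the register pair %d0/%d1 to an
+ offsettable address (%a0) takes the normal case at the end and splits
+ into move.l %d0,(%a0) followed by move.l %d1,4(%a0): the low-addressed
+ word first, LATEHALF second. */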
+
+/* Output assembler code to adjust REG by N. */
+static void
+output_reg_adjust (rtx reg, int n)
+{
+ const char *s;
+
+ gcc_assert (GET_MODE (reg) == SImode
+ && -12 <= n && n != 0 && n <= 12);
+
+ switch (n)
+ {
+ case 12:
+ s = "add%.l #12,%0";
+ break;
+
+ case 8:
+ s = "addq%.l #8,%0";
+ break;
+
+ case 4:
+ s = "addq%.l #4,%0";
+ break;
+
+ case -12:
+ s = "sub%.l #12,%0";
+ break;
+
+ case -8:
+ s = "subq%.l #8,%0";
+ break;
+
+ case -4:
+ s = "subq%.l #4,%0";
+ break;
+
+ default:
+ gcc_unreachable ();
+ s = NULL;
+ }
+
+ output_asm_insn (s, &reg);
+}
+
+/* Emit rtl code to adjust REG by N. */
+static void
+emit_reg_adjust (rtx reg1, int n)
+{
+ rtx reg2;
+
+ gcc_assert (GET_MODE (reg1) == SImode
+ && -12 <= n && n != 0 && n <= 12);
+
+ reg1 = copy_rtx (reg1);
+ reg2 = copy_rtx (reg1);
+
+ if (n < 0)
+ emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
+ else if (n > 0)
+ emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
+ else
+ gcc_unreachable ();
+}
+
+/* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
+static void
+output_compadr (rtx operands[2])
+{
+ output_asm_insn ("lea %a1,%0", operands);
+}
+
+/* Output the best assembler insn for moving operands[1] into operands[0]
+ as a fullword. */
+static void
+output_movsi (rtx operands[2])
+{
+ output_asm_insn (singlemove_string (operands), operands);
+}
+
+/* Copy OP and change its mode to MODE. */
+static rtx
+copy_operand (rtx op, enum machine_mode mode)
+{
+ /* ??? This looks really ugly. There must be a better way
+ to change a mode on the operand. */
+ if (GET_MODE (op) != VOIDmode)
+ {
+ if (REG_P (op))
+ op = gen_rtx_REG (mode, REGNO (op));
+ else
+ {
+ op = copy_rtx (op);
+ PUT_MODE (op, mode);
+ }
+ }
+
+ return op;
+}
+
+/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
+static void
+emit_movsi (rtx operands[2])
+{
+ operands[0] = copy_operand (operands[0], SImode);
+ operands[1] = copy_operand (operands[1], SImode);
+
+ emit_insn (gen_movsi (operands[0], operands[1]));
+}
+
+/* Output assembler code to perform a doubleword move insn
+ with operands OPERANDS. */
+const char *
+output_move_double (rtx *operands)
+{
+ handle_move_double (operands,
+ output_reg_adjust, output_compadr, output_movsi);
+
+ return "";
+}
+
+/* Output rtl code to perform a doubleword move insn
+ with operands OPERANDS. */
+void
+m68k_emit_move_double (rtx operands[2])
+{
+ handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
+}
+
+/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
+ new rtx with the correct mode. */
+
+static rtx
+force_mode (enum machine_mode mode, rtx orig)
+{
+ if (mode == GET_MODE (orig))
+ return orig;
+
+ if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ return gen_rtx_REG (mode, REGNO (orig));
+}
+
+static int
+fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return reg_renumber && FP_REG_P (op);
+}
+
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally.
+
+ Note SCRATCH_REG may not be in the proper mode depending on how it
+ will be used. This routine is responsible for creating a new copy
+ of SCRATCH_REG in the proper mode. */
+
+int
+emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
+{
+ register rtx operand0 = operands[0];
+ register rtx operand1 = operands[1];
+ register rtx tem;
+
+ if (scratch_reg
+ && reload_in_progress && GET_CODE (operand0) == REG
+ && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
+ operand0 = reg_equiv_mem[REGNO (operand0)];
+ else if (scratch_reg
+ && reload_in_progress && GET_CODE (operand0) == SUBREG
+ && GET_CODE (SUBREG_REG (operand0)) == REG
+ && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
+ {
+ /* We must not alter SUBREG_BYTE (operand0) since that would confuse
+ the code which tracks sets/uses for delete_output_reload. */
+ rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
+ reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
+ SUBREG_BYTE (operand0));
+ operand0 = alter_subreg (&temp);
+ }
+
+ if (scratch_reg
+ && reload_in_progress && GET_CODE (operand1) == REG
+ && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
+ operand1 = reg_equiv_mem[REGNO (operand1)];
+ else if (scratch_reg
+ && reload_in_progress && GET_CODE (operand1) == SUBREG
+ && GET_CODE (SUBREG_REG (operand1)) == REG
+ && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
+ {
+ /* We must not alter SUBREG_BYTE (operand1) since that would confuse
+ the code which tracks sets/uses for delete_output_reload. */
+ rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
+ reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
+ SUBREG_BYTE (operand1));
+ operand1 = alter_subreg (&temp);
+ }
+
+ if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
+ && ((tem = find_replacement (&XEXP (operand0, 0)))
+ != XEXP (operand0, 0)))
+ operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
+ if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
+ && ((tem = find_replacement (&XEXP (operand1, 0)))
+ != XEXP (operand1, 0)))
+ operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
+
+ /* Handle secondary reloads for loads/stores of FP registers where
+ the address is symbolic by using the scratch register. */
+ if (fp_reg_operand (operand0, mode)
+ && ((GET_CODE (operand1) == MEM
+ && ! memory_address_p (DFmode, XEXP (operand1, 0)))
+ || ((GET_CODE (operand1) == SUBREG
+ && GET_CODE (XEXP (operand1, 0)) == MEM
+ && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
+ && scratch_reg)
+ {
+ if (GET_CODE (operand1) == SUBREG)
+ operand1 = XEXP (operand1, 0);
+
+ /* SCRATCH_REG will hold an address. We want
+ it in SImode regardless of what mode it was originally given
+ to us. */
+ scratch_reg = force_mode (SImode, scratch_reg);
+
+ /* The displacement might make the address illegitimate; in that case
+ load the displacement into the scratch reg and add in the base. */
+ if (!memory_address_p (Pmode, XEXP (operand1, 0)))
+ {
+ emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
+ emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
+ Pmode,
+ XEXP (XEXP (operand1, 0), 0),
+ scratch_reg));
+ }
+ else
+ emit_move_insn (scratch_reg, XEXP (operand1, 0));
+ emit_insn (gen_rtx_SET (VOIDmode, operand0,
+ gen_rtx_MEM (mode, scratch_reg)));
+ return 1;
+ }
+ else if (fp_reg_operand (operand1, mode)
+ && ((GET_CODE (operand0) == MEM
+ && ! memory_address_p (DFmode, XEXP (operand0, 0)))
+ || ((GET_CODE (operand0) == SUBREG)
+ && GET_CODE (XEXP (operand0, 0)) == MEM
+ && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
+ && scratch_reg)
+ {
+ if (GET_CODE (operand0) == SUBREG)
+ operand0 = XEXP (operand0, 0);
+
+ /* SCRATCH_REG will hold an address and maybe the actual data. We want
+ it in SImode regardless of what mode it was originally given
+ to us. */
+ scratch_reg = force_mode (SImode, scratch_reg);
+
+ /* The displacement might make the address illegitimate; in that case
+ load the displacement into the scratch reg and add in the base. */
+ if (!memory_address_p (Pmode, XEXP (operand0, 0)))
+ {
+ emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
+ emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
+ 0)),
+ Pmode,
+ XEXP (XEXP (operand0, 0),
+ 0),
+ scratch_reg));
+ }
+ else
+ emit_move_insn (scratch_reg, XEXP (operand0, 0));
+ emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
+ operand1));
+ return 1;
+ }
+ /* Handle secondary reloads for loads of FP registers from constant
+ expressions by forcing the constant into memory.
+
+ Use scratch_reg to hold the address of the memory location.
+
+ The proper fix is to change PREFERRED_RELOAD_CLASS to return
+ NO_REGS when presented with a const_int and a register class
+ containing only FP registers. Doing so unfortunately creates
+ more problems than it solves. Fix this for 2.5. */
+ else if (fp_reg_operand (operand0, mode)
+ && CONSTANT_P (operand1)
+ && scratch_reg)
+ {
+ rtx xoperands[2];
+
+ /* SCRATCH_REG will hold an address and maybe the actual data. We want
+ it in SImode regardless of what mode it was originally given
+ to us. */
+ scratch_reg = force_mode (SImode, scratch_reg);
+
+ /* Force the constant into memory and put the address of the
+ memory location into scratch_reg. */
+ xoperands[0] = scratch_reg;
+ xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
+ emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
+
+ /* Now load the destination register. */
+ emit_insn (gen_rtx_SET (mode, operand0,
+ gen_rtx_MEM (mode, scratch_reg)));
+ return 1;
+ }
+
+ /* Now have insn-emit do whatever it normally does. */
+ return 0;
+}
+
+/* Split one or more DImode RTL references into pairs of SImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
+{
+ while (num--)
+ {
+ rtx op = operands[num];
+
+ /* simplify_subreg refuses to split volatile memory references,
+ but we still have to handle them. */
+ if (GET_CODE (op) == MEM)
+ {
+ lo_half[num] = adjust_address (op, SImode, 4);
+ hi_half[num] = adjust_address (op, SImode, 0);
+ }
+ else
+ {
+ lo_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 4);
+ hi_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 0);
+ }
+ }
+}
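+
+/* Note the big-endian layout in split_di above: for a hypothetical
+ (reg:DI %d0), the subreg at byte 0 is the most significant half, so
+ hi_half becomes (reg:SI %d0) and lo_half becomes (reg:SI %d1); for a
+ MEM, the low half is likewise the word at offset 4. */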
+
+/* Split X into a base and a constant offset, storing them in *BASE
+ and *OFFSET respectively. */
+
+static void
+m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
+{
+ *offset = 0;
+ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ *offset += INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+ }
+ *base = x;
+}
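+
+/* For example (illustrative only): given X = (plus:SI (reg:SI %a0)
+   (const_int 8)), this sets *BASE to (reg:SI %a0) and *OFFSET to 8;
+   for a bare register it sets *BASE to X and leaves *OFFSET at 0.  */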
+
+/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
+   instruction.  STORE_P is true if the move is a store and false if it
+   is a load.
+
+ If the instruction uses post-increment or pre-decrement addressing,
+ AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
+ adjustment. This adjustment will be made by the first element of
+ PARALLEL, with the loads or stores starting at element 1. If the
+ instruction does not use post-increment or pre-decrement addressing,
+ AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
+ start at element 0. */
+
+bool
+m68k_movem_pattern_p (rtx pattern, rtx automod_base,
+ HOST_WIDE_INT automod_offset, bool store_p)
+{
+ rtx base, mem_base, set, mem, reg, last_reg;
+ HOST_WIDE_INT offset, mem_offset;
+ int i, first, len;
+ enum reg_class rclass;
+
+ len = XVECLEN (pattern, 0);
+ first = (automod_base != NULL);
+
+ if (automod_base)
+ {
+ /* Stores must be pre-decrement and loads must be post-increment. */
+ if (store_p != (automod_offset < 0))
+ return false;
+
+ /* Work out the base and offset for lowest memory location. */
+ base = automod_base;
+ offset = (automod_offset < 0 ? automod_offset : 0);
+ }
+ else
+ {
+ /* Allow any valid base and offset in the first access. */
+ base = NULL;
+ offset = 0;
+ }
+
+ last_reg = NULL;
+ rclass = NO_REGS;
+ for (i = first; i < len; i++)
+ {
+ /* We need a plain SET. */
+ set = XVECEXP (pattern, 0, i);
+ if (GET_CODE (set) != SET)
+ return false;
+
+ /* Check that we have a memory location... */
+ mem = XEXP (set, !store_p);
+ if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
+ return false;
+
+ /* ...with the right address. */
+ if (base == NULL)
+ {
+ m68k_split_offset (XEXP (mem, 0), &base, &offset);
+ /* The ColdFire instruction only allows (An) and (d16,An) modes.
+ There are no mode restrictions for 680x0 besides the
+ automodification rules enforced above. */
+ if (TARGET_COLDFIRE
+ && !m68k_legitimate_base_reg_p (base, reload_completed))
+ return false;
+ }
+ else
+ {
+ m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
+ if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
+ return false;
+ }
+
+ /* Check that we have a register of the required mode and class. */
+ reg = XEXP (set, store_p);
+ if (!REG_P (reg)
+ || !HARD_REGISTER_P (reg)
+ || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
+ return false;
+
+ if (last_reg)
+ {
+ /* The register must belong to RCLASS and have a higher number
+ than the register in the previous SET. */
+ if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
+ || REGNO (last_reg) >= REGNO (reg))
+ return false;
+ }
+ else
+ {
+ /* Work out which register class we need. */
+ if (INT_REGNO_P (REGNO (reg)))
+ rclass = GENERAL_REGS;
+ else if (FP_REGNO_P (REGNO (reg)))
+ rclass = FP_REGS;
+ else
+ return false;
+ }
+
+ last_reg = reg;
+ offset += GET_MODE_SIZE (GET_MODE (reg));
+ }
+
+ /* If we have an automodification, check whether the final offset is OK. */
+ if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
+ return false;
+
+ /* Reject unprofitable cases. */
+ if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
+ return false;
+
+ return true;
+}
+
+/* Return the assembly code template for a movem or fmovem instruction
+ whose pattern is given by PATTERN. Store the template's operands
+ in OPERANDS.
+
+ If the instruction uses post-increment or pre-decrement addressing,
+ AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
+ is true if this is a store instruction. */
+
+const char *
+m68k_output_movem (rtx *operands, rtx pattern,
+ HOST_WIDE_INT automod_offset, bool store_p)
+{
+ unsigned int mask;
+ int i, first;
+
+ gcc_assert (GET_CODE (pattern) == PARALLEL);
+ mask = 0;
+ first = (automod_offset != 0);
+ for (i = first; i < XVECLEN (pattern, 0); i++)
+ {
+ /* When using movem with pre-decrement addressing, register X + D0_REG
+ is controlled by bit 15 - X. For all other addressing modes,
+ register X + D0_REG is controlled by bit X. Confusingly, the
+ register mask for fmovem is in the opposite order to that for
+ movem. */
+ unsigned int regno;
+
+ gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
+ gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
+ regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
+ if (automod_offset < 0)
+ {
+ if (FP_REGNO_P (regno))
+ mask |= 1 << (regno - FP0_REG);
+ else
+ mask |= 1 << (15 - (regno - D0_REG));
+ }
+ else
+ {
+ if (FP_REGNO_P (regno))
+ mask |= 1 << (7 - (regno - FP0_REG));
+ else
+ mask |= 1 << (regno - D0_REG);
+ }
+ }
+ CC_STATUS_INIT;
+
+ if (automod_offset == 0)
+ operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
+ else if (automod_offset < 0)
+ operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
+ else
+ operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
+ operands[1] = GEN_INT (mask);
+ if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
+ {
+ if (store_p)
+ return "fmovem %1,%a0";
+ else
+ return "fmovem %a0,%1";
+ }
+ else
+ {
+ if (store_p)
+ return "movem%.l %1,%a0";
+ else
+ return "movem%.l %a0,%1";
+ }
+}
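+
+/* A worked example (illustrative, not from the original source): storing
+   %d2-%d4 with pre-decrement addressing sets mask bits 15-2, 15-3 and
+   15-4, so the emitted instruction is conceptually
+
+     movem.l #0x3800,-(%sp)
+
+   while the same registers at a plain (d16,An) address use bits 2-4,
+   i.e. mask 0x001c.  */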
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG. */
+
+static rtx
+find_addr_reg (rtx addr)
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ gcc_unreachable ();
+ }
+ gcc_assert (GET_CODE (addr) == REG);
+ return addr;
+}
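+
+/* E.g. (illustrative): for ADDR = (plus (plus (reg %a0) (symbol_ref X))
+   (const_int 4)) the loop above peels off the constant terms and
+   returns (reg %a0).  */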
+
+/* Output assembler code to perform a 32-bit 3-operand add. */
+
+const char *
+output_addsi3 (rtx *operands)
+{
+ if (! operands_match_p (operands[0], operands[1]))
+ {
+ if (!ADDRESS_REG_P (operands[1]))
+ {
+ rtx tmp = operands[1];
+
+ operands[1] = operands[2];
+ operands[2] = tmp;
+ }
+
+ /* These insns can result from reloads to access
+ stack slots over 64k from the frame pointer. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
+ return "move%.l %2,%0\n\tadd%.l %1,%0";
+ if (GET_CODE (operands[2]) == REG)
+ return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
+ return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
+ }
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) <= 8)
+ return "addq%.l %2,%0";
+ if (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) >= -8)
+ {
+ operands[2] = GEN_INT (- INTVAL (operands[2]));
+ return "subq%.l %2,%0";
+ }
+ /* On the CPU32 it is faster to use two addql instructions to
+ add a small integer (8 < N <= 16) to a register.
+ Likewise for subql. */
+ if (TUNE_CPU32 && REG_P (operands[0]))
+ {
+ if (INTVAL (operands[2]) > 8
+ && INTVAL (operands[2]) <= 16)
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
+ return "addq%.l #8,%0\n\taddq%.l %2,%0";
+ }
+ if (INTVAL (operands[2]) < -8
+ && INTVAL (operands[2]) >= -16)
+ {
+ operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
+ return "subq%.l #8,%0\n\tsubq%.l %2,%0";
+ }
+ }
+ if (ADDRESS_REG_P (operands[0])
+ && INTVAL (operands[2]) >= -0x8000
+ && INTVAL (operands[2]) < 0x8000)
+ {
+ if (TUNE_68040)
+ return "add%.w %2,%0";
+ else
+ return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
+ }
+ }
+ return "add%.l %2,%0";
+}
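+
+/* Illustrative outputs in Motorola syntax (sketches, not verbatim):
+
+     d0 += 5        ->  addq.l #5,%d0
+     d0 += -3       ->  subq.l #3,%d0
+     a1 = a0 + 100  ->  lea (100,%a0),%a1
+
+   The three-operand lea form requires an address-register destination,
+   which the insn constraints are expected to guarantee.  */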
+
+/* Store in cc_status the expressions that the condition codes will
+ describe after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+/* On the 68000, all the insns to store in an address register fail to
+   set the cc's.  However, in some cases these instructions can make it
+   invalid to use the saved cc's.  In those cases we clear out
+   some or all of the saved cc's so they won't be used.  */
+
+void
+notice_update_cc (rtx exp, rtx insn)
+{
+ if (GET_CODE (exp) == SET)
+ {
+ if (GET_CODE (SET_SRC (exp)) == CALL)
+ CC_STATUS_INIT;
+ else if (ADDRESS_REG_P (SET_DEST (exp)))
+ {
+ if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
+ cc_status.value1 = 0;
+ if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
+ cc_status.value2 = 0;
+ }
+ /* fmoves to memory or data registers do not set the condition
+ codes. Normal moves _do_ set the condition codes, but not in
+ a way that is appropriate for comparison with 0, because -0.0
+ would be treated as a negative nonzero number. Note that it
+ isn't appropriate to conditionalize this restriction on
+ HONOR_SIGNED_ZEROS because that macro merely indicates whether
+ we care about the difference between -0.0 and +0.0. */
+ else if (!FP_REG_P (SET_DEST (exp))
+ && SET_DEST (exp) != cc0_rtx
+ && (FP_REG_P (SET_SRC (exp))
+ || GET_CODE (SET_SRC (exp)) == FIX
+ || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
+ CC_STATUS_INIT;
+ /* A pair of move insns doesn't produce a useful overall cc. */
+ else if (!FP_REG_P (SET_DEST (exp))
+ && !FP_REG_P (SET_SRC (exp))
+ && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
+ && (GET_CODE (SET_SRC (exp)) == REG
+ || GET_CODE (SET_SRC (exp)) == MEM
+ || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
+ CC_STATUS_INIT;
+ else if (SET_DEST (exp) != pc_rtx)
+ {
+ cc_status.flags = 0;
+ cc_status.value1 = SET_DEST (exp);
+ cc_status.value2 = SET_SRC (exp);
+ }
+ }
+ else if (GET_CODE (exp) == PARALLEL
+ && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
+ {
+ rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
+ rtx src = SET_SRC (XVECEXP (exp, 0, 0));
+
+ if (ADDRESS_REG_P (dest))
+ CC_STATUS_INIT;
+ else if (dest != pc_rtx)
+ {
+ cc_status.flags = 0;
+ cc_status.value1 = dest;
+ cc_status.value2 = src;
+ }
+ }
+ else
+ CC_STATUS_INIT;
+ if (cc_status.value2 != 0
+ && ADDRESS_REG_P (cc_status.value2)
+ && GET_MODE (cc_status.value2) == QImode)
+ CC_STATUS_INIT;
+ if (cc_status.value2 != 0)
+ switch (GET_CODE (cc_status.value2))
+ {
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT:
+ case ROTATE: case ROTATERT:
+ /* These instructions always clear the overflow bit, and set
+ the carry to the bit shifted out. */
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
+ break;
+
+ case PLUS: case MINUS: case MULT:
+ case DIV: case UDIV: case MOD: case UMOD: case NEG:
+ if (GET_MODE (cc_status.value2) != VOIDmode)
+ cc_status.flags |= CC_NO_OVERFLOW;
+ break;
+ case ZERO_EXTEND:
+ /* (SET r1 (ZERO_EXTEND r2)) on this machine
+ ends with a move insn moving r2 in r2's mode.
+ Thus, the cc's are set for r2.
+ This can set N bit spuriously. */
+ cc_status.flags |= CC_NOT_NEGATIVE;
+
+ default:
+ break;
+ }
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
+ && cc_status.value2
+ && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
+ cc_status.value2 = 0;
+ if (((cc_status.value1 && FP_REG_P (cc_status.value1))
+ || (cc_status.value2 && FP_REG_P (cc_status.value2))))
+ cc_status.flags = CC_IN_68881;
+ if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
+ && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
+ {
+ cc_status.flags = CC_IN_68881;
+ if (!FP_REG_P (XEXP (cc_status.value2, 0)))
+ cc_status.flags |= CC_REVERSED;
+ }
+}
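+
+/* For example (illustrative): after "add.l %d1,%d0" the cc's describe %d0,
+   so a following "tst.l %d0" can be omitted; after "move.l %d0,%a0" nothing
+   useful is recorded, because moves into address registers do not set the
+   cc's.  */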
+
+const char *
+output_move_const_double (rtx *operands)
+{
+ int code = standard_68881_constant_p (operands[1]);
+
+ if (code != 0)
+ {
+ static char buf[40];
+
+ sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
+ return buf;
+ }
+ return "fmove%.d %1,%0";
+}
+
+const char *
+output_move_const_single (rtx *operands)
+{
+ int code = standard_68881_constant_p (operands[1]);
+
+ if (code != 0)
+ {
+ static char buf[40];
+
+ sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
+ return buf;
+ }
+ return "fmove%.s %f1,%0";
+}
+
+/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
+ from the "fmovecr" instruction.
+ The value, anded with 0xff, gives the code to use in fmovecr
+ to get the desired constant. */
+
+/* This code has been fixed for cross-compilation. */
+
+static int inited_68881_table = 0;
+
+static const char *const strings_68881[7] = {
+ "0.0",
+ "1.0",
+ "10.0",
+ "100.0",
+ "10000.0",
+ "1e8",
+ "1e16"
+};
+
+static const int codes_68881[7] = {
+ 0x0f,
+ 0x32,
+ 0x33,
+ 0x34,
+ 0x35,
+ 0x36,
+ 0x37
+};
+
+REAL_VALUE_TYPE values_68881[7];
+
+/* Set up values_68881 array by converting the decimal values
+ strings_68881 to binary. */
+
+void
+init_68881_table (void)
+{
+ int i;
+ REAL_VALUE_TYPE r;
+ enum machine_mode mode;
+
+ mode = SFmode;
+ for (i = 0; i < 7; i++)
+ {
+ if (i == 6)
+ mode = DFmode;
+ r = REAL_VALUE_ATOF (strings_68881[i], mode);
+ values_68881[i] = r;
+ }
+ inited_68881_table = 1;
+}
+
+int
+standard_68881_constant_p (rtx x)
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
+ used at all on those chips. */
+ if (TUNE_68040_60)
+ return 0;
+
+ if (! inited_68881_table)
+ init_68881_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+
+ /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
+ is rejected. */
+ for (i = 0; i < 6; i++)
+ {
+ if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
+ return (codes_68881[i]);
+ }
+
+ if (GET_MODE (x) == SFmode)
+ return 0;
+
+ if (REAL_VALUES_EQUAL (r, values_68881[6]))
+ return (codes_68881[6]);
+
+  /* Larger powers of ten in the constants RAM are not used
+     because they are not equal to a `double' C constant.  */
+ return 0;
+}
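+
+/* For instance (illustrative): the DFmode constant 1.0 matches
+   values_68881[1], so standard_68881_constant_p returns 0x32 and
+   output_move_const_double emits
+
+     fmovecr #0x32,%fp0
+
+   loading the value from the 68881's on-chip constant ROM.  */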
+
+/* If X is a floating-point constant, return the logarithm of X base 2,
+ or 0 if X is not a power of 2. */
+
+int
+floating_exact_log2 (rtx x)
+{
+ REAL_VALUE_TYPE r, r1;
+ int exp;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+
+ if (REAL_VALUES_LESS (r, dconst1))
+ return 0;
+
+ exp = real_exponent (&r);
+ real_2expN (&r1, exp, DFmode);
+ if (REAL_VALUES_EQUAL (r1, r))
+ return exp;
+
+ return 0;
+}
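+
+/* Illustrative behavior (not in the original source):
+
+     floating_exact_log2 (8.0)  ==  3   ;; 8.0 == 2^3
+     floating_exact_log2 (6.0)  ==  0   ;; not a power of 2
+     floating_exact_log2 (0.5)  ==  0   ;; values below 1.0 are rejected
+*/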
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand X. X is an RTL
+ expression.
+
+ CODE is a value that can be used to specify one of several ways
+ of printing the operand. It is used when identical operands
+ must be printed differently depending on the context. CODE
+ comes from the `%' specification that was used to request
+ printing of the operand. If the specification was just `%DIGIT'
+ then CODE is 0; if the specification was `%LTR DIGIT' then CODE
+ is the ASCII code for LTR.
+
+ If X is a register, this macro should print the register's name.
+ The names can be found in an array `reg_names' whose type is
+ `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
+
+ When the machine description has a specification `%PUNCT' (a `%'
+ followed by a punctuation character), this macro is called with
+ a null pointer for X and the punctuation character for CODE.
+
+ The m68k specific codes are:
+
+ '.' for dot needed in Motorola-style opcode names.
+ '-' for an operand pushing on the stack:
+ sp@-, -(sp) or -(%sp) depending on the style of syntax.
+ '+' for an operand pushing on the stack:
+ sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
+ '@' for a reference to the top word on the stack:
+ sp@, (sp) or (%sp) depending on the style of syntax.
+ '#' for an immediate operand prefix (# in MIT and Motorola syntax
+ but & in SGS syntax).
+ '!' for the cc register (used in an `and to cc' insn).
+ '$' for the letter `s' in an op code, but only on the 68040.
+ '&' for the letter `d' in an op code, but only on the 68040.
+ '/' for register prefix needed by longlong.h.
+ '?' for m68k_library_id_string
+
+ 'b' for byte insn (no effect, on the Sun; this is for the ISI).
+ 'd' to force memory addressing to be absolute, not relative.
+ 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
+ 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
+ or print pair of registers as rx:ry.
+ 'p' print an address with @PLTPC attached, but only if the operand
+ is not locally-bound. */
+
+void
+print_operand (FILE *file, rtx op, int letter)
+{
+ if (letter == '.')
+ {
+ if (MOTOROLA)
+ fprintf (file, ".");
+ }
+ else if (letter == '#')
+ asm_fprintf (file, "%I");
+ else if (letter == '-')
+ asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
+ else if (letter == '+')
+ asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
+ else if (letter == '@')
+ asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
+ else if (letter == '!')
+ asm_fprintf (file, "%Rfpcr");
+ else if (letter == '$')
+ {
+ if (TARGET_68040)
+ fprintf (file, "s");
+ }
+ else if (letter == '&')
+ {
+ if (TARGET_68040)
+ fprintf (file, "d");
+ }
+ else if (letter == '/')
+ asm_fprintf (file, "%R");
+ else if (letter == '?')
+ asm_fprintf (file, m68k_library_id_string);
+ else if (letter == 'p')
+ {
+ output_addr_const (file, op);
+ if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
+ fprintf (file, "@PLTPC");
+ }
+ else if (GET_CODE (op) == REG)
+ {
+ if (letter == 'R')
+ /* Print out the second register name of a register pair.
+ I.e., R (6) => 7. */
+ fputs (M68K_REGNAME(REGNO (op) + 1), file);
+ else
+ fputs (M68K_REGNAME(REGNO (op)), file);
+ }
+ else if (GET_CODE (op) == MEM)
+ {
+ output_address (XEXP (op, 0));
+ if (letter == 'd' && ! TARGET_68020
+ && CONSTANT_ADDRESS_P (XEXP (op, 0))
+ && !(GET_CODE (XEXP (op, 0)) == CONST_INT
+ && INTVAL (XEXP (op, 0)) < 0x8000
+ && INTVAL (XEXP (op, 0)) >= -0x8000))
+ fprintf (file, MOTOROLA ? ".l" : ":l");
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
+ {
+ REAL_VALUE_TYPE r;
+ long l;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ REAL_VALUE_TO_TARGET_SINGLE (r, l);
+ asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
+ {
+ REAL_VALUE_TYPE r;
+ long l[3];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+ asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
+ l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
+ {
+ REAL_VALUE_TYPE r;
+ long l[2];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
+ }
+ else
+ {
+ /* Use `print_operand_address' instead of `output_addr_const'
+ to ensure that we print relevant PIC stuff. */
+ asm_fprintf (file, "%I");
+ if (TARGET_PCREL
+ && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
+ print_operand_address (file, op);
+ else
+ output_addr_const (file, op);
+ }
+}
+
+/* Return string for TLS relocation RELOC. */
+
+static const char *
+m68k_get_reloc_decoration (enum m68k_reloc reloc)
+{
+ /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
+ gcc_assert (MOTOROLA || reloc == RELOC_GOT);
+
+ switch (reloc)
+ {
+ case RELOC_GOT:
+ if (MOTOROLA)
+ {
+ if (flag_pic == 1 && TARGET_68020)
+ return "@GOT.w";
+ else
+ return "@GOT";
+ }
+ else
+ {
+ if (TARGET_68020)
+ {
+ switch (flag_pic)
+ {
+ case 1:
+ return ":w";
+ case 2:
+ return ":l";
+ default:
+ return "";
+ }
+ }
+ }
+
+ case RELOC_TLSGD:
+ return "@TLSGD";
+
+ case RELOC_TLSLDM:
+ return "@TLSLDM";
+
+ case RELOC_TLSLDO:
+ return "@TLSLDO";
+
+ case RELOC_TLSIE:
+ return "@TLSIE";
+
+ case RELOC_TLSLE:
+ return "@TLSLE";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
+
+bool
+m68k_output_addr_const_extra (FILE *file, rtx x)
+{
+ if (GET_CODE (x) == UNSPEC)
+ {
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_RELOC16:
+ case UNSPEC_RELOC32:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fputs (m68k_get_reloc_decoration
+ ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
+ return true;
+
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
+
+static void
+m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ gcc_assert (size == 4);
+ fputs ("\t.long\t", file);
+ output_addr_const (file, x);
+ fputs ("@TLSLDO+0x8000", file);
+}
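+
+/* Illustratively, for a TLS symbol "x" this emits a directive such as
+
+     .long x@TLSLDO+0x8000
+
+   into the debug section being written.  */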
+
+/* In the name of slightly smaller debug output, and to cater to
+ general assembler lossage, recognize various UNSPEC sequences
+ and turn them back into a direct symbol reference. */
+
+static rtx
+m68k_delegitimize_address (rtx orig_x)
+{
+ rtx x;
+ struct m68k_address addr;
+ rtx unspec;
+
+ orig_x = delegitimize_mem_from_attrs (orig_x);
+ x = orig_x;
+ if (MEM_P (x))
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
+ return orig_x;
+
+ if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
+ || addr.offset == NULL_RTX
+ || GET_CODE (addr.offset) != CONST)
+ return orig_x;
+
+ unspec = XEXP (addr.offset, 0);
+ if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
+ unspec = XEXP (unspec, 0);
+ if (GET_CODE (unspec) != UNSPEC
+ || (XINT (unspec, 1) != UNSPEC_RELOC16
+ && XINT (unspec, 1) != UNSPEC_RELOC32))
+ return orig_x;
+ x = XVECEXP (unspec, 0, 0);
+ gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
+ if (unspec != XEXP (addr.offset, 0))
+ x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
+ if (addr.index)
+ {
+ rtx idx = addr.index;
+ if (addr.scale != 1)
+ idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
+ x = gen_rtx_PLUS (Pmode, idx, x);
+ }
+ if (addr.base)
+ x = gen_rtx_PLUS (Pmode, addr.base, x);
+ if (MEM_P (orig_x))
+ x = replace_equiv_address_nv (orig_x, x);
+ return x;
+}
+
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. ADDR is an RTL expression.
+
+ Note that this contains a kludge that knows that the only reason
+ we have an address (plus (label_ref...) (reg...)) when not generating
+ PIC code is in the insn before a tablejump, and we know that m68k.md
+ generates a label LInnn: on such an insn.
+
+ It is possible for PIC to generate a (plus (label_ref...) (reg...))
+ and we handle that just like we would a (plus (symbol_ref...) (reg...)).
+
+ This routine is responsible for distinguishing between -fpic and -fPIC
+ style relocations in an address. When generating -fpic code the
+ offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
+   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0).  */
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+ struct m68k_address address;
+
+ if (!m68k_decompose_address (QImode, addr, true, &address))
+ gcc_unreachable ();
+
+ if (address.code == PRE_DEC)
+ fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
+ M68K_REGNAME (REGNO (address.base)));
+ else if (address.code == POST_INC)
+ fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
+ M68K_REGNAME (REGNO (address.base)));
+ else if (!address.base && !address.index)
+ {
+ /* A constant address. */
+ gcc_assert (address.offset == addr);
+ if (GET_CODE (addr) == CONST_INT)
+ {
+ /* (xxx).w or (xxx).l. */
+ if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
+ fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
+ }
+ else if (TARGET_PCREL)
+ {
+ /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
+ fputc ('(', file);
+ output_addr_const (file, addr);
+ asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
+ }
+ else
+ {
+ /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
+ name ends in `.<letter>', as the last 2 characters can be
+ mistaken as a size suffix. Put the name in parentheses. */
+ if (GET_CODE (addr) == SYMBOL_REF
+ && strlen (XSTR (addr, 0)) > 2
+ && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
+ {
+ putc ('(', file);
+ output_addr_const (file, addr);
+ putc (')', file);
+ }
+ else
+ output_addr_const (file, addr);
+ }
+ }
+ else
+ {
+ int labelno;
+
+ /* If ADDR is a (d8,pc,Xn) address, this is the number of the
+ label being accessed, otherwise it is -1. */
+ labelno = (address.offset
+ && !address.base
+ && GET_CODE (address.offset) == LABEL_REF
+ ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
+ : -1);
+ if (MOTOROLA)
+ {
+ /* Print the "offset(base" component. */
+ if (labelno >= 0)
+ asm_fprintf (file, "%LL%d(%Rpc,", labelno);
+ else
+ {
+ if (address.offset)
+ output_addr_const (file, address.offset);
+
+ putc ('(', file);
+ if (address.base)
+ fputs (M68K_REGNAME (REGNO (address.base)), file);
+ }
+ /* Print the ",index" component, if any. */
+ if (address.index)
+ {
+ if (address.base)
+ putc (',', file);
+ fprintf (file, "%s.%c",
+ M68K_REGNAME (REGNO (address.index)),
+ GET_MODE (address.index) == HImode ? 'w' : 'l');
+ if (address.scale != 1)
+ fprintf (file, "*%d", address.scale);
+ }
+ putc (')', file);
+ }
+ else /* !MOTOROLA */
+ {
+ if (!address.offset && !address.index)
+ fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
+ else
+ {
+ /* Print the "base@(offset" component. */
+ if (labelno >= 0)
+ asm_fprintf (file, "%Rpc@(%LL%d", labelno);
+ else
+ {
+ if (address.base)
+ fputs (M68K_REGNAME (REGNO (address.base)), file);
+ fprintf (file, "@(");
+ if (address.offset)
+ output_addr_const (file, address.offset);
+ }
+ /* Print the ",index" component, if any. */
+ if (address.index)
+ {
+ fprintf (file, ",%s:%c",
+ M68K_REGNAME (REGNO (address.index)),
+ GET_MODE (address.index) == HImode ? 'w' : 'l');
+ if (address.scale != 1)
+ fprintf (file, ":%d", address.scale);
+ }
+ putc (')', file);
+ }
+ }
+ }
+}
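+
+/* A few illustrative renderings (sketches, not verbatim output):
+
+     address                Motorola          MIT
+     (reg %a0)              (%a0)             %a0@
+     (%a0 + 8)              8(%a0)            %a0@(8)
+     (%a0 + %d1*4 + 8)      8(%a0,%d1.l*4)    %a0@(8,%d1:l:4)
+*/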
+
+/* Check for cases where a clr insn can be omitted from code using
+   strict_low_part sets.  For example, the second clrl here is not needed:
+   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
+
+ MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
+ insn we are checking for redundancy. TARGET is the register set by the
+ clear insn. */
+
+bool
+strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
+ rtx target)
+{
+ rtx p = first_insn;
+
+ while ((p = PREV_INSN (p)))
+ {
+ if (NOTE_INSN_BASIC_BLOCK_P (p))
+ return false;
+
+ if (NOTE_P (p))
+ continue;
+
+ /* If it isn't an insn, then give up. */
+ if (!INSN_P (p))
+ return false;
+
+ if (reg_set_p (target, p))
+ {
+ rtx set = single_set (p);
+ rtx dest;
+
+ /* If it isn't an easy to recognize insn, then give up. */
+ if (! set)
+ return false;
+
+ dest = SET_DEST (set);
+
+ /* If this sets the entire target register to zero, then our
+ first_insn is redundant. */
+ if (rtx_equal_p (dest, target)
+ && SET_SRC (set) == const0_rtx)
+ return true;
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ && GET_CODE (XEXP (dest, 0)) == REG
+ && REGNO (XEXP (dest, 0)) == REGNO (target)
+ && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
+ <= GET_MODE_SIZE (mode)))
+ /* This is a strict low part set which modifies less than
+ we are using, so it is safe. */
+ ;
+ else
+ return false;
+ }
+ }
+
+ return false;
+}
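+
+/* Tracing the example from the comment above (illustrative): scanning
+   backwards from the second "clrl d0", the intervening "movw a0@+,d0" is a
+   strict_low_part set of a narrower piece of d0, so the scan continues and
+   reaches the first full clear, proving the second clear redundant.  */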
+
+/* Operand predicates for implementing asymmetric pc-relative addressing
+ on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
+ when used as a source operand, but not as a destination operand.
+
+ We model this by restricting the meaning of the basic predicates
+ (general_operand, memory_operand, etc) to forbid the use of this
+ addressing mode, and then define the following predicates that permit
+ this addressing mode. These predicates can then be used for the
+ source operands of the appropriate instructions.
+
+   n.b. While it is theoretically possible to change all machine patterns
+   to use this addressing mode where permitted by the architecture,
+   it has only been implemented for "common" cases: SImode, HImode, and
+   QImode operands, and only for the principal operations that would
+   require this addressing mode: data movement and simple integer operations.
+
+ In parallel with these new predicates, two new constraint letters
+ were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
+ 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
+ In the pcrel case 's' is only valid in combination with 'a' registers.
+ See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
+ of how these constraints are used.
+
+ The use of these predicates is strictly optional, though patterns that
+ don't will cause an extra reload register to be allocated where one
+ was not necessary:
+
+ lea (abc:w,%pc),%a0 ; need to reload address
+ moveq &1,%d1 ; since write to pc-relative space
+ movel %d1,%a0@ ; is not allowed
+ ...
+ lea (abc:w,%pc),%a1 ; no need to reload address here
+ movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
+
+ For more info, consult tiemann@cygnus.com.
+
+
+ All of the ugliness with predicates and constraints is due to the
+ simple fact that the m68k does not allow a pc-relative addressing
+ mode as a destination. gcc does not distinguish between source and
+ destination addresses. Hence, if we claim that pc-relative address
+ modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
+ end up with invalid code. To get around this problem, we left
+ pc-relative modes as invalid addresses, and then added special
+ predicates and constraints to accept them.
+
+ A cleaner way to handle this is to modify gcc to distinguish
+ between source and destination addresses. We can then say that
+ pc-relative is a valid source address but not a valid destination
+ address, and hopefully avoid a lot of the predicate and constraint
+ hackery. Unfortunately, this would be a pretty big change. It would
+ be a useful change for a number of ports, but there aren't any current
+ plans to undertake this.
+
+ ***************************************************************************/
+
+
+const char *
+output_andsi3 (rtx *operands)
+{
+ int logval;
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) | 0xffff) == -1
+ && (DATA_REG_P (operands[0])
+ || offsettable_memref_p (operands[0]))
+ && !TARGET_COLDFIRE)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[0] = adjust_address (operands[0], HImode, 2);
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ /* Do not delete a following tstl %0 insn; that would be incorrect. */
+ CC_STATUS_INIT;
+ if (operands[2] == const0_rtx)
+ return "clr%.w %0";
+ return "and%.w %2,%0";
+ }
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
+ && (DATA_REG_P (operands[0])
+ || offsettable_memref_p (operands[0])))
+ {
+ if (DATA_REG_P (operands[0]))
+ operands[1] = GEN_INT (logval);
+ else
+ {
+ operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
+ operands[1] = GEN_INT (logval % 8);
+ }
+ /* This does not set condition codes in a standard way. */
+ CC_STATUS_INIT;
+ return "bclr %1,%0";
+ }
+ return "and%.l %2,%0";
+}
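+
+/* Two illustrative cases (assumed syntax):
+
+     d0 &= 0xffff0005  ->  and.w #5,%d0    ;; upper 16 bits all ones
+     d0 &= 0xfffffffe  ->  bclr #0,%d0     ;; single zero bit
+
+   Both avoid a 32-bit immediate; the CC_STATUS_INIT calls above record
+   that neither form sets the cc's in the standard way.  */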
+
+const char *
+output_iorsi3 (rtx *operands)
+{
+ register int logval;
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >> 16 == 0
+ && (DATA_REG_P (operands[0])
+ || offsettable_memref_p (operands[0]))
+ && !TARGET_COLDFIRE)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[0] = adjust_address (operands[0], HImode, 2);
+ /* Do not delete a following tstl %0 insn; that would be incorrect. */
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) == 0xffff)
+ return "mov%.w %2,%0";
+ return "or%.w %2,%0";
+ }
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
+ && (DATA_REG_P (operands[0])
+ || offsettable_memref_p (operands[0])))
+ {
+ if (DATA_REG_P (operands[0]))
+ operands[1] = GEN_INT (logval);
+ else
+ {
+ operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
+ operands[1] = GEN_INT (logval % 8);
+ }
+ CC_STATUS_INIT;
+ return "bset %1,%0";
+ }
+ return "or%.l %2,%0";
+}
+
+const char *
+output_xorsi3 (rtx *operands)
+{
+ register int logval;
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >> 16 == 0
+ && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
+ && !TARGET_COLDFIRE)
+ {
+ if (! DATA_REG_P (operands[0]))
+ operands[0] = adjust_address (operands[0], HImode, 2);
+ /* Do not delete a following tstl %0 insn; that would be incorrect. */
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) == 0xffff)
+ return "not%.w %0";
+ return "eor%.w %2,%0";
+ }
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
+ && (DATA_REG_P (operands[0])
+ || offsettable_memref_p (operands[0])))
+ {
+ if (DATA_REG_P (operands[0]))
+ operands[1] = GEN_INT (logval);
+ else
+ {
+ operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
+ operands[1] = GEN_INT (logval % 8);
+ }
+ CC_STATUS_INIT;
+ return "bchg %1,%0";
+ }
+ return "eor%.l %2,%0";
+}
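+
+/* The ior and xor helpers follow the same shape; e.g. (illustrative):
+
+     d0 |= 0x80  ->  bset #7,%d0
+     d0 ^= 0x40  ->  bchg #6,%d0
+*/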
+
+/* Return the instruction that should be used for a call to address X,
+ which is known to be in operand 0. */
+
+const char *
+output_call (rtx x)
+{
+ if (symbolic_operand (x, VOIDmode))
+ return m68k_symbolic_call;
+ else
+ return "jsr %a0";
+}
+
+/* Likewise sibling calls. */
+
+const char *
+output_sibcall (rtx x)
+{
+ if (symbolic_operand (x, VOIDmode))
+ return m68k_symbolic_jump;
+ else
+ return "jmp %a0";
+}
+
+static void
+m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ rtx this_slot, offset, addr, mem, insn, tmp;
+
+ /* Avoid clobbering the struct value reg by using the
+ static chain reg as a temporary. */
+ tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
+
+ /* Pretend to be a post-reload pass while generating rtl. */
+ reload_completed = 1;
+
+ /* The "this" pointer is stored at 4(%sp). */
+ this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
+
+ /* Add DELTA to THIS. */
+ if (delta != 0)
+ {
+ /* Make the offset a legitimate operand for memory addition. */
+ offset = GEN_INT (delta);
+ if ((delta < -8 || delta > 8)
+ && (TARGET_COLDFIRE || USE_MOVQ (delta)))
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
+ offset = gen_rtx_REG (Pmode, D0_REG);
+ }
+ emit_insn (gen_add3_insn (copy_rtx (this_slot),
+ copy_rtx (this_slot), offset));
+ }
+
+ /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
+ if (vcall_offset != 0)
+ {
+ /* Set the static chain register to *THIS. */
+ emit_move_insn (tmp, this_slot);
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
+
+ /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
+ addr = plus_constant (tmp, vcall_offset);
+ if (!m68k_legitimate_address_p (Pmode, addr, true))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
+ addr = tmp;
+ }
+
+ /* Load the offset into %d0 and add it to THIS. */
+ emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
+ gen_rtx_MEM (Pmode, addr));
+ emit_insn (gen_add3_insn (copy_rtx (this_slot),
+ copy_rtx (this_slot),
+ gen_rtx_REG (Pmode, D0_REG)));
+ }
+
+ /* Jump to the target function. Use a sibcall if direct jumps are
+ allowed, otherwise load the address into a register first. */
+ mem = DECL_RTL (function);
+ if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
+ {
+ gcc_assert (flag_pic);
+
+ if (!TARGET_SEP_DATA)
+ {
+ /* Use the static chain register as a temporary (call-clobbered)
+ GOT pointer for this function. We can use the static chain
+ register because it isn't live on entry to the thunk. */
+ SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
+ emit_insn (gen_load_got (pic_offset_table_rtx));
+ }
+ legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
+ mem = replace_equiv_address (mem, tmp);
+ }
+ insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
+ SIBLING_CALL_P (insn) = 1;
+
+ /* Run just enough of rest_of_compilation. */
+ insn = get_insns ();
+ split_all_insns_noflow ();
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+
+ /* Clean up the vars set above. */
+ reload_completed = 0;
+
+ /* Restore the original PIC register. */
+ if (flag_pic)
+ SET_REGNO (pic_offset_table_rtx, PIC_REG);
+}
+
+/* Worker function for TARGET_STRUCT_VALUE_RTX. */
+
+static rtx
+m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
+}
+
+/* Return nonzero if register old_reg can be renamed to register new_reg. */
+int
+m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
+ unsigned int new_reg)
+{
+
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+
+ if ((m68k_get_function_kind (current_function_decl)
+ == m68k_fk_interrupt_handler)
+ && !df_regs_ever_live_p (new_reg))
+ return 0;
+
+ return 1;
+}
+
+/* Value is true if hard register REGNO can hold a value of machine-mode
+   MODE.  On the 68000, we let the cpu registers hold any mode, but
+   restrict the 68881 registers to floating-point modes.  */
+
+bool
+m68k_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ if (DATA_REGNO_P (regno))
+ {
+      /* Data registers can hold an aggregate if it fits.  */
+ if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
+ return true;
+ }
+ else if (ADDRESS_REGNO_P (regno))
+ {
+ if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
+ return true;
+ }
+ else if (FP_REGNO_P (regno))
+ {
+      /* FPU registers can hold a float or complex float of long double
+	 size or smaller.  */
+ if ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
+ return true;
+ }
+ return false;
+}
+
+/* Implement SECONDARY_RELOAD_CLASS. */
+
+enum reg_class
+m68k_secondary_reload_class (enum reg_class rclass,
+ enum machine_mode mode, rtx x)
+{
+ int regno;
+
+ regno = true_regnum (x);
+
+ /* If one operand of a movqi is an address register, the other
+ operand must be a general register or constant. Other types
+ of operand must be reloaded through a data register. */
+ if (GET_MODE_SIZE (mode) == 1
+ && reg_classes_intersect_p (rclass, ADDR_REGS)
+ && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
+ return DATA_REGS;
+
+ /* PC-relative addresses must be loaded into an address register first. */
+ if (TARGET_PCREL
+ && !reg_class_subset_p (rclass, ADDR_REGS)
+ && symbolic_operand (x, VOIDmode))
+ return ADDR_REGS;
+
+ return NO_REGS;
+}
+
+/* Implement PREFERRED_RELOAD_CLASS. */
+
+enum reg_class
+m68k_preferred_reload_class (rtx x, enum reg_class rclass)
+{
+ enum reg_class secondary_class;
+
+ /* If RCLASS might need a secondary reload, try restricting it to
+ a class that doesn't. */
+ secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
+ if (secondary_class != NO_REGS
+ && reg_class_subset_p (secondary_class, rclass))
+ return secondary_class;
+
+ /* Prefer to use moveq for in-range constants. */
+ if (GET_CODE (x) == CONST_INT
+ && reg_class_subset_p (DATA_REGS, rclass)
+ && IN_RANGE (INTVAL (x), -0x80, 0x7f))
+ return DATA_REGS;
+
+ /* ??? Do we really need this now? */
+ if (GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
+ return FP_REGS;
+
+ return NO_REGS;
+ }
+
+ return rclass;
+}
+
+/* Return floating point values in a 68881 register. This makes 68881 code
+ a little bit faster. It also makes -msoft-float code incompatible with
+ hard-float code, so people have to be careful not to mix the two.
+ For ColdFire it was decided the ABI incompatibility is undesirable.
+ If there is need for a hard-float ABI it is probably worth doing it
+ properly and also passing function arguments in FP registers. */
+rtx
+m68k_libcall_value (enum machine_mode mode)
+{
+ switch (mode) {
+ case SFmode:
+ case DFmode:
+ case XFmode:
+ if (TARGET_68881)
+ return gen_rtx_REG (mode, FP0_REG);
+ break;
+ default:
+ break;
+ }
+
+ return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
+}
+
+/* Location in which function value is returned.
+ NOTE: Due to differences in ABIs, don't call this function directly,
+ use FUNCTION_VALUE instead. */
+rtx
+m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+
+ mode = TYPE_MODE (valtype);
+ switch (mode) {
+ case SFmode:
+ case DFmode:
+ case XFmode:
+ if (TARGET_68881)
+ return gen_rtx_REG (mode, FP0_REG);
+ break;
+ default:
+ break;
+ }
+
+ /* If the function returns a pointer, push that into %a0. */
+ if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
+ /* For compatibility with the large body of existing code which
+ does not always properly declare external functions returning
+ pointer types, the m68k/SVR4 convention is to copy the value
+ returned for pointer functions from a0 to d0 in the function
+ epilogue, so that callers that have neglected to properly
+ declare the callee can still find the correct return value in
+ d0. */
+ return gen_rtx_PARALLEL
+ (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, A0_REG),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, D0_REG),
+ const0_rtx)));
+ else if (POINTER_TYPE_P (valtype))
+ return gen_rtx_REG (mode, A0_REG);
+ else
+ return gen_rtx_REG (mode, D0_REG);
+}
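+
+/* Illustratively, a function declared as returning "char *" therefore
+   reports its value in both %a0 and %d0 via the PARALLEL above, so both
+   SVR4-style callers (reading %a0) and traditional callers (reading %d0)
+   find it.  */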
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
+static bool
+m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode = TYPE_MODE (type);
+
+ if (mode == BLKmode)
+ return true;
+
+ /* If TYPE's known alignment is less than the alignment of MODE that
+ would contain the structure, then return in memory. We need to
+ do so to maintain the compatibility between code compiled with
+ -mstrict-align and that compiled with -mno-strict-align. */
+ if (AGGREGATE_TYPE_P (type)
+ && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
+ return true;
+
+ return false;
+}
+#endif
+
+/* CPU to schedule the program for. */
+enum attr_cpu m68k_sched_cpu;
+
+/* MAC to schedule the program for. */
+enum attr_mac m68k_sched_mac;
+
+/* Operand type. */
+enum attr_op_type
+ {
+ /* No operand. */
+ OP_TYPE_NONE,
+
+ /* Integer register. */
+ OP_TYPE_RN,
+
+ /* FP register. */
+ OP_TYPE_FPN,
+
+ /* Implicit mem reference (e.g. stack). */
+ OP_TYPE_MEM1,
+
+ /* Memory without offset or indexing. EA modes 2, 3 and 4. */
+ OP_TYPE_MEM234,
+
+ /* Memory with offset but without indexing. EA mode 5. */
+ OP_TYPE_MEM5,
+
+ /* Memory with indexing. EA mode 6. */
+ OP_TYPE_MEM6,
+
+ /* Memory referenced by absolute address. EA mode 7. */
+ OP_TYPE_MEM7,
+
+ /* Immediate operand that doesn't require extension word. */
+ OP_TYPE_IMM_Q,
+
+ /* Immediate 16 bit operand. */
+ OP_TYPE_IMM_W,
+
+ /* Immediate 32 bit operand. */
+ OP_TYPE_IMM_L
+ };
+
+/* Return the type of memory that ADDR_RTX refers to.  */
+static enum attr_op_type
+sched_address_type (enum machine_mode mode, rtx addr_rtx)
+{
+ struct m68k_address address;
+
+ if (symbolic_operand (addr_rtx, VOIDmode))
+ return OP_TYPE_MEM7;
+
+ if (!m68k_decompose_address (mode, addr_rtx,
+ reload_completed, &address))
+ {
+ gcc_assert (!reload_completed);
+      /* Reload will likely fix the address to be in a register.  */
+ return OP_TYPE_MEM234;
+ }
+
+ if (address.scale != 0)
+ return OP_TYPE_MEM6;
+
+ if (address.base != NULL_RTX)
+ {
+ if (address.offset == NULL_RTX)
+ return OP_TYPE_MEM234;
+
+ return OP_TYPE_MEM5;
+ }
+
+ gcc_assert (address.offset != NULL_RTX);
+
+ return OP_TYPE_MEM7;
+}
+
+/* Return operand X or Y (depending on OPX_P) of INSN.  */
+static rtx
+sched_get_operand (rtx insn, bool opx_p)
+{
+ int i;
+
+ if (recog_memoized (insn) < 0)
+ gcc_unreachable ();
+
+ extract_constrain_insn_cached (insn);
+
+ if (opx_p)
+ i = get_attr_opx (insn);
+ else
+ i = get_attr_opy (insn);
+
+ if (i >= recog_data.n_operands)
+ return NULL;
+
+ return recog_data.operand[i];
+}
+
+/* Return the type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
+   If ADDRESS_P is true, return the type of the memory location the operand
+   refers to.  */
+static enum attr_op_type
+sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
+{
+ rtx op;
+
+ op = sched_get_operand (insn, opx_p);
+
+ if (op == NULL)
+ {
+ gcc_assert (!reload_completed);
+ return OP_TYPE_RN;
+ }
+
+ if (address_p)
+ return sched_address_type (QImode, op);
+
+ if (memory_operand (op, VOIDmode))
+ return sched_address_type (GET_MODE (op), XEXP (op, 0));
+
+ if (register_operand (op, VOIDmode))
+ {
+ if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
+ || (reload_completed && FP_REG_P (op)))
+ return OP_TYPE_FPN;
+
+ return OP_TYPE_RN;
+ }
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ int ival;
+
+ ival = INTVAL (op);
+
+ /* Check for quick constants. */
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALUQ_L:
+ if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ case TYPE_MOVEQ_L:
+ if (USE_MOVQ (ival))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ case TYPE_MOV3Q_L:
+ if (valid_mov3q_const (ival))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ default:
+ break;
+ }
+
+ if (IN_RANGE (ival, -0x8000, 0x7fff))
+ return OP_TYPE_IMM_W;
+
+ return OP_TYPE_IMM_L;
+ }
+
+ if (GET_CODE (op) == CONST_DOUBLE)
+ {
+ switch (GET_MODE (op))
+ {
+ case SFmode:
+ return OP_TYPE_IMM_W;
+
+ case VOIDmode:
+ case DFmode:
+ return OP_TYPE_IMM_L;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (GET_CODE (op) == CONST
+ || symbolic_operand (op, VOIDmode)
+ || LABEL_P (op))
+ {
+ switch (GET_MODE (op))
+ {
+ case QImode:
+ return OP_TYPE_IMM_Q;
+
+ case HImode:
+ return OP_TYPE_IMM_W;
+
+ case SImode:
+ return OP_TYPE_IMM_L;
+
+ default:
+ if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
+ /* Just a guess. */
+ return OP_TYPE_IMM_W;
+
+ return OP_TYPE_IMM_L;
+ }
+ }
+
+ gcc_assert (!reload_completed);
+
+ if (FLOAT_MODE_P (GET_MODE (op)))
+ return OP_TYPE_FPN;
+
+ return OP_TYPE_RN;
+}
+
+/* Implement opx_type attribute.
+   Return the type of INSN's operand X.
+   If ADDRESS_P is true, return the type of the memory location the
+   operand refers to.  */
+enum attr_opx_type
+m68k_sched_attr_opx_type (rtx insn, int address_p)
+{
+ switch (sched_attr_op_type (insn, true, address_p != 0))
+ {
+ case OP_TYPE_RN:
+ return OPX_TYPE_RN;
+
+ case OP_TYPE_FPN:
+ return OPX_TYPE_FPN;
+
+ case OP_TYPE_MEM1:
+ return OPX_TYPE_MEM1;
+
+ case OP_TYPE_MEM234:
+ return OPX_TYPE_MEM234;
+
+ case OP_TYPE_MEM5:
+ return OPX_TYPE_MEM5;
+
+ case OP_TYPE_MEM6:
+ return OPX_TYPE_MEM6;
+
+ case OP_TYPE_MEM7:
+ return OPX_TYPE_MEM7;
+
+ case OP_TYPE_IMM_Q:
+ return OPX_TYPE_IMM_Q;
+
+ case OP_TYPE_IMM_W:
+ return OPX_TYPE_IMM_W;
+
+ case OP_TYPE_IMM_L:
+ return OPX_TYPE_IMM_L;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement opy_type attribute.
+   Return the type of INSN's operand Y.
+   If ADDRESS_P is true, return the type of the memory location the
+   operand refers to.  */
+enum attr_opy_type
+m68k_sched_attr_opy_type (rtx insn, int address_p)
+{
+ switch (sched_attr_op_type (insn, false, address_p != 0))
+ {
+ case OP_TYPE_RN:
+ return OPY_TYPE_RN;
+
+ case OP_TYPE_FPN:
+ return OPY_TYPE_FPN;
+
+ case OP_TYPE_MEM1:
+ return OPY_TYPE_MEM1;
+
+ case OP_TYPE_MEM234:
+ return OPY_TYPE_MEM234;
+
+ case OP_TYPE_MEM5:
+ return OPY_TYPE_MEM5;
+
+ case OP_TYPE_MEM6:
+ return OPY_TYPE_MEM6;
+
+ case OP_TYPE_MEM7:
+ return OPY_TYPE_MEM7;
+
+ case OP_TYPE_IMM_Q:
+ return OPY_TYPE_IMM_Q;
+
+ case OP_TYPE_IMM_W:
+ return OPY_TYPE_IMM_W;
+
+ case OP_TYPE_IMM_L:
+ return OPY_TYPE_IMM_L;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return size of INSN as int. */
+static int
+sched_get_attr_size_int (rtx insn)
+{
+ int size;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IGNORE:
+ /* There should be no references to m68k_sched_attr_size for 'ignore'
+ instructions. */
+ gcc_unreachable ();
+ return 0;
+
+ case TYPE_MUL_L:
+ size = 2;
+ break;
+
+ default:
+ size = 1;
+ break;
+ }
+
+ switch (get_attr_opx_type (insn))
+ {
+ case OPX_TYPE_NONE:
+ case OPX_TYPE_RN:
+ case OPX_TYPE_FPN:
+ case OPX_TYPE_MEM1:
+ case OPX_TYPE_MEM234:
+    case OPX_TYPE_IMM_Q:
+ break;
+
+ case OPX_TYPE_MEM5:
+ case OPX_TYPE_MEM6:
+ /* Here we assume that most absolute references are short. */
+ case OPX_TYPE_MEM7:
+    case OPX_TYPE_IMM_W:
+ ++size;
+ break;
+
+    case OPX_TYPE_IMM_L:
+ size += 2;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ switch (get_attr_opy_type (insn))
+ {
+ case OPY_TYPE_NONE:
+ case OPY_TYPE_RN:
+ case OPY_TYPE_FPN:
+ case OPY_TYPE_MEM1:
+ case OPY_TYPE_MEM234:
+ case OPY_TYPE_IMM_Q:
+ break;
+
+ case OPY_TYPE_MEM5:
+ case OPY_TYPE_MEM6:
+ /* Here we assume that most absolute references are short. */
+ case OPY_TYPE_MEM7:
+ case OPY_TYPE_IMM_W:
+ ++size;
+ break;
+
+ case OPY_TYPE_IMM_L:
+ size += 2;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (size > 3)
+ {
+ gcc_assert (!reload_completed);
+
+ size = 3;
+ }
+
+ return size;
+}
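+
+/* A worked example (illustrative): before reload, "move.l #imm32,(d16,%a0)"
+   would count 1 (base) + 2 (IMM_L) + 1 (MEM5) = 4 words; it is clamped to
+   the 3-word maximum, since no single ColdFire instruction is longer and
+   such combinations must not survive reload.  */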
+
+/* Return size of INSN as attribute enum value. */
+enum attr_size
+m68k_sched_attr_size (rtx insn)
+{
+ switch (sched_get_attr_size_int (insn))
+ {
+ case 1:
+ return SIZE_1;
+
+ case 2:
+ return SIZE_2;
+
+ case 3:
+ return SIZE_3;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Classify operand X or Y (depending on OPX_P) of INSN as a memory
+   reference: return OP_TYPE_MEM6 for indexed memory, OP_TYPE_MEM1 for
+   other memory operands, and OP_TYPE_RN otherwise.  */
+static enum attr_op_type
+sched_get_opxy_mem_type (rtx insn, bool opx_p)
+{
+ if (opx_p)
+ {
+ switch (get_attr_opx_type (insn))
+ {
+ case OPX_TYPE_NONE:
+ case OPX_TYPE_RN:
+ case OPX_TYPE_FPN:
+ case OPX_TYPE_IMM_Q:
+ case OPX_TYPE_IMM_W:
+ case OPX_TYPE_IMM_L:
+ return OP_TYPE_RN;
+
+ case OPX_TYPE_MEM1:
+ case OPX_TYPE_MEM234:
+ case OPX_TYPE_MEM5:
+ case OPX_TYPE_MEM7:
+ return OP_TYPE_MEM1;
+
+ case OPX_TYPE_MEM6:
+ return OP_TYPE_MEM6;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ switch (get_attr_opy_type (insn))
+ {
+ case OPY_TYPE_NONE:
+ case OPY_TYPE_RN:
+ case OPY_TYPE_FPN:
+ case OPY_TYPE_IMM_Q:
+ case OPY_TYPE_IMM_W:
+ case OPY_TYPE_IMM_L:
+ return OP_TYPE_RN;
+
+ case OPY_TYPE_MEM1:
+ case OPY_TYPE_MEM234:
+ case OPY_TYPE_MEM5:
+ case OPY_TYPE_MEM7:
+ return OP_TYPE_MEM1;
+
+ case OPY_TYPE_MEM6:
+ return OP_TYPE_MEM6;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}
+
+/* Implement op_mem attribute. */
+enum attr_op_mem
+m68k_sched_attr_op_mem (rtx insn)
+{
+ enum attr_op_type opx;
+ enum attr_op_type opy;
+
+ opx = sched_get_opxy_mem_type (insn, true);
+ opy = sched_get_opxy_mem_type (insn, false);
+
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
+ return OP_MEM_00;
+
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
+ {
+ switch (get_attr_opx_access (insn))
+ {
+ case OPX_ACCESS_R:
+ return OP_MEM_10;
+
+ case OPX_ACCESS_W:
+ return OP_MEM_01;
+
+ case OPX_ACCESS_RW:
+ return OP_MEM_11;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
+ {
+ switch (get_attr_opx_access (insn))
+ {
+ case OPX_ACCESS_R:
+ return OP_MEM_I0;
+
+ case OPX_ACCESS_W:
+ return OP_MEM_0I;
+
+ case OPX_ACCESS_RW:
+ return OP_MEM_I1;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
+ return OP_MEM_10;
+
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
+ {
+ switch (get_attr_opx_access (insn))
+ {
+ case OPX_ACCESS_W:
+ return OP_MEM_11;
+
+ default:
+ gcc_assert (!reload_completed);
+ return OP_MEM_11;
+ }
+ }
+
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
+ {
+ switch (get_attr_opx_access (insn))
+ {
+ case OPX_ACCESS_W:
+ return OP_MEM_1I;
+
+ default:
+ gcc_assert (!reload_completed);
+ return OP_MEM_1I;
+ }
+ }
+
+ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
+ return OP_MEM_I0;
+
+ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
+ {
+ switch (get_attr_opx_access (insn))
+ {
+ case OPX_ACCESS_W:
+ return OP_MEM_I1;
+
+ default:
+ gcc_assert (!reload_completed);
+ return OP_MEM_I1;
+ }
+ }
+
+ gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
+ gcc_assert (!reload_completed);
+ return OP_MEM_I1;
+}
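+
+/* For example (illustrative): "move.l (%a0),%d0" has a plain memory source
+   and a register destination, giving OP_MEM_10 (one read, no write), while
+   "move.l %d0,(%a0)" gives OP_MEM_01 (no read, one write).  */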
+
+/* Jump instruction types.  Indexed by INSN_UID.
+   The same rtl insn can be expanded into different asm instructions
+   depending on the cc0_status.  To properly determine the type of jump
+   instructions we scan the instruction stream and map jump types into this
+   array.  */
+static enum attr_type *sched_branch_type;
+
+/* Return the type of the jump insn. */
+enum attr_type
+m68k_sched_branch_type (rtx insn)
+{
+ enum attr_type type;
+
+ type = sched_branch_type[INSN_UID (insn)];
+
+ gcc_assert (type != 0);
+
+ return type;
+}
+
+/* Data for ColdFire V4 index bypass.
+ Producer modifies register that is used as index in consumer with
+ specified scale. */
+static struct
+{
+ /* Producer instruction. */
+ rtx pro;
+
+ /* Consumer instruction. */
+ rtx con;
+
+  /* Scale of the indexed memory access within the consumer,
+     or zero if the bypass should not be effective at the moment.  */
+ int scale;
+} sched_cfv4_bypass_data;
+
+/* An empty state that is used in m68k_sched_adjust_cost. */
+static state_t sched_adjust_cost_state;
+
+/* Implement adjust_cost scheduler hook.
+ Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
+static int
+m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
+ int cost)
+{
+ int delay;
+
+ if (recog_memoized (def_insn) < 0
+ || recog_memoized (insn) < 0)
+ return cost;
+
+ if (sched_cfv4_bypass_data.scale == 1)
+ /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
+ {
+ /* haifa-sched.c: insn_cost () calls bypass_p () just before
+ targetm.sched.adjust_cost (). Hence, we can be relatively sure
+ that the data in sched_cfv4_bypass_data is up to date. */
+ gcc_assert (sched_cfv4_bypass_data.pro == def_insn
+ && sched_cfv4_bypass_data.con == insn);
+
+ if (cost < 3)
+ cost = 3;
+
+ sched_cfv4_bypass_data.pro = NULL;
+ sched_cfv4_bypass_data.con = NULL;
+ sched_cfv4_bypass_data.scale = 0;
+ }
+ else
+ gcc_assert (sched_cfv4_bypass_data.pro == NULL
+ && sched_cfv4_bypass_data.con == NULL
+ && sched_cfv4_bypass_data.scale == 0);
+
+  /* Don't try to issue INSN earlier than the DFA permits.
+     This is especially useful for instructions that write to memory,
+     as their true dependence (default) latency is better set to 0
+     to work around alias analysis limitations.
+     This is, in fact, a machine independent tweak, so, probably,
+     it should be moved to haifa-sched.c: insn_cost ().  */
+ delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
+ if (delay > cost)
+ cost = delay;
+
+ return cost;
+}
+
+/* Return the maximal number of insns that can be scheduled on
+   a single cycle.  */
+static int
+m68k_sched_issue_rate (void)
+{
+ switch (m68k_sched_cpu)
+ {
+ case CPU_CFV1:
+ case CPU_CFV2:
+ case CPU_CFV3:
+ return 1;
+
+ case CPU_CFV4:
+ return 2;
+
+ default:
+ gcc_unreachable ();
+ return 0;
+ }
+}
+
+/* Maximal length of instruction for current CPU.
+ E.g. it is 3 for any ColdFire core. */
+static int max_insn_size;
+
+/* Data to model instruction buffer of CPU. */
+struct _sched_ib
+{
+  /* True if the instruction buffer is modeled for the current CPU.  */
+ bool enabled_p;
+
+ /* Size of the instruction buffer in words. */
+ int size;
+
+ /* Number of filled words in the instruction buffer. */
+ int filled;
+
+  /* Additional information about instruction buffer for CPUs that have
+     a buffer of instruction records, rather than a plain buffer
+     of instruction words.  */
+ struct _sched_ib_records
+ {
+ /* Size of buffer in records. */
+ int n_insns;
+
+    /* Array to hold data on adjustments made to the size of the buffer.  */
+ int *adjust;
+
+ /* Index of the above array. */
+ int adjust_index;
+ } records;
+
+ /* An insn that reserves (marks empty) one word in the instruction buffer. */
+ rtx insn;
+};
+
+static struct _sched_ib sched_ib;
+
+/* ID of memory unit. */
+static int sched_mem_unit_code;
+
+/* Implementation of the targetm.sched.variable_issue () hook.
+ It is called after INSN was issued. It returns the number of insns
+ that can possibly get scheduled on the current cycle.
+ It is used here to determine the effect of INSN on the instruction
+ buffer. */
+static int
+m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ rtx insn, int can_issue_more)
+{
+ int insn_size;
+
+ if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
+ {
+ switch (m68k_sched_cpu)
+ {
+ case CPU_CFV1:
+ case CPU_CFV2:
+ insn_size = sched_get_attr_size_int (insn);
+ break;
+
+ case CPU_CFV3:
+ insn_size = sched_get_attr_size_int (insn);
+
+	  /* ColdFire V3 and V4 cores have instruction buffers that can
+	     accumulate up to 8 instructions regardless of instructions'
+	     sizes.  So we should take care not to "prefetch" 24 one-word
+	     or 12 two-word instructions.
+	     To model this behavior we temporarily decrease the size of the
+	     buffer by (max_insn_size - insn_size) for the next 7 instructions.  */
+ {
+ int adjust;
+
+ adjust = max_insn_size - insn_size;
+ sched_ib.size -= adjust;
+
+ if (sched_ib.filled > sched_ib.size)
+ sched_ib.filled = sched_ib.size;
+
+ sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
+ }
+
+ ++sched_ib.records.adjust_index;
+ if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
+ sched_ib.records.adjust_index = 0;
+
+          /* Undo the adjustment made 7 instructions ago.  */
+ sched_ib.size
+ += sched_ib.records.adjust[sched_ib.records.adjust_index];
+
+ break;
+
+ case CPU_CFV4:
+ gcc_assert (!sched_ib.enabled_p);
+ insn_size = 0;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_assert (insn_size <= sched_ib.filled);
+ --can_issue_more;
+ }
+ else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (insn)) >= 0)
+ insn_size = sched_ib.filled;
+ else
+ insn_size = 0;
+
+ sched_ib.filled -= insn_size;
+
+ return can_issue_more;
+}
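+
+/* Worked example of the CFV3 adjustment window above: with
+   max_insn_size == 3 and an 8-record buffer, the nominal size is
+   24 words.  Each 1-word insn issued subtracts an adjustment of 2,
+   so eight consecutive 1-word insns shrink the modeled size to
+   24 - 8 * 2 = 8 words; from then on each new adjustment is cancelled
+   by the one made 8 insns earlier.  This keeps the model from
+   "prefetching" more than 8 instructions at a time.  */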
+
+/* Return the number of instructions the scheduler should look ahead
+   when choosing the best insn to issue.  */
+static int
+m68k_sched_first_cycle_multipass_dfa_lookahead (void)
+{
+ return m68k_sched_issue_rate () - 1;
+}
+
+/* Implementation of targetm.sched.init_global () hook.
+ It is invoked once per scheduling pass and is used here
+ to initialize scheduler constants. */
+static void
+m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ int n_insns ATTRIBUTE_UNUSED)
+{
+ /* Init branch types. */
+ {
+ rtx insn;
+
+ sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);
+
+ for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (JUMP_P (insn))
+ /* !!! FIXME: Implement real scan here. */
+ sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
+ }
+ }
+
+#ifdef ENABLE_CHECKING
+ /* Check that all instructions have DFA reservations and
+ that all instructions can be issued from a clean state. */
+ {
+ rtx insn;
+ state_t state;
+
+ state = alloca (state_size ());
+
+ for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn) && recog_memoized (insn) >= 0)
+ {
+ gcc_assert (insn_has_dfa_reservation_p (insn));
+
+ state_reset (state);
+ if (state_transition (state, insn) >= 0)
+ gcc_unreachable ();
+ }
+ }
+ }
+#endif
+
+  /* Set up the target CPU.  */
+
+  /* ColdFire V4 has a set of features to keep its instruction buffer full
+     (e.g., a separate memory bus for instructions) and, hence, we do not
+     model the buffer for this CPU.  */
+ sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
+
+ switch (m68k_sched_cpu)
+ {
+ case CPU_CFV4:
+ sched_ib.filled = 0;
+
+ /* FALLTHRU */
+
+ case CPU_CFV1:
+ case CPU_CFV2:
+ max_insn_size = 3;
+ sched_ib.records.n_insns = 0;
+ sched_ib.records.adjust = NULL;
+ break;
+
+ case CPU_CFV3:
+ max_insn_size = 3;
+ sched_ib.records.n_insns = 8;
+ sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
+
+ sched_adjust_cost_state = xmalloc (state_size ());
+ state_reset (sched_adjust_cost_state);
+
+ start_sequence ();
+ emit_insn (gen_ib ());
+ sched_ib.insn = get_insns ();
+ end_sequence ();
+}
+
+/* Scheduling pass is now finished. Free/reset static variables. */
+static void
+m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED)
+{
+ sched_ib.insn = NULL;
+
+ free (sched_adjust_cost_state);
+ sched_adjust_cost_state = NULL;
+
+ sched_mem_unit_code = 0;
+
+ free (sched_ib.records.adjust);
+ sched_ib.records.adjust = NULL;
+ sched_ib.records.n_insns = 0;
+ max_insn_size = 0;
+
+ free (sched_branch_type);
+ sched_branch_type = NULL;
+}
+
+/* Implementation of targetm.sched.init () hook.
+   It is invoked each time the scheduler starts on a new block
+   (a basic block or an extended basic block).  */
+static void
+m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ int n_insns ATTRIBUTE_UNUSED)
+{
+ switch (m68k_sched_cpu)
+ {
+ case CPU_CFV1:
+ case CPU_CFV2:
+ sched_ib.size = 6;
+ break;
+
+ case CPU_CFV3:
+ sched_ib.size = sched_ib.records.n_insns * max_insn_size;
+
+ memset (sched_ib.records.adjust, 0,
+ sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
+ sched_ib.records.adjust_index = 0;
+ break;
+
+ case CPU_CFV4:
+ gcc_assert (!sched_ib.enabled_p);
+ sched_ib.size = 0;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (sched_ib.enabled_p)
+    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
+       the first cycle.  Work around that.  */
+ sched_ib.filled = -2;
+}
+
+/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
+   It is invoked just before the current cycle finishes and is used
+   here to track whether the instruction buffer received its two words
+   this cycle.  */
+static void
+m68k_sched_dfa_pre_advance_cycle (void)
+{
+ if (!sched_ib.enabled_p)
+ return;
+
+ if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
+ {
+ sched_ib.filled += 2;
+
+ if (sched_ib.filled > sched_ib.size)
+ sched_ib.filled = sched_ib.size;
+ }
+}
+
+/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
+   It is invoked just after a new cycle begins and is used here to set
+   up the number of filled words in the instruction buffer, so that
+   instructions whose words have not all been prefetched are stalled
+   for a cycle.  */
+static void
+m68k_sched_dfa_post_advance_cycle (void)
+{
+ int i;
+
+ if (!sched_ib.enabled_p)
+ return;
+
+  /* Set up the number of prefetched instruction words in the
+     instruction buffer.  */
+ i = max_insn_size - sched_ib.filled;
+
+ while (--i >= 0)
+ {
+ if (state_transition (curr_state, sched_ib.insn) >= 0)
+ gcc_unreachable ();
+ }
+}
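+
+/* Taken together, the two hooks above model instruction fetch as
+   follows: at the end of every cycle in which the memory unit was
+   idle, the buffer gains two words (capped at its size); at the start
+   of the next cycle, one fake `ib' insn is issued for each word still
+   missing from a maximal-size instruction, so that an insn whose words
+   have not all been prefetched is stalled for a cycle.  */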
+
+/* Return the X or Y operand of INSN (depending on OPX_P)
+   if it is an integer register, or NULL otherwise.  */
+static rtx
+sched_get_reg_operand (rtx insn, bool opx_p)
+{
+ rtx op = NULL;
+
+ if (opx_p)
+ {
+ if (get_attr_opx_type (insn) == OPX_TYPE_RN)
+ {
+ op = sched_get_operand (insn, true);
+ gcc_assert (op != NULL);
+
+ if (!reload_completed && !REG_P (op))
+ return NULL;
+ }
+ }
+ else
+ {
+ if (get_attr_opy_type (insn) == OPY_TYPE_RN)
+ {
+ op = sched_get_operand (insn, false);
+ gcc_assert (op != NULL);
+
+ if (!reload_completed && !REG_P (op))
+ return NULL;
+ }
+ }
+
+ return op;
+}
+
+/* Return true if the X or Y operand of INSN (depending on OPX_P)
+   is a MEM.  */
+static bool
+sched_mem_operand_p (rtx insn, bool opx_p)
+{
+ switch (sched_get_opxy_mem_type (insn, opx_p))
+ {
+ case OP_TYPE_MEM1:
+ case OP_TYPE_MEM6:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Return the MEM operand of INSN that is read (MUST_READ_P) and/or
+   written (MUST_WRITE_P).  */
+static rtx
+sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
+{
+ bool opx_p;
+ bool opy_p;
+
+ opx_p = false;
+ opy_p = false;
+
+ if (must_read_p)
+ {
+ opx_p = true;
+ opy_p = true;
+ }
+
+ if (must_write_p)
+ {
+ opx_p = true;
+ opy_p = false;
+ }
+
+ if (opy_p && sched_mem_operand_p (insn, false))
+ return sched_get_operand (insn, false);
+
+ if (opx_p && sched_mem_operand_p (insn, true))
+ return sched_get_operand (insn, true);
+
+ gcc_unreachable ();
+ return NULL;
+}
+
+/* Return non-zero if PRO modifies a register that is used as part of
+   an address in CON.  */
+int
+m68k_sched_address_bypass_p (rtx pro, rtx con)
+{
+ rtx pro_x;
+ rtx con_mem_read;
+
+ pro_x = sched_get_reg_operand (pro, true);
+ if (pro_x == NULL)
+ return 0;
+
+ con_mem_read = sched_get_mem_operand (con, true, false);
+ gcc_assert (con_mem_read != NULL);
+
+ if (reg_mentioned_p (pro_x, con_mem_read))
+ return 1;
+
+ return 0;
+}
+
+/* Helper function for m68k_sched_indexed_address_bypass_p.
+   If PRO modifies a register used as an index in CON, return the scale
+   of the indexed memory access in CON.  Return zero otherwise.  */
+static int
+sched_get_indexed_address_scale (rtx pro, rtx con)
+{
+ rtx reg;
+ rtx mem;
+ struct m68k_address address;
+
+ reg = sched_get_reg_operand (pro, true);
+ if (reg == NULL)
+ return 0;
+
+ mem = sched_get_mem_operand (con, true, false);
+ gcc_assert (mem != NULL && MEM_P (mem));
+
+ if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
+ &address))
+ gcc_unreachable ();
+
+ if (REGNO (reg) == REGNO (address.index))
+ {
+ gcc_assert (address.scale != 0);
+ return address.scale;
+ }
+
+ return 0;
+}
+
+/* Return non-zero if PRO modifies a register that is used as an index
+   with scale 2 or 4 in CON.  */
+int
+m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
+{
+ gcc_assert (sched_cfv4_bypass_data.pro == NULL
+ && sched_cfv4_bypass_data.con == NULL
+ && sched_cfv4_bypass_data.scale == 0);
+
+ switch (sched_get_indexed_address_scale (pro, con))
+ {
+ case 1:
+ /* We can't have a variable latency bypass, so
+ remember to adjust the insn cost in adjust_cost hook. */
+ sched_cfv4_bypass_data.pro = pro;
+ sched_cfv4_bypass_data.con = con;
+ sched_cfv4_bypass_data.scale = 1;
+ return 0;
+
+ case 2:
+ case 4:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
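+
+/* Worked example (illustrative insns): for a producer "add.l %d1,%d0"
+   feeding a consumer "move.l (%a1,%d0.l*4),%d2", the index scale is 4
+   and the bypass applies.  With a scale of 1 the bypass is rejected
+   here, and m68k_sched_adjust_cost enforces a latency of at least 3
+   cycles instead.  */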
+
+/* We generate a two-instruction program at M_TRAMP:
+	movea.l &CHAIN_VALUE,%a0
+	jmp FNADDR
+   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */
+
+static void
+m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ rtx mem;
+
+ gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
+
+ mem = adjust_address (m_tramp, HImode, 0);
+  emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
+ mem = adjust_address (m_tramp, SImode, 2);
+ emit_move_insn (mem, chain_value);
+
+ mem = adjust_address (m_tramp, HImode, 6);
+  emit_move_insn (mem, GEN_INT (0x4EF9));
+ mem = adjust_address (m_tramp, SImode, 8);
+ emit_move_insn (mem, fnaddr);
+
+ FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
+}
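+
+/* With the default STATIC_CHAIN_REGNUM of %a0 (A0_REG), the resulting
+   12-byte trampoline is laid out as:
+
+     offset 0:  0x207c       movea.l &CHAIN_VALUE,%a0
+     offset 2:  CHAIN_VALUE  (4 bytes)
+     offset 6:  0x4ef9       jmp FNADDR
+     offset 8:  FNADDR       (4 bytes)  */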
+
+/* On the 68000, the RTS insn cannot pop anything.
+ On the 68010, the RTD insn may be used to pop them if the number
+ of args is fixed, but if the number is variable then the caller
+ must pop them all. RTD can't be used for library calls now
+ because the library is compiled with the Unix compiler.
+ Use of RTD is a selectable option, since it is incompatible with
+ standard Unix calling sequences. If the option is not selected,
+ the caller must always pop the args. */
+
+static int
+m68k_return_pops_args (tree fundecl, tree funtype, int size)
+{
+ return ((TARGET_RTD
+ && (!fundecl
+ || TREE_CODE (fundecl) != IDENTIFIER_NODE)
+ && (!stdarg_p (funtype)))
+ ? size : 0);
+}
+
+/* Make sure everything's fine if we *don't* have a given processor.
+ This assumes that putting a register in fixed_regs will keep the
+ compiler's mitts completely off it. We don't bother to zero it out
+ of register classes. */
+
+static void
+m68k_conditional_register_usage (void)
+{
+ int i;
+ HARD_REG_SET x;
+ if (!TARGET_HARD_FLOAT)
+ {
+ COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (x, i))
+ fixed_regs[i] = call_used_regs[i] = 1;
+ }
+ if (flag_pic)
+ fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
+}
+
+#include "gt-m68k.h"
diff --git a/gcc/config/m68k/m68k.h b/gcc/config/m68k/m68k.h
new file mode 100644
index 000000000..71b7c4f27
--- /dev/null
+++ b/gcc/config/m68k/m68k.h
@@ -0,0 +1,1034 @@
+/* Definitions of target machine for GCC for Motorola 680x0/ColdFire.
+ Copyright (C) 1987, 1988, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* We need to have MOTOROLA always defined (either 0 or 1) because we use
+ if-statements and ?: on it. This way we have compile-time error checking
+ for both the MOTOROLA and MIT code paths. We do rely on the host compiler
+ to optimize away all constant tests. */
+#if MOTOROLA /* Use the Motorola assembly syntax. */
+# define TARGET_VERSION fprintf (stderr, " (68k, Motorola syntax)")
+#else
+# define MOTOROLA 0 /* Use the MIT assembly syntax. */
+# define TARGET_VERSION fprintf (stderr, " (68k, MIT syntax)")
+#endif
+
+/* Handle --with-cpu default option from configure script. */
+#define OPTION_DEFAULT_SPECS \
+ { "cpu", "%{!mc68000:%{!m68000:%{!m68302:%{!m68010:%{!mc68020:%{!m68020:\
+%{!m68030:%{!m68040:%{!m68020-40:%{!m68020-60:%{!m68060:%{!mcpu32:\
+%{!m68332:%{!m5200:%{!m5206e:%{!m528x:%{!m5307:%{!m5407:%{!mcfv4e:\
+%{!mcpu=*:%{!march=*:-%(VALUE)}}}}}}}}}}}}}}}}}}}}}" },
+
+/* Pass flags to gas indicating which type of processor we have. This
+ can be simplified when we can rely on the assembler supporting .cpu
+ and .arch directives. */
+
+#define ASM_CPU_SPEC "\
+%{m68851}%{mno-68851} %{m68881}%{mno-68881} %{msoft-float:-mno-float} \
+%{m68000}%{m68302}%{mc68000}%{m68010}%{m68020}%{mc68020}%{m68030}\
+%{m68040}%{m68020-40:-m68040}%{m68020-60:-m68040}\
+%{m68060}%{mcpu32}%{m68332}%{m5200}%{m5206e}%{m528x}%{m5307}%{m5407}%{mcfv4e}\
+%{mcpu=*:-mcpu=%*}%{march=*:-march=%*}\
+"
+#define ASM_PCREL_SPEC "%{fPIC|fpic|mpcrel:--pcrel} \
+ %{msep-data|mid-shared-library:--pcrel} \
+"
+
+#define ASM_SPEC "%(asm_cpu_spec) %(asm_pcrel_spec)"
+
+#define EXTRA_SPECS \
+ { "asm_cpu_spec", ASM_CPU_SPEC }, \
+ { "asm_pcrel_spec", ASM_PCREL_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+
+/* Note that some other tm.h files include this one and then override
+ many of the definitions that relate to assembler syntax. */
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__m68k__"); \
+ builtin_define_std ("mc68000"); \
+ /* The other mc680x0 macros have traditionally been derived \
+ from the tuning setting. For example, -m68020-60 defines \
+ m68060, even though it generates pure 68020 code. */ \
+ switch (m68k_tune) \
+ { \
+ case u68010: \
+ builtin_define_std ("mc68010"); \
+ break; \
+ \
+ case u68020: \
+ builtin_define_std ("mc68020"); \
+ break; \
+ \
+ case u68030: \
+ builtin_define_std ("mc68030"); \
+ break; \
+ \
+ case u68040: \
+ builtin_define_std ("mc68040"); \
+ break; \
+ \
+ case u68060: \
+ builtin_define_std ("mc68060"); \
+ break; \
+ \
+ case u68020_60: \
+ builtin_define_std ("mc68060"); \
+ /* Fall through. */ \
+ case u68020_40: \
+ builtin_define_std ("mc68040"); \
+ builtin_define_std ("mc68030"); \
+ builtin_define_std ("mc68020"); \
+ break; \
+ \
+ case ucpu32: \
+ builtin_define_std ("mc68332"); \
+ builtin_define_std ("mcpu32"); \
+ builtin_define_std ("mc68020"); \
+ break; \
+ \
+ case ucfv1: \
+ builtin_define ("__mcfv1__"); \
+ break; \
+ \
+ case ucfv2: \
+ builtin_define ("__mcfv2__"); \
+ break; \
+ \
+ case ucfv3: \
+ builtin_define ("__mcfv3__"); \
+ break; \
+ \
+ case ucfv4: \
+ builtin_define ("__mcfv4__"); \
+ break; \
+ \
+ case ucfv4e: \
+ builtin_define ("__mcfv4e__"); \
+ break; \
+ \
+ case ucfv5: \
+ builtin_define ("__mcfv5__"); \
+ break; \
+ \
+ default: \
+ break; \
+ } \
+ \
+ if (TARGET_68881) \
+ builtin_define ("__HAVE_68881__"); \
+ \
+ if (TARGET_COLDFIRE) \
+ { \
+ const char *tmp; \
+ \
+ tmp = m68k_cpp_cpu_ident ("cf"); \
+ if (tmp) \
+ builtin_define (tmp); \
+ tmp = m68k_cpp_cpu_family ("cf"); \
+ if (tmp) \
+ builtin_define (tmp); \
+ builtin_define ("__mcoldfire__"); \
+ \
+ if (TARGET_ISAC) \
+ builtin_define ("__mcfisac__"); \
+ else if (TARGET_ISAB) \
+ { \
+ builtin_define ("__mcfisab__"); \
+ /* ISA_B: Legacy 5407 defines. */ \
+ builtin_define ("__mcf5400__"); \
+ builtin_define ("__mcf5407__"); \
+ } \
+ else if (TARGET_ISAAPLUS) \
+ { \
+ builtin_define ("__mcfisaaplus__"); \
+ /* ISA_A+: legacy defines. */ \
+ builtin_define ("__mcf528x__"); \
+ builtin_define ("__mcf5200__"); \
+ } \
+ else \
+ { \
+ builtin_define ("__mcfisaa__"); \
+ /* ISA_A: legacy defines. */ \
+ switch (m68k_tune) \
+ { \
+ case ucfv2: \
+ builtin_define ("__mcf5200__"); \
+ break; \
+ \
+ case ucfv3: \
+ builtin_define ("__mcf5307__"); \
+ builtin_define ("__mcf5300__"); \
+ break; \
+ \
+ default: \
+ break; \
+ } \
+ } \
+ } \
+ \
+ if (TARGET_COLDFIRE_FPU) \
+ builtin_define ("__mcffpu__"); \
+ \
+ if (TARGET_CF_HWDIV) \
+ builtin_define ("__mcfhwdiv__"); \
+ \
+ if (TARGET_FIDOA) \
+ builtin_define ("__mfido__"); \
+ \
+ builtin_assert ("cpu=m68k"); \
+ builtin_assert ("machine=m68k"); \
+ } \
+ while (0)
+
+/* Classify the groups of pseudo-ops used to assemble QI, HI and SI
+ quantities. */
+#define INT_OP_STANDARD 0 /* .byte, .short, .long */
+#define INT_OP_DOT_WORD 1 /* .byte, .word, .long */
+#define INT_OP_NO_DOT 2 /* byte, short, long */
+#define INT_OP_DC 3 /* dc.b, dc.w, dc.l */
+
+/* Set the default. */
+#define INT_OP_GROUP INT_OP_DOT_WORD
+
+/* Bit values used by m68k-devices.def to identify processor capabilities. */
+#define FL_BITFIELD (1 << 0) /* Support bitfield instructions. */
+#define FL_68881 (1 << 1) /* (Default) support for 68881/2. */
+#define FL_COLDFIRE (1 << 2) /* ColdFire processor. */
+#define FL_CF_HWDIV (1 << 3) /* ColdFire hardware divide supported. */
+#define FL_CF_MAC (1 << 4) /* ColdFire MAC unit supported. */
+#define FL_CF_EMAC (1 << 5) /* ColdFire eMAC unit supported. */
+#define FL_CF_EMAC_B (1 << 6) /* ColdFire eMAC-B unit supported. */
+#define FL_CF_USP (1 << 7) /* ColdFire User Stack Pointer supported. */
+#define FL_CF_FPU (1 << 8) /* ColdFire FPU supported. */
+#define FL_ISA_68000 (1 << 9)
+#define FL_ISA_68010 (1 << 10)
+#define FL_ISA_68020 (1 << 11)
+#define FL_ISA_68040 (1 << 12)
+#define FL_ISA_A (1 << 13)
+#define FL_ISA_APLUS (1 << 14)
+#define FL_ISA_B (1 << 15)
+#define FL_ISA_C (1 << 16)
+#define FL_FIDOA (1 << 17)
+#define FL_MMU 0 /* Used by multilib machinery. */
+#define FL_UCLINUX 0 /* Used by multilib machinery. */
+
+#define TARGET_68010 ((m68k_cpu_flags & FL_ISA_68010) != 0)
+#define TARGET_68020 ((m68k_cpu_flags & FL_ISA_68020) != 0)
+#define TARGET_68040 ((m68k_cpu_flags & FL_ISA_68040) != 0)
+#define TARGET_COLDFIRE ((m68k_cpu_flags & FL_COLDFIRE) != 0)
+#define TARGET_COLDFIRE_FPU (m68k_fpu == FPUTYPE_COLDFIRE)
+#define TARGET_68881 (m68k_fpu == FPUTYPE_68881)
+#define TARGET_FIDOA ((m68k_cpu_flags & FL_FIDOA) != 0)
+
+/* Size (in bytes) of FPU registers. */
+#define TARGET_FP_REG_SIZE (TARGET_COLDFIRE ? 8 : 12)
+
+#define TARGET_ISAAPLUS ((m68k_cpu_flags & FL_ISA_APLUS) != 0)
+#define TARGET_ISAB ((m68k_cpu_flags & FL_ISA_B) != 0)
+#define TARGET_ISAC ((m68k_cpu_flags & FL_ISA_C) != 0)
+
+/* Some instructions are common to more than one ISA. */
+#define ISA_HAS_MVS_MVZ (TARGET_ISAB || TARGET_ISAC)
+#define ISA_HAS_FF1 (TARGET_ISAAPLUS || TARGET_ISAC)
+
+#define TUNE_68000 (m68k_tune == u68000)
+#define TUNE_68010 (m68k_tune == u68010)
+#define TUNE_68000_10 (TUNE_68000 || TUNE_68010)
+#define TUNE_68030 (m68k_tune == u68030 \
+ || m68k_tune == u68020_40 \
+ || m68k_tune == u68020_60)
+#define TUNE_68040 (m68k_tune == u68040 \
+ || m68k_tune == u68020_40 \
+ || m68k_tune == u68020_60)
+#define TUNE_68060 (m68k_tune == u68060 || m68k_tune == u68020_60)
+#define TUNE_68040_60 (TUNE_68040 || TUNE_68060)
+#define TUNE_CPU32 (m68k_tune == ucpu32)
+#define TUNE_CFV1 (m68k_tune == ucfv1)
+#define TUNE_CFV2 (m68k_tune == ucfv2)
+#define TUNE_CFV3 (m68k_tune == ucfv3)
+#define TUNE_CFV4 (m68k_tune == ucfv4 || m68k_tune == ucfv4e)
+
+#define TUNE_MAC ((m68k_tune_flags & FL_CF_MAC) != 0)
+#define TUNE_EMAC ((m68k_tune_flags & FL_CF_EMAC) != 0)
+
+/* These are meant to be redefined in the host-dependent files.  */
+#define SUBTARGET_OVERRIDE_OPTIONS
+
+/* target machine storage layout */
+
+/* "long double" is the same as "double" on ColdFire and fido
+ targets. */
+
+#define LONG_DOUBLE_TYPE_SIZE \
+ ((TARGET_COLDFIRE || TARGET_FIDOA) ? 64 : 80)
+
+/* We need to know the size of long double at compile-time in libgcc2. */
+
+#if defined(__mcoldfire__) || defined(__mfido__)
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80
+#endif
+
+/* Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
+ instructions, we get proper intermediate rounding, otherwise we
+ get extended precision results. */
+#define TARGET_FLT_EVAL_METHOD ((TARGET_68040 || ! TARGET_68881) ? 0 : 2)
+
+#define BITS_BIG_ENDIAN 1
+#define BYTES_BIG_ENDIAN 1
+#define WORDS_BIG_ENDIAN 1
+
+#define UNITS_PER_WORD 4
+
+#define PARM_BOUNDARY (TARGET_SHORT ? 16 : 32)
+#define STACK_BOUNDARY 16
+#define FUNCTION_BOUNDARY 16
+#define EMPTY_FIELD_BOUNDARY 16
+/* ColdFire and fido strongly prefer a 32-bit aligned stack. */
+#define PREFERRED_STACK_BOUNDARY \
+ ((TARGET_COLDFIRE || TARGET_FIDOA) ? 32 : 16)
+
+/* No data type wants to be aligned more strictly than this.
+   Most published ABIs say that ints should be aligned on 16-bit
+   boundaries, but CPUs with 32-bit buses get better performance
+   when they are aligned on 32-bit boundaries.  */
+#define BIGGEST_ALIGNMENT (TARGET_ALIGN_INT ? 32 : 16)
+
+#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGNMENT)
+#define M68K_HONOR_TARGET_STRICT_ALIGNMENT 1
+
+#define DWARF_CIE_DATA_ALIGNMENT -2
+
+#define INT_TYPE_SIZE (TARGET_SHORT ? 16 : 32)
+
+/* Define these to avoid dependence on meaning of `int'. */
+#define WCHAR_TYPE "long int"
+#define WCHAR_TYPE_SIZE 32
+
+/* Maximum number of library IDs we permit with -mid-shared-library. */
+#define MAX_LIBRARY_ID 255
+
+
+/* Standard register usage. */
+
+/* For the m68k, we give the data registers numbers 0-7,
+ the address registers numbers 010-017 (8-15),
+ and the 68881 floating point registers numbers 020-027 (16-23).
+ We also have a fake `arg-pointer' register 030 (24) used for
+ register elimination. */
+#define FIRST_PSEUDO_REGISTER 25
+
+/* All m68k targets (except AmigaOS) use %a5 as the PIC register.  */
+#define PIC_OFFSET_TABLE_REGNUM \
+ (!flag_pic ? INVALID_REGNUM \
+ : reload_completed ? REGNO (pic_offset_table_rtx) \
+ : PIC_REG)
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the m68k, only the stack pointer is such.
+ Our fake arg-pointer is obviously fixed as well. */
+#define FIXED_REGISTERS \
+ {/* Data registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ /* Address registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 1, \
+ \
+ /* Floating point registers \
+ (if available). */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ /* Arg pointer. */ \
+ 1 }
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS \
+ {/* Data registers. */ \
+ 1, 1, 0, 0, 0, 0, 0, 0, \
+ \
+ /* Address registers. */ \
+ 1, 1, 0, 0, 0, 0, 0, 1, \
+ \
+ /* Floating point registers \
+ (if available). */ \
+ 1, 1, 0, 0, 0, 0, 0, 0, \
+ \
+ /* Arg pointer. */ \
+ 1 }
+
+#define REG_ALLOC_ORDER \
+{ /* d0/d1/a0/a1 */ \
+ 0, 1, 8, 9, \
+ /* d2-d7 */ \
+ 2, 3, 4, 5, 6, 7, \
+ /* a2-a7/arg */ \
+ 10, 11, 12, 13, 14, 15, 24, \
+ /* fp0-fp7 */ \
+ 16, 17, 18, 19, 20, 21, 22, 23\
+}
+
+
+/* On the m68k, ordinary registers hold 32 bits worth;
+ for the 68881 registers, a single register is always enough for
+ anything that can be stored in them at all. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((REGNO) >= 16 ? GET_MODE_NUNITS (MODE) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
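+
+/* For example, DFmode (8 bytes) occupies two data or address registers
+   ((8 + 3) / 4 == 2), but only one 68881 register, since
+   GET_MODE_NUNITS (DFmode) is 1.  */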
+
+/* A C expression that is nonzero if hard register NEW_REG can be
+ considered for use as a rename register for OLD_REG register. */
+
+#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
+ m68k_hard_regno_rename_ok (OLD_REG, NEW_REG)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ m68k_regno_mode_ok ((REGNO), (MODE))
+
+#define SECONDARY_RELOAD_CLASS(CLASS, MODE, X) \
+ m68k_secondary_reload_class (CLASS, MODE, X)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (! TARGET_HARD_FLOAT \
+ || ((GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT) \
+ == (GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT)))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+#define STACK_POINTER_REGNUM SP_REG
+
+/* Most m68k targets use %a6 as a frame pointer.  The AmigaOS
+   ABI uses %a6 for shared library calls; therefore, the frame
+   pointer is shifted to %a5 on that target.  */
+#define FRAME_POINTER_REGNUM A6_REG
+
+/* Base register for access to arguments of the function.
+   This isn't a hardware register.  It will be eliminated to the
+   stack pointer or frame pointer.  */
+#define ARG_POINTER_REGNUM 24
+
+#define STATIC_CHAIN_REGNUM A0_REG
+#define M68K_STATIC_CHAIN_REG_NAME REGISTER_PREFIX "a0"
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define M68K_STRUCT_VALUE_REGNUM A1_REG
+
+
+
+/* The m68k has three kinds of registers, so eight classes would be
+ a complete set. One of them is not needed. */
+enum reg_class {
+ NO_REGS, DATA_REGS,
+ ADDR_REGS, FP_REGS,
+ GENERAL_REGS, DATA_OR_FP_REGS,
+ ADDR_OR_FP_REGS, ALL_REGS,
+ LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "DATA_REGS", \
+ "ADDR_REGS", "FP_REGS", \
+ "GENERAL_REGS", "DATA_OR_FP_REGS", \
+ "ADDR_OR_FP_REGS", "ALL_REGS" }
+
+#define REG_CLASS_CONTENTS \
+{ \
+ {0x00000000}, /* NO_REGS */ \
+ {0x000000ff}, /* DATA_REGS */ \
+ {0x0100ff00}, /* ADDR_REGS */ \
+ {0x00ff0000}, /* FP_REGS */ \
+ {0x0100ffff}, /* GENERAL_REGS */ \
+ {0x00ff00ff}, /* DATA_OR_FP_REGS */ \
+ {0x01ffff00}, /* ADDR_OR_FP_REGS */ \
+ {0x01ffffff}, /* ALL_REGS */ \
+}
+
+extern enum reg_class regno_reg_class[];
+#define REGNO_REG_CLASS(REGNO) (regno_reg_class[(REGNO)])
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS ADDR_REGS
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ m68k_preferred_reload_class (X, CLASS)
+
+/* On the m68k, this is the size of MODE in words,
+ except in the FP regs, where a single reg is always enough. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FP_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between fp regs and other regs are two insns. */
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+ ((((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS)) ? 4 : 2)
+
+#define IRA_COVER_CLASSES \
+{ \
+ ALL_REGS, LIM_REG_CLASSES \
+}
+
+/* Stack layout; function entry, exit and calling. */
+
+#define STACK_GROWS_DOWNWARD 1
+#define FRAME_GROWS_DOWNWARD 1
+#define STARTING_FRAME_OFFSET 0
+
+/* On the 680x0, sp@- in a byte insn really pushes a word.
+ On the ColdFire, sp@- in a byte insn pushes just a byte. */
+#define PUSH_ROUNDING(BYTES) (TARGET_COLDFIRE ? BYTES : ((BYTES) + 1) & ~1)
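+
+/* For example, pushing a single byte reserves two bytes on the 680x0
+   (PUSH_ROUNDING (1) == 2), but only one byte on ColdFire
+   (PUSH_ROUNDING (1) == 1).  */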
+
+#define FIRST_PARM_OFFSET(FNDECL) 8
+
+/* On the m68k the return value defaults to D0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (TYPE_MODE (VALTYPE), D0_REG)
+
+/* On the m68k the return value defaults to D0. */
+#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, D0_REG)
+
+/* On the m68k, D0 is usually the only register used. */
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == D0_REG)
+
+/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
+ more than one register.
+ XXX This macro is m68k specific and used only for m68kemb.h. */
+#define NEEDS_UNTYPED_CALL 0
+
+/* On the m68k, all arguments are usually pushed on the stack. */
+#define FUNCTION_ARG_REGNO_P(N) 0
+
+/* On the m68k, this is a single integer, which is a number of bytes
+ of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* On the m68k, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ ((CUM) = 0)
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ asm_fprintf (FILE, "\tlea %LLP%d,%Ra0\n\tjsr mcount\n", (LABELNO))
+
+#define EXIT_IGNORE_STACK 1
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the m68k, the trampoline looks like this:
+ movl #STATIC,a0
+ jmp FUNCTION
+
+ WARNING: Targets that may run on 68040+ cpus must arrange for
+ the instruction cache to be flushed. Previous incarnations of
+ the m68k trampoline code attempted to get around this by either
+ using an out-of-line transfer function or pc-relative data, but
+ the fact remains that the code to jump to the transfer function
+ or the code to load the pc-relative data needs to be flushed
+ just as much as the "variable" portion of the trampoline.
+ Recognizing that a cache flush is going to be required anyway,
+ dispense with such notions and build a smaller trampoline.
+
+ Since more instructions are required to move a template into
+ place than to create it on the spot, don't use a template. */
+
+#define TRAMPOLINE_SIZE 12
+#define TRAMPOLINE_ALIGNMENT 16
+
+/* Targets redefine this to invoke code to either flush the cache,
+ or enable stack execution (or both). */
+#ifndef FINALIZE_TRAMPOLINE
+#define FINALIZE_TRAMPOLINE(TRAMP)
+#endif
+
+/* This is the library routine that is used to transfer control from the
+ trampoline to the actual nested function. It is defined for backward
+ compatibility, for linking with object code that used the old trampoline
+ definition.
+
+ A colon is used with no explicit operands to cause the template string
+ to be scanned for %-constructs.
+
+ The function name __transfer_from_trampoline is not actually used.
+ The function definition just permits use of "asm with operands"
+ (though the operand list is empty). */
+#define TRANSFER_FROM_TRAMPOLINE \
+void \
+__transfer_from_trampoline () \
+{ \
+ register char *a0 asm (M68K_STATIC_CHAIN_REG_NAME); \
+ asm (GLOBAL_ASM_OP "___trampoline"); \
+ asm ("___trampoline:"); \
+ asm volatile ("move%.l %0,%@" : : "m" (a0[22])); \
+ asm volatile ("move%.l %1,%0" : "=a" (a0) : "m" (a0[18])); \
+ asm ("rts":); \
+}
+
+/* There are two registers that can always be eliminated on the m68k.
+ The frame pointer and the arg pointer can be replaced by either the
+   hard frame pointer or the stack pointer, depending upon the
+ circumstances. The hard frame pointer is not used before reload and
+ so it is not eligible for elimination. */
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = m68k_initial_elimination_offset(FROM, TO)
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* True for data registers, D0 through D7. */
+#define DATA_REGNO_P(REGNO) IN_RANGE (REGNO, 0, 7)
+
+/* True for address registers, A0 through A7. */
+#define ADDRESS_REGNO_P(REGNO) IN_RANGE (REGNO, 8, 15)
+
+/* True for integer registers, D0 through D7 and A0 through A7. */
+#define INT_REGNO_P(REGNO) IN_RANGE (REGNO, 0, 15)
+
+/* True for floating point registers, FP0 through FP7. */
+#define FP_REGNO_P(REGNO) IN_RANGE (REGNO, 16, 23)
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ (INT_REGNO_P (REGNO) \
+ || INT_REGNO_P (reg_renumber[REGNO]))
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ (ADDRESS_REGNO_P (REGNO) \
+ || ADDRESS_REGNO_P (reg_renumber[REGNO]))
+
+#define REGNO_OK_FOR_INDEX_NONSTRICT_P(REGNO) \
+ (INT_REGNO_P (REGNO) \
+ || REGNO == ARG_POINTER_REGNUM \
+ || REGNO >= FIRST_PSEUDO_REGISTER)
+
+#define REGNO_OK_FOR_BASE_NONSTRICT_P(REGNO) \
+ (ADDRESS_REGNO_P (REGNO) \
+ || REGNO == ARG_POINTER_REGNUM \
+ || REGNO >= FIRST_PSEUDO_REGISTER)
+
+/* Now macros that check whether X is a register and also,
+ strictly, whether it is in a specified class.
+
+ These macros are specific to the m68k, and may be used only
+ in code for printing assembler insns and in conditions for
+ define_optimization. */
+
+/* 1 if X is a data register. */
+#define DATA_REG_P(X) (REG_P (X) && DATA_REGNO_P (REGNO (X)))
+
+/* 1 if X is an fp register. */
+#define FP_REG_P(X) (REG_P (X) && FP_REGNO_P (REGNO (X)))
+
+/* 1 if X is an address register.  */
+#define ADDRESS_REG_P(X) (REG_P (X) && ADDRESS_REGNO_P (REGNO (X)))
+
+/* True if SYMBOL + OFFSET constants must refer to something within
+ SYMBOL's section. */
+#ifndef M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
+#define M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 0
+#endif
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) \
+ ((GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH) \
+ && LEGITIMATE_CONSTANT_P (X))
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_MODE (X) != XFmode \
+ && !m68k_illegitimate_symbolic_constant_p (X))
+
+#ifndef REG_OK_STRICT
+#define REG_STRICT_P 0
+#else
+#define REG_STRICT_P 1
+#endif
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (!symbolic_operand (X, VOIDmode) \
+ || (TARGET_PCREL && REG_STRICT_P) \
+ || m68k_tls_reference_p (X, true))
+
+#define REG_OK_FOR_BASE_P(X) \
+ m68k_legitimate_base_reg_p (X, REG_STRICT_P)
+
+#define REG_OK_FOR_INDEX_P(X) \
+ m68k_legitimate_index_reg_p (X, REG_STRICT_P)
+
+
+/* This address is OK as it stands. */
+#define PIC_CASE_VECTOR_ADDRESS(index) index
+#define CASE_VECTOR_MODE HImode
+#define CASE_VECTOR_PC_RELATIVE 1
+
+#define DEFAULT_SIGNED_CHAR 1
+#define MOVE_MAX 4
+#define SLOW_BYTE_ACCESS 0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* The ColdFire FF1 instruction returns 32 for zero. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
+
+#define STORE_FLAG_VALUE (-1)
+
+#define Pmode SImode
+#define FUNCTION_MODE QImode
+
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h'). */
+
+/* Set if the cc value is actually in the 68881, so a floating point
+ conditional branch must be output. */
+#define CC_IN_68881 04000
+
+/* On the 68000, all the insns that store in an address register fail to
+   set the cc's.  However, in some cases these instructions can make it
+   invalid to use the saved cc's.  In those cases we clear out some or
+   all of the saved cc's so they won't be used.  */
+#define NOTICE_UPDATE_CC(EXP,INSN) notice_update_cc (EXP, INSN)
+
+/* The shift instructions always clear the overflow bit. */
+#define CC_OVERFLOW_UNUSABLE 01000
+
+/* The shift instructions use the carry bit in a way not compatible with
+ conditional branches. conditions.h uses CC_NO_OVERFLOW for this purpose.
+ Rename it to something more understandable. */
+#define CC_NO_CARRY CC_NO_OVERFLOW
+
+#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
+do { if (cc_prev_status.flags & CC_IN_68881) \
+ return FLOAT; \
+ if (cc_prev_status.flags & CC_NO_OVERFLOW) \
+ return NO_OV; \
+ return NORMAL; } while (0)
+
+/* Control the assembler format that we output. */
+
+#define ASM_APP_ON "#APP\n"
+#define ASM_APP_OFF "#NO_APP\n"
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define GLOBAL_ASM_OP "\t.globl\t"
+#define REGISTER_PREFIX ""
+#define LOCAL_LABEL_PREFIX ""
+#define USER_LABEL_PREFIX "_"
+#define IMMEDIATE_PREFIX "#"
+
+#define REGISTER_NAMES \
+{REGISTER_PREFIX"d0", REGISTER_PREFIX"d1", REGISTER_PREFIX"d2", \
+ REGISTER_PREFIX"d3", REGISTER_PREFIX"d4", REGISTER_PREFIX"d5", \
+ REGISTER_PREFIX"d6", REGISTER_PREFIX"d7", \
+ REGISTER_PREFIX"a0", REGISTER_PREFIX"a1", REGISTER_PREFIX"a2", \
+ REGISTER_PREFIX"a3", REGISTER_PREFIX"a4", REGISTER_PREFIX"a5", \
+ REGISTER_PREFIX"a6", REGISTER_PREFIX"sp", \
+ REGISTER_PREFIX"fp0", REGISTER_PREFIX"fp1", REGISTER_PREFIX"fp2", \
+ REGISTER_PREFIX"fp3", REGISTER_PREFIX"fp4", REGISTER_PREFIX"fp5", \
+ REGISTER_PREFIX"fp6", REGISTER_PREFIX"fp7", REGISTER_PREFIX"argptr" }
+
+#define M68K_FP_REG_NAME REGISTER_PREFIX"fp"
+
+/* Return a register name by index, handling %fp nicely.
+ We don't replace %fp for targets that don't map it to %a6
+ since it may confuse GAS. */
+#define M68K_REGNAME(r) ( \
+ ((FRAME_POINTER_REGNUM == A6_REG) \
+ && ((r) == FRAME_POINTER_REGNUM) \
+ && frame_pointer_needed) ? \
+ M68K_FP_REG_NAME : reg_names[(r)])
+
+/* On the Sun-3, the floating point registers have numbers
+ 18 to 25, not 16 to 23 as they do in the compiler. */
+#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 16 ? (REGNO) : (REGNO) + 2)
+
+/* Before the prologue, RA is at 0(%sp). */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM))
+
+/* After the prologue, RA is at 4(AP) in the current frame. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, UNITS_PER_WORD)) \
+ : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))
+
+/* We must not use the DBX register numbers for the DWARF 2 CFA column
+ numbers because that maps to numbers beyond FIRST_PSEUDO_REGISTER.
+ Instead use the identity mapping. */
+#define DWARF_FRAME_REGNUM(REG) \
+ (INT_REGNO_P (REG) || FP_REGNO_P (REG) ? (REG) : INVALID_REGNUM)
+
+/* The return column was originally 24, but gcc used 25 for a while too.
+ Define both registers 24 and 25 as Pmode ones and use 24 in our own
+ unwind information. */
+#define DWARF_FRAME_REGISTERS 25
+#define DWARF_FRAME_RETURN_COLUMN 24
+#define DWARF_ALT_FRAME_RETURN_COLUMN 25
+
+/* Before the prologue, the top of the frame is at 4(%sp). */
+#define INCOMING_FRAME_SP_OFFSET 4
+
+/* All registers are live on exit from an interrupt routine. */
+#define EPILOGUE_USES(REGNO) \
+ (reload_completed \
+ && (m68k_get_function_kind (current_function_decl) \
+ == m68k_fk_interrupt_handler))
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < 2 ? (N) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, A0_REG)
+#define EH_RETURN_HANDLER_RTX \
+ gen_rtx_MEM (Pmode, \
+ gen_rtx_PLUS (Pmode, arg_pointer_rtx, \
+ plus_constant (EH_RETURN_STACKADJ_RTX, \
+ UNITS_PER_WORD)))
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations.
+
+ TARGET_ID_SHARED_LIBRARY and TARGET_SEP_DATA are designed to support
+ a read-only text segment without imposing a fixed gap between the
+ text and data segments. As a result, the text segment cannot refer
+ to anything in the data segment, even in PC-relative form. Because
+ .eh_frame refers to both code and data, it follows that .eh_frame
+ must be in the data segment itself, and that the offset between
+ .eh_frame and code will not be a link-time constant.
+
+ In theory, we could create a read-only .eh_frame by using DW_EH_PE_pcrel
+ | DW_EH_PE_indirect for all code references. However, gcc currently
+ handles indirect references using a per-TU constant pool. This means
+ that if a function and its eh_frame are removed by the linker, the
+ eh_frame's indirect references to the removed function will not be
+ removed, leading to an unresolved symbol error.
+
+ It isn't clear that any -msep-data or -mid-shared-library target
+ would benefit from a read-only .eh_frame anyway. In particular,
+ no known target that supports these options has a feature like
+ PT_GNU_RELRO. Without any such feature to motivate them, indirect
+ references would be unnecessary bloat, so we simply use an absolute
+ pointer for code and global references. We still use pc-relative
+ references to data, as this avoids a relocation. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ (flag_pic \
+ && !((TARGET_ID_SHARED_LIBRARY || TARGET_SEP_DATA) \
+ && ((GLOBAL) || (CODE))) \
+ ? ((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4 \
+ : DW_EH_PE_absptr)
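+
+/* For example, with -fPIC and neither -msep-data nor
+   -mid-shared-library, a global data reference is encoded as
+   DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4, whereas
+   without -fPIC every reference is a plain absolute pointer.  */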
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ asm_fprintf (FILE, "%U%s", NAME)
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%s%ld", LOCAL_LABEL_PREFIX, PREFIX, (long)(NUM))
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ asm_fprintf (FILE, (MOTOROLA \
+ ? "\tmove.l %s,-(%Rsp)\n" \
+ : "\tmovel %s,%Rsp@-\n"), \
+ reg_names[REGNO])
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ asm_fprintf (FILE, (MOTOROLA \
+ ? "\tmove.l (%Rsp)+,%s\n" \
+ : "\tmovel %Rsp@+,%s\n"), \
+ reg_names[REGNO])
+
+/* The m68k does not use absolute case-vectors, but we must define this macro
+ anyway. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ asm_fprintf (FILE, "\t.long %LL%d\n", VALUE)
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ asm_fprintf (FILE, "\t.word %LL%d-%LL%d\n", VALUE, REL)
+
+/* We don't have a way to align to more than a two-byte boundary, so do the
+ best we can and don't complain. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) >= 1) \
+ fprintf (FILE, "\t.even\n");
+
+#ifdef HAVE_GAS_BALIGN_AND_P2ALIGN
+/* Use "move.l %a4,%a4" to advance within code. */
+#define ASM_OUTPUT_ALIGN_WITH_NOP(FILE,LOG) \
+ if ((LOG) > 0) \
+ fprintf ((FILE), "\t.balignw %u,0x284c\n", 1 << (LOG));
+#endif
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (int)(SIZE))
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
+
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ m68k_final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+/* On the 68000, we use several CODE characters:
+ '.' for dot needed in Motorola-style opcode names.
+ '-' for an operand pushing on the stack:
+ sp@-, -(sp) or -(%sp) depending on the style of syntax.
+ '+' for an operand pushing on the stack:
+ sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
+ '@' for a reference to the top word on the stack:
+ sp@, (sp) or (%sp) depending on the style of syntax.
+ '#' for an immediate operand prefix (# in MIT and Motorola syntax
+ but & in SGS syntax).
+ '!' for the fpcr register (used in some float-to-fixed conversions).
+ '$' for the letter `s' in an op code, but only on the 68040.
+ '&' for the letter `d' in an op code, but only on the 68040.
+ '/' for register prefix needed by longlong.h.
+   '?' for m68k_library_id_string.
+
+ 'b' for byte insn (no effect, on the Sun; this is for the ISI).
+ 'd' to force memory addressing to be absolute, not relative.
+ 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
+ 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
+ or print pair of registers as rx:ry. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '.' || (CODE) == '#' || (CODE) == '-' \
+ || (CODE) == '+' || (CODE) == '@' || (CODE) == '!' \
+ || (CODE) == '$' || (CODE) == '&' || (CODE) == '/' || (CODE) == '?')
+
+
+/* See m68k.c for the m68k specific codes. */
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
+
+#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
+do { \
+ if (! m68k_output_addr_const_extra (FILE, (X))) \
+ goto FAIL; \
+} while (0)
+
+/* Values used in the MICROARCH argument to M68K_DEVICE. */
+enum uarch_type
+{
+ u68000,
+ u68010,
+ u68020,
+ u68020_40,
+ u68020_60,
+ u68030,
+ u68040,
+ u68060,
+ ucpu32,
+ ucfv1,
+ ucfv2,
+ ucfv3,
+ ucfv4,
+ ucfv4e,
+ ucfv5,
+ unk_arch
+};
+
+/* An enumeration of all supported target devices. */
+enum target_device
+{
+#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
+ ENUM_VALUE,
+#include "m68k-devices.def"
+#undef M68K_DEVICE
+ unk_device
+};
+
+enum fpu_type
+{
+ FPUTYPE_NONE,
+ FPUTYPE_68881,
+ FPUTYPE_COLDFIRE
+};
+
+enum m68k_function_kind
+{
+ m68k_fk_normal_function,
+ m68k_fk_interrupt_handler,
+ m68k_fk_interrupt_thread
+};
+
+/* Variables in m68k.c; see there for details. */
+extern const char *m68k_library_id_string;
+extern enum target_device m68k_cpu;
+extern enum uarch_type m68k_tune;
+extern enum fpu_type m68k_fpu;
+extern unsigned int m68k_cpu_flags;
+extern unsigned int m68k_tune_flags;
+extern const char *m68k_symbolic_call;
+extern const char *m68k_symbolic_jump;
+
+enum M68K_SYMBOLIC_CALL { M68K_SYMBOLIC_CALL_NONE, M68K_SYMBOLIC_CALL_JSR,
+ M68K_SYMBOLIC_CALL_BSR_C, M68K_SYMBOLIC_CALL_BSR_P };
+
+extern enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
+
+/* ??? HOST_WIDE_INT is not defined for auto-generated files.
+   Work around that.  */
+#ifdef HOST_WIDE_INT
+typedef enum { MOVL, SWAP, NEGW, NOTW, NOTB, MOVQ, MVS, MVZ }
+ M68K_CONST_METHOD;
+
+extern M68K_CONST_METHOD m68k_const_method (HOST_WIDE_INT);
+#endif
+
+extern void m68k_emit_move_double (rtx [2]);
+
+extern int m68k_sched_address_bypass_p (rtx, rtx);
+extern int m68k_sched_indexed_address_bypass_p (rtx, rtx);
+
+#define CPU_UNITS_QUERY 1
diff --git a/gcc/config/m68k/m68k.md b/gcc/config/m68k/m68k.md
new file mode 100644
index 000000000..f89037f2e
--- /dev/null
+++ b/gcc/config/m68k/m68k.md
@@ -0,0 +1,7808 @@
+;;- Machine description for GNU compiler, Motorola 68000 Version
+;; Copyright (C) 1987, 1988, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2001,
+;; 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+;; Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;- Information about MCF5200 port.
+
+;;- The MCF5200 "ColdFire" architecture is a reduced version of the
+;;- 68k ISA. Differences include reduced support for byte and word
+;;- operands and the removal of BCD, bitfield, rotate, and integer
+;;- divide instructions. The TARGET_COLDFIRE flag turns the use of the
+;;- removed opcodes and addressing modes off.
+;;-
+
+
+;;- instruction definitions
+
+;;- @@The original PO technology requires these to be ordered by speed,
+;;- @@ so that assigner will pick the fastest.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;;- When naming insns (operand 0 of define_insn), be careful about using
+;;- names from other targets' machine descriptions.
+
+;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+;;- updates for most instructions.
+
+;;- Operand classes for the register allocator:
+;;- 'a' one of the address registers can be used.
+;;- 'd' one of the data registers can be used.
+;;- 'f' one of the m68881/fpu registers can be used.
+;;- 'r' either a data or an address register can be used.
+
+;;- Immediate Floating point operator constraints
+;;- 'G' a floating point constant that is *NOT* one of the standard
+;; 68881 constant values (to force calling output_move_const_double
+;; to get it from rom if it is a 68881 constant).
+;;
+;; See the functions standard_XXX_constant_p in output-m68k.c for more
+;; info.
+
+;;- Immediate integer operand constraints:
+;;- 'I' 1 .. 8
+;;- 'J' -32768 .. 32767
+;;- 'K' all integers EXCEPT -128 .. 127
+;;- 'L' -8 .. -1
+;;- 'M' all integers EXCEPT -256 .. 255
+;;- 'N' 24 .. 31
+;;- 'O' 16
+;;- 'P' 8 .. 15
+
+;;- Assembler specs:
+;;- "%." size separator ("." or "") move%.l d0,d1
+;;- "%-" push operand "sp@-" move%.l d0,%-
+;;- "%+" pop operand "sp@+" move%.l d0,%+
+;;- "%@" top of stack "sp@" move%.l d0,%@
+;;- "%!" fpcr register
+;;- "%$" single-precision fp specifier ("s" or "") f%$add.x fp0,fp1
+;;- "%&" double-precision fp specifier ("d" or "") f%&add.x fp0,fp1
+
+;;- Information about 68040 port.
+
+;;- The 68040 executes all 68030 and 68881/2 instructions, but some must
+;;- be emulated in software by the OS. It is faster to avoid these
+;;- instructions and issue a library call rather than trapping into
+;;- the kernel. The affected instructions are fintrz and fscale. The
+;;- TUNE_68040 flag turns the use of the opcodes off.
+
+;;- The '040 also implements a set of new floating-point instructions
+;;- which specify the rounding precision in the opcode. This finally
+;;- permits the 68k series to be truly IEEE compliant, and solves all
+;;- issues of excess precision accumulating in the extended registers.
+;;- By default, GCC does not use these instructions, since such code will
+;;- not run on an '030. To use these instructions, use the -m68040-only
+;;- switch.
+
+;;- These new instructions aren't directly in the md. They are brought
+;;- into play by defining "%$" and "%&" to expand to "s" and "d" rather
+;;- than "".
+
+;;- Information about 68060 port.
+
+;;- The 68060 executes all 68030 and 68881/2 instructions, but some must
+;;- be emulated in software by the OS. It is faster to avoid these
+;;- instructions and issue a library call rather than trapping into
+;;- the kernel. The affected instructions are: divs.l <ea>,Dr:Dq;
+;;- divu.l <ea>,Dr:Dq; muls.l <ea>,Dr:Dq; mulu.l <ea>,Dr:Dq; and
+;;- fscale. The TUNE_68060 flag turns the use of the opcodes off.
+
+;;- Some of these insns are composites of several m68000 op codes.
+;;- The assembler (or final @@??) ensures that the appropriate one is
+;;- selected.
+
+;; UNSPEC usage:
+
+(define_constants
+ [(UNSPEC_SIN 1)
+ (UNSPEC_COS 2)
+ (UNSPEC_GOT 3)
+ (UNSPEC_IB 4)
+ (UNSPEC_TIE 5)
+ (UNSPEC_RELOC16 6)
+ (UNSPEC_RELOC32 7)
+ ])
+
+;; UNSPEC_VOLATILE usage:
+
+(define_constants
+ [(UNSPECV_BLOCKAGE 0)
+ ])
+
+;; Registers by name.
+(define_constants
+ [(D0_REG 0)
+ (A0_REG 8)
+ (A1_REG 9)
+ (PIC_REG 13)
+ (A6_REG 14)
+ (SP_REG 15)
+ (FP0_REG 16)
+ ])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ::::::::::::::::::::
+;; ::
+;; :: Attributes
+;; ::
+;; ::::::::::::::::::::
+
+;; Processor type.
+(define_attr "cpu" "cfv1, cfv2, cfv3, cfv4, unknown"
+ (const (symbol_ref "m68k_sched_cpu")))
+
+;; MAC type.
+(define_attr "mac" "no, cf_mac, cf_emac"
+ (const (symbol_ref "m68k_sched_mac")))
+
+;; Instruction type for use in scheduling description.
+;; _l and _w suffixes indicate size of the operands of instruction.
+;; alu - usual arithmetic or logic instruction.
+;; aluq - arithmetic or logic instruction which has a quick immediate (the one
+;; that is encoded in the instruction word) for its Y operand.
+;; alux - arithmetic instruction that uses the carry bit (e.g., addx and subx).
+;; bcc - conditional branch.
+;; bitr - bit operation that only updates flags.
+;; bitrw - bit operation that updates flags and output operand.
+;; bra, bsr, clr, cmp, div, ext - corresponding instruction.
+;; falu, fbcc, fcmp, fdiv, fmove, fmul, fneg, fsqrt, ftst - corresponding
+;; instruction.
+;; ib - fake instruction to reserve slots in the ColdFire V1, V2 and V3
+;;   instruction buffer.
+;; ignore - fake instruction.
+;; jmp, jsr, lea, link, mov3q, move, moveq, mul - corresponding instruction.
+;; mvsz - mvs or mvz instruction.
+;; neg, nop, pea, rts, scc - corresponding instruction.
+;; shift - arithmetic or logical shift instruction.
+;; trap, tst, unlk - corresponding instruction.
+(define_attr "type"
+ "alu_l,aluq_l,alux_l,bcc,bitr,bitrw,bra,bsr,clr,clr_l,cmp,cmp_l,
+ div_w,div_l,ext,
+ falu,fbcc,fcmp,fdiv,fmove,fmul,fneg,fsqrt,ftst,
+ ib,ignore,
+ jmp,jsr,lea,link,mov3q_l,move,move_l,moveq_l,mul_w,mul_l,mvsz,neg_l,nop,
+ pea,rts,scc,shift,
+ trap,tst,tst_l,unlk,
+ unknown"
+ (const_string "unknown"))
+
+;; Index of the X or Y operand in recog_data.operand[].
+;; Should be used only within opx_type and opy_type.
+(define_attr "opx" "" (const_int 0))
+(define_attr "opy" "" (const_int 1))
+
+;; Type of the Y operand.
+;; See m68k.c: enum attr_op_type.
+(define_attr "opy_type"
+ "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l"
+ (cond [(eq_attr "type" "ext,fbcc,ftst,neg_l,bcc,bra,bsr,clr,clr_l,ib,ignore,
+ jmp,jsr,nop,rts,scc,trap,tst,tst_l,
+ unlk,unknown") (const_string "none")
+ (eq_attr "type" "lea,pea")
+ (symbol_ref "m68k_sched_attr_opy_type (insn, 1)")]
+ (symbol_ref "m68k_sched_attr_opy_type (insn, 0)")))
+
+;; Type of the X operand.
+;; See m68k.c: enum attr_op_type.
+(define_attr "opx_type"
+ "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l"
+ (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk,
+ unknown") (const_string "none")
+ (eq_attr "type" "pea") (const_string "mem1")
+ (eq_attr "type" "jmp,jsr")
+ (symbol_ref "m68k_sched_attr_opx_type (insn, 1)")]
+ (symbol_ref "m68k_sched_attr_opx_type (insn, 0)")))
+
+;; Access to the X operand: none, read, write, read/write, unknown.
+;; Access to the Y operand is either none (if opy_type is none)
+;; or read otherwise.
+(define_attr "opx_access" "none, r, w, rw"
+ (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk,
+ unknown") (const_string "none")
+ (eq_attr "type" "bcc,bra,bsr,bitr,cmp,cmp_l,fbcc,fcmp,ftst,
+ jmp,jsr,tst,tst_l") (const_string "r")
+ (eq_attr "type" "clr,clr_l,fneg,fmove,lea,
+ mov3q_l,move,move_l,moveq_l,mvsz,
+ pea,scc") (const_string "w")
+ (eq_attr "type" "alu_l,aluq_l,alux_l,bitrw,div_w,div_l,ext,
+ falu,fdiv,fmul,fsqrt,link,mul_w,mul_l,
+ neg_l,shift") (const_string "rw")]
+ ;; Should never be used.
+ (symbol_ref "(gcc_unreachable (), OPX_ACCESS_NONE)")))
+
+;; Memory accesses of the insn.
+;; 00 - no memory references
+;; 10 - memory is read
+;; i0 - indexed memory is read
+;; 01 - memory is written
+;; 0i - indexed memory is written
+;; 11 - memory is read, memory is written
+;; i1 - indexed memory is read, memory is written
+;; 1i - memory is read, indexed memory is written
+(define_attr "op_mem" "00, 10, i0, 01, 0i, 11, i1, 1i"
+ (symbol_ref "m68k_sched_attr_op_mem (insn)"))
+
+;; Instruction size in words.
+(define_attr "size" "1,2,3"
+ (symbol_ref "m68k_sched_attr_size (insn)"))
+
+;; Alternative is OK for ColdFire.
+(define_attr "ok_for_coldfire" "yes,no" (const_string "yes"))
+
+;; Define 'enabled' attribute.
+(define_attr "enabled" ""
+ (cond [(and (ne (symbol_ref "TARGET_COLDFIRE") (const_int 0))
+ (eq_attr "ok_for_coldfire" "no"))
+ (const_int 0)]
+ (const_int 1)))
+
+;; Mode macros for floating point operations.
+;; Valid floating point modes
+(define_mode_iterator FP [SF DF (XF "TARGET_68881")])
+;; Mnemonic infix to round result
+(define_mode_attr round [(SF "%$") (DF "%&") (XF "")])
+;; Mnemonic infix to round result for mul or div instruction
+(define_mode_attr round_mul [(SF "sgl") (DF "%&") (XF "")])
+;; Suffix specifying source operand format
+(define_mode_attr prec [(SF "s") (DF "d") (XF "x")])
+;; Allowable D registers
+(define_mode_attr dreg [(SF "d") (DF "") (XF "")])
+;; Allowable 68881 constant constraints
+(define_mode_attr const [(SF "F") (DF "G") (XF "")])
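+
+;; For example, the FP iterator expands a single pattern such as
+;; tst<mode>_68881 below into tstsf_68881, tstdf_68881 and tstxf_68881
+;; (the XF variant additionally conditioned on TARGET_68881), with
+;; <FP:prec> substituting "s", "d" or "x" in the output template.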
+
+
+(define_insn_and_split "*movdf_internal"
+ [(set (match_operand:DF 0 "push_operand" "=m, m")
+ (match_operand:DF 1 "general_operand" "f, ro<>E"))]
+ ""
+ "@
+ fmove%.d %f1,%0
+ #"
+ "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 1)"
+ [(const_int 0)]
+{
+ m68k_emit_move_double (operands);
+ DONE;
+}
+ [(set_attr "type" "fmove,*")])
+
+(define_insn_and_split "pushdi"
+ [(set (match_operand:DI 0 "push_operand" "=m")
+ (match_operand:DI 1 "general_operand" "ro<>Fi"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ m68k_emit_move_double (operands);
+ DONE;
+})
+
+;; We don't want to allow a constant operand for test insns because
+;; (set (cc0) (const_int foo)) has no mode information. Such insns will
+;; be folded while optimizing anyway.
+
+(define_insn "tstdi"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "nonimmediate_operand" "am,d")
+ (const_int 0)))
+ (clobber (match_scratch:SI 1 "=X,d"))
+ (clobber (match_scratch:DI 2 "=d,X"))]
+ ""
+{
+ if (which_alternative == 0)
+ {
+ rtx xoperands[2];
+
+ xoperands[0] = operands[2];
+ xoperands[1] = operands[0];
+ output_move_double (xoperands);
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "neg%.l %R2\;negx%.l %2";
+ }
+ if (find_reg_note (insn, REG_DEAD, operands[0]))
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "neg%.l %R0\;negx%.l %0";
+ }
+ else
+ /*
+ 'sub' clears %1, and also clears the X cc bit
+ 'tst' sets the Z cc bit according to the low part of the DImode operand
+ 'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high part.
+ */
+ return "sub%.l %1,%1\;tst%.l %R0\;subx%.l %1,%0";
+})
+
+;; If you think that the 68020 does not support tstl a0,
+;; reread page B-167 of the 68020 manual more carefully.
+(define_insn "*tstsi_internal_68020_cf"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "rm")
+ (const_int 0)))]
+ "TARGET_68020 || TARGET_COLDFIRE"
+ "tst%.l %0"
+ [(set_attr "type" "tst_l")])
+
+;; On an address reg, cmpw may replace cmpl.
+(define_insn "*tstsi_internal"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "dm,r")
+ (const_int 0)))]
+ "!(TARGET_68020 || TARGET_COLDFIRE)"
+ "@
+ tst%.l %0
+ cmp%.w #0,%0"
+ [(set_attr "type" "tst_l,cmp")])
+
+;; This can't use an address register, because comparisons
+;; with address registers as second operand always test the whole word.
+(define_insn "*tsthi_internal"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "dm")
+ (const_int 0)))]
+ ""
+ "tst%.w %0"
+ [(set_attr "type" "tst")])
+
+(define_insn "*tstqi_internal"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "dm")
+ (const_int 0)))]
+ ""
+ "tst%.b %0"
+ [(set_attr "type" "tst")])
+
+(define_insn "tst<mode>_68881"
+ [(set (cc0)
+ (compare (match_operand:FP 0 "general_operand" "f<FP:dreg>m")
+ (match_operand:FP 1 "const0_operand" "H")))]
+ "TARGET_68881"
+{
+ cc_status.flags = CC_IN_68881;
+ if (FP_REG_P (operands[0]))
+ return "ftst%.x %0";
+ return "ftst%.<FP:prec> %0";
+}
+ [(set_attr "type" "ftst")])
+
+(define_insn "tst<mode>_cf"
+ [(set (cc0)
+ (compare (match_operand:FP 0 "general_operand" "f<FP:dreg><Q>U")
+ (match_operand:FP 1 "const0_operand" "H")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ cc_status.flags = CC_IN_68881;
+ if (FP_REG_P (operands[0]))
+ return "ftst%.d %0";
+ return "ftst%.<FP:prec> %0";
+}
+ [(set_attr "type" "ftst")])
+
+
+;; compare instructions.
+
+(define_insn "*cmpdi_internal"
+ [(set (cc0)
+ (compare (match_operand:DI 1 "nonimmediate_operand" "0,d")
+ (match_operand:DI 2 "general_operand" "d,0")))
+ (clobber (match_scratch:DI 0 "=d,d"))]
+ ""
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ return "sub%.l %R2,%R0\;subx%.l %2,%0";
+ else
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "sub%.l %R1,%R0\;subx%.l %1,%0";
+ }
+})
+
+(define_insn "cmpdi"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "nonimmediate_operand")
+ (match_operand:DI 1 "general_operand")))
+ (clobber (match_scratch:DI 2))]
+ ""
+ "")
+
+
+(define_expand "cbranchdi4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:DI 1 "nonimmediate_operand")
+ (match_operand:DI 2 "general_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+{
+ if (operands[2] == const0_rtx)
+ emit_insn (gen_tstdi (operands[1]));
+ else
+ emit_insn (gen_cmpdi (operands[1], operands[2]));
+ operands[1] = cc0_rtx;
+ operands[2] = const0_rtx;
+})
+
+(define_expand "cstoredi4"
+ [(set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 "ordered_comparison_operator"
+ [(match_operand:DI 2 "nonimmediate_operand")
+ (match_operand:DI 3 "general_operand")]))]
+ ""
+{
+ if (operands[3] == const0_rtx)
+ emit_insn (gen_tstdi (operands[2]));
+ else
+ emit_insn (gen_cmpdi (operands[2], operands[3]));
+ operands[2] = cc0_rtx;
+ operands[3] = const0_rtx;
+})
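+
+;; In both expanders above, the emitted tstdi/cmpdi sets cc0 and the
+;; operands are then rewritten to (cc0) and (const_int 0), so what
+;; remains is a branch or store conditional on the condition codes,
+;; e.g. (illustrative):
+;;   (set (pc) (if_then_else (eq (cc0) (const_int 0))
+;;                           (label_ref ...) (pc)))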
+
+
+(define_expand "cbranchsi4"
+ [(set (cc0)
+ (compare (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "cstoresi4"
+ [(set (cc0)
+ (compare (match_operand:SI 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "general_operand" "")))
+ (set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 "ordered_comparison_operator"
+ [(cc0) (const_int 0)]))]
+ ""
+ "")
+
+
+;; A composite of the cmp, cmpa, cmpi & cmpm m68000 op codes.
+(define_insn ""
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "rKT,rKs,mSr,mSa,>")
+ (match_operand:SI 1 "general_src_operand" "mSr,mSa,KTr,Ksr,>")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ return "cmpm%.l %1,%0";
+ if (REG_P (operands[1])
+ || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "cmp%.l %d0,%d1";
+ }
+ if (ADDRESS_REG_P (operands[0])
+ && GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0x8000
+ && INTVAL (operands[1]) >= -0x8000)
+ return "cmp%.w %1,%0";
+ return "cmp%.l %d1,%d0";
+})
+
+(define_insn "*cmpsi_cf"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "mrKs,r")
+ (match_operand:SI 1 "general_operand" "r,mrKs")))]
+ "TARGET_COLDFIRE"
+{
+ if (REG_P (operands[1])
+ || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "cmp%.l %d0,%d1";
+ }
+ return "cmp%.l %d1,%d0";
+}
+ [(set_attr "type" "cmp_l")])
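+
+;; Setting CC_REVERSED above records that the comparison operands were
+;; exchanged at output time, so later users of cc0 must test the
+;; swapped condition (e.g. GT where LT was requested).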
+
+(define_expand "cbranchhi4"
+ [(set (cc0)
+ (compare (match_operand:HI 1 "nonimmediate_src_operand" "")
+ (match_operand:HI 2 "m68k_subword_comparison_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "cstorehi4"
+ [(set (cc0)
+ (compare (match_operand:HI 2 "nonimmediate_operand" "")
+ (match_operand:HI 3 "m68k_subword_comparison_operand" "")))
+ (set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 "ordered_comparison_operator"
+ [(cc0) (const_int 0)]))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_src_operand" "rnmS,d,n,mS,>")
+ (match_operand:HI 1 "general_src_operand" "d,rnmS,mS,n,>")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ return "cmpm%.w %1,%0";
+ if ((REG_P (operands[1]) && !ADDRESS_REG_P (operands[1]))
+ || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "cmp%.w %d0,%d1";
+ }
+ return "cmp%.w %d1,%d0";
+})
+
+(define_expand "cbranchqi4"
+ [(set (cc0)
+ (compare (match_operand:QI 1 "nonimmediate_src_operand" "")
+ (match_operand:QI 2 "m68k_subword_comparison_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "cstoreqi4"
+ [(set (cc0)
+ (compare (match_operand:QI 2 "nonimmediate_src_operand" "")
+ (match_operand:QI 3 "m68k_subword_comparison_operand" "")))
+ (set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 "ordered_comparison_operator"
+ [(cc0) (const_int 0)]))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_src_operand" "dn,dmS,>")
+ (match_operand:QI 1 "general_src_operand" "dmS,nd,>")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ return "cmpm%.b %1,%0";
+ if (REG_P (operands[1])
+ || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
+ {
+ cc_status.flags |= CC_REVERSED; /*|*/
+ return "cmp%.b %d0,%d1";
+ }
+ return "cmp%.b %d1,%d0";
+})
+
+(define_expand "cbranch<mode>4"
+ [(set (cc0)
+ (compare (match_operand:FP 1 "register_operand" "")
+ (match_operand:FP 2 "fp_src_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_expand "cstore<mode>4"
+ [(set (cc0)
+ (compare (match_operand:FP 2 "register_operand" "")
+ (match_operand:FP 3 "fp_src_operand" "")))
+ (set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 "m68k_cstore_comparison_operator"
+ [(cc0) (const_int 0)]))]
+ "TARGET_HARD_FLOAT && !(TUNE_68060 || TARGET_COLDFIRE_FPU)"
+ "if (TARGET_COLDFIRE && operands[2] != const0_rtx)
+ FAIL;")
+
+(define_insn "*cmp<mode>_68881"
+ [(set (cc0)
+ (compare (match_operand:FP 0 "fp_src_operand" "f,f,<FP:dreg>mF")
+ (match_operand:FP 1 "fp_src_operand" "f,<FP:dreg>mF,f")))]
+ "TARGET_68881
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+ "@
+ fcmp%.x %1,%0
+ fcmp%.<FP:prec> %f1,%0
+ fcmp%.<FP:prec> %0,%f1"
+ [(set_attr "type" "fcmp")])
+
+(define_insn "*cmp<mode>_cf"
+ [(set (cc0)
+ (compare (match_operand:FP 0 "fp_src_operand" "f,f,<FP:dreg><Q>U")
+ (match_operand:FP 1 "fp_src_operand" "f,<FP:dreg><Q>U,f")))]
+ "TARGET_COLDFIRE_FPU
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+ "@
+ fcmp%.d %1,%0
+ fcmp%.<FP:prec> %f1,%0
+ fcmp%.<FP:prec> %0,%f1"
+ [(set_attr "type" "fcmp")])
+
+;; Recognizers for btst instructions.
+
+;; ColdFire/5200 only allows "<Q>" type addresses when the bit position is
+;; specified as a constant, so we must disable all patterns that may extract
+;; from a MEM at a constant bit position if we can't use this as a constraint.
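+;;
+;; As background for the patterns below: btst interprets the bit number
+;; modulo 32 when the destination is a data register and modulo 8 when
+;; it is memory, which is why output_btst is passed 31 or 7 and why the
+;; masked forms further down can drop the explicit AND.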
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_src_operand" "oS")
+ (const_int 1)
+ (minus:SI (const_int 7)
+ (match_operand:SI 1 "general_operand" "di")))
+ (const_int 0)))]
+ "!TARGET_COLDFIRE"
+{
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+})
+
+;; This is the same as the above pattern except for the constraints. The 'i'
+;; has been deleted.
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o")
+ (const_int 1)
+ (minus:SI (const_int 7)
+ (match_operand:SI 1 "general_operand" "d")))
+ (const_int 0)))]
+ "TARGET_COLDFIRE"
+{
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+})
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d")
+ (const_int 1)
+ (minus:SI (const_int 31)
+ (match_operand:SI 1 "general_operand" "di")))
+ (const_int 0)))]
+ ""
+{
+ return output_btst (operands, operands[1], operands[0], insn, 31);
+})
+
+;; The following two patterns are like the previous two
+;; except that they use the fact that bit-number operands
+;; are automatically masked to 3 or 5 bits.
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o")
+ (const_int 1)
+ (minus:SI (const_int 7)
+ (and:SI
+ (match_operand:SI 1 "register_operand" "d")
+ (const_int 7))))
+ (const_int 0)))]
+ ""
+{
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+})
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d")
+ (const_int 1)
+ (minus:SI (const_int 31)
+ (and:SI
+ (match_operand:SI 1 "register_operand" "d")
+ (const_int 31))))
+ (const_int 0)))]
+ ""
+{
+ return output_btst (operands, operands[1], operands[0], insn, 31);
+})
+
+;; Nonoffsettable mem refs are ok in this one pattern
+;; since we don't try to adjust them.
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "m")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (const_int 0)))]
+ "(unsigned) INTVAL (operands[1]) < 8 && !TARGET_COLDFIRE"
+{
+ operands[1] = GEN_INT (7 - INTVAL (operands[1]));
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+})
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "do")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (const_int 0)))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ operands[0] = adjust_address (operands[0], QImode,
+ INTVAL (operands[1]) / 8);
+ operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8);
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+ }
+ operands[1] = GEN_INT (31 - INTVAL (operands[1]));
+ return output_btst (operands, operands[1], operands[0], insn, 31);
+})
+
+;; This is the same as the above pattern except for the constraints.
+;; The 'o' has been replaced with 'Q'.
+
+(define_insn ""
+ [(set
+ (cc0)
+ (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "dQ")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (const_int 0)))]
+ "TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ operands[0] = adjust_address (operands[0], QImode,
+ INTVAL (operands[1]) / 8);
+ operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8);
+ return output_btst (operands, operands[1], operands[0], insn, 7);
+ }
+ operands[1] = GEN_INT (31 - INTVAL (operands[1]));
+ return output_btst (operands, operands[1], operands[0], insn, 31);
+})
+
+
+;; move instructions
+
+;; A special case in which it is not desirable
+;; to reload the constant into a data register.
+(define_insn "pushexthisi_const"
+ [(set (match_operand:SI 0 "push_operand" "=m,m,m")
+ (match_operand:SI 1 "const_int_operand" "C0,R,J"))]
+ "INTVAL (operands[1]) >= -0x8000 && INTVAL (operands[1]) < 0x8000"
+ "@
+ clr%.l %0
+ mov3q%.l %1,%-
+ pea %a1"
+ [(set_attr "type" "clr_l,mov3q_l,pea")])
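+
+;; Assuming the usual meaning of this port's constraint letters (see
+;; constraints.md): constant 0 takes the clr.l alternative, a constant
+;; in mov3q range takes "mov3q.l #N,-(%sp)" on ColdFire, and any other
+;; 16-bit constant is pushed with "pea N.w".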
+
+;This is never used.
+;(define_insn "swapsi"
+; [(set (match_operand:SI 0 "nonimmediate_operand" "+r")
+; (match_operand:SI 1 "general_operand" "+r"))
+; (set (match_dup 1) (match_dup 0))]
+; ""
+; "exg %1,%0")
+
+;; Special case of fullword move when source is zero for 68000_10.
+;; moveq is faster on the 68000.
+(define_insn "*movsi_const0_68000_10"
+ [(set (match_operand:SI 0 "movsi_const0_operand" "=d,a,g")
+ (const_int 0))]
+ "TUNE_68000_10"
+ "@
+ moveq #0,%0
+ sub%.l %0,%0
+ clr%.l %0"
+ [(set_attr "type" "moveq_l,alu_l,clr_l")
+ (set_attr "opy" "*,0,*")])
+
+;; Special case of fullword move when source is zero for 68040_60.
+;; On the '040, 'subl an,an' takes 2 clocks while lea takes only 1
+(define_insn "*movsi_const0_68040_60"
+ [(set (match_operand:SI 0 "movsi_const0_operand" "=a,g")
+ (const_int 0))]
+ "TUNE_68040_60"
+{
+ if (which_alternative == 0)
+ return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0";
+ else if (which_alternative == 1)
+ return "clr%.l %0";
+ else
+ {
+ gcc_unreachable ();
+ return "";
+ }
+}
+ [(set_attr "type" "lea,clr_l")])
+
+;; Special case of fullword move when source is zero.
+(define_insn "*movsi_const0"
+ [(set (match_operand:SI 0 "movsi_const0_operand" "=a,g")
+ (const_int 0))]
+ "!(TUNE_68000_10 || TUNE_68040_60)"
+ "@
+ sub%.l %0,%0
+ clr%.l %0"
+ [(set_attr "type" "alu_l,clr_l")
+ (set_attr "opy" "0,*")])
+
+;; General case of fullword move.
+;;
+;; This is the main "hook" for PIC code. When generating
+;; PIC, movsi is responsible for determining when the source address
+;; needs PIC relocation and appropriately calling legitimize_pic_address
+;; to perform the actual relocation.
+;;
+;; In both the PIC and non-PIC cases the patterns generated will
+;; be matched by the next define_insn.
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))]
+ ""
+{
+ rtx tmp, base, offset;
+
+ /* Recognize the case where operand[1] is a reference to thread-local
+ data and load its address to a register. */
+ if (!TARGET_PCREL && m68k_tls_reference_p (operands[1], false))
+ {
+ rtx tmp = operands[1];
+ rtx addend = NULL;
+
+ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (tmp, 0), 1);
+ tmp = XEXP (XEXP (tmp, 0), 0);
+ }
+
+ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0);
+
+ tmp = m68k_legitimize_tls_address (tmp);
+
+ if (addend)
+ {
+ if (!REG_P (tmp))
+ {
+ rtx reg;
+
+ reg = gen_reg_rtx (Pmode);
+ emit_move_insn (reg, tmp);
+ tmp = reg;
+ }
+
+ tmp = gen_rtx_PLUS (SImode, tmp, addend);
+ }
+
+ operands[1] = tmp;
+ }
+ else if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode))
+ {
+ /* The source is an address which requires PIC relocation.
+ Call legitimize_pic_address with the source, mode, and a relocation
+ register (a new pseudo, or the final destination if reload_in_progress
+ is set). Then fall through normally */
+ rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
+ operands[1] = legitimize_pic_address (operands[1], SImode, temp);
+ }
+ else if (flag_pic && TARGET_PCREL && ! reload_in_progress)
+ {
+ /* Don't allow writes to memory except via a register;
+ the m68k doesn't consider PC-relative addresses to be writable. */
+ if (symbolic_operand (operands[0], SImode))
+ operands[0] = force_reg (SImode, XEXP (operands[0], 0));
+ else if (GET_CODE (operands[0]) == MEM
+ && symbolic_operand (XEXP (operands[0], 0), SImode))
+ operands[0] = gen_rtx_MEM (SImode,
+ force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
+ {
+ split_const (operands[1], &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF
+ && !offset_within_block_p (base, INTVAL (offset)))
+ {
+ tmp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (SImode);
+ emit_move_insn (tmp, base);
+ emit_insn (gen_addsi3 (operands[0], tmp, offset));
+ DONE;
+ }
+ }
+})
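+
+;; A sketch of the TLS case above: given the address
+;;   (const (plus (symbol_ref "x") (const_int 4)))
+;; the symbol is run through m68k_legitimize_tls_address and the
+;; addend is then reapplied as a PLUS on the resulting register.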
+
+;; General case of fullword move.
+(define_insn "*movsi_m68k"
+ ;; Notes: make sure no alternative allows g vs g.
+ ;; We don't allow f-regs since fixed point cannot go in them.
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d,a<")
+ (match_operand:SI 1 "general_src_operand" "damSnT,n,i"))]
+ "!TARGET_COLDFIRE && reload_completed"
+{
+ return output_move_simode (operands);
+})
+
+;; Before reload is completed the register constraints
+;; force integer constants in range for a moveq to be reloaded
+;; if they are headed for memory.
+(define_insn "*movsi_m68k2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d,a<")
+ (match_operand:SI 1 "general_src_operand" "damSKT,n,i"))]
+
+ "!TARGET_COLDFIRE"
+{
+ return output_move_simode (operands);
+})
+
+;; ColdFire move instructions can have at most one operand that uses a
+;; complex effective-address mode (mode field >= 6).
+(define_insn "*movsi_cf"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g,d, d, d, d, d, a,Ap, a, r<Q>,g, U")
+ (match_operand:SI 1 "general_operand" " R,CQ,CW,CZ,CS,Ci,J,J Cs,Cs, g, Rr<Q>,U"))]
+ "TARGET_COLDFIRE"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "mov3q%.l %1,%0";
+
+ case 1:
+ return "moveq %1,%0";
+
+ case 2:
+ {
+ unsigned u = INTVAL (operands[1]);
+
+ operands[1] = GEN_INT ((u << 16) | (u >> 16)); /*|*/
+ return "moveq %1,%0\n\tswap %0";
+ }
+
+ case 3:
+ return "mvz%.w %1,%0";
+
+ case 4:
+ return "mvs%.w %1,%0";
+
+ case 5:
+ return "move%.l %1,%0";
+
+ case 6:
+ return "move%.w %1,%0";
+
+ case 7:
+ return "pea %a1";
+
+ case 8:
+ return "lea %a1,%0";
+
+ case 9:
+ case 10:
+ case 11:
+ return "move%.l %1,%0";
+
+ default:
+ gcc_unreachable ();
+ return "";
+ }
+}
+ [(set_attr "type" "mov3q_l,moveq_l,*,mvsz,mvsz,move_l,move,pea,lea,move_l,move_l,move_l")])
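+
+;; Alternative 2 above synthesizes swap-range constants; e.g. (purely
+;; illustrative) 0x560000 is rewritten at output time to its
+;; half-swapped value 0x56 and emitted as
+;;   moveq #86,%d0
+;;   swap %d0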
+
+;; Special case of fullword move, where we need to get a non-GOT PIC
+;; reference into an address register.
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=a<")
+ (match_operand:SI 1 "pcrel_address" ""))]
+ "TARGET_PCREL"
+{
+ if (push_operand (operands[0], SImode))
+ return "pea %a1";
+ return "lea %a1,%0";
+})
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
+ (match_operand:HI 1 "general_src_operand" "gS"))]
+ "!TARGET_COLDFIRE"
+ "* return output_move_himode (operands);")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r<Q>,g,U")
+ (match_operand:HI 1 "general_operand" "g,r<Q>,U"))]
+ "TARGET_COLDFIRE"
+ "* return output_move_himode (operands);")
+
+(define_expand "movstricthi"
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" ""))
+ (match_operand:HI 1 "general_src_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
+ (match_operand:HI 1 "general_src_operand" "rmSn"))]
+ "!TARGET_COLDFIRE"
+ "* return output_move_stricthi (operands);")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+d,m"))
+ (match_operand:HI 1 "general_src_operand" "rmn,r"))]
+ "TARGET_COLDFIRE"
+ "* return output_move_stricthi (operands);")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_src_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d,*a,m")
+ (match_operand:QI 1 "general_src_operand" "dmSi*a,di*a,dmSi"))]
+ "!TARGET_COLDFIRE"
+ "* return output_move_qimode (operands);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d<Q>,dm,U,d*a")
+ (match_operand:QI 1 "general_src_operand" "dmi,d<Q>,U,di*a"))]
+ "TARGET_COLDFIRE"
+ "* return output_move_qimode (operands);")
+
+(define_expand "movstrictqi"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" ""))
+ (match_operand:QI 1 "general_src_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
+ (match_operand:QI 1 "general_src_operand" "dmSn"))]
+ "!TARGET_COLDFIRE"
+ "* return output_move_strictqi (operands);")
+
+(define_insn "*movstrictqi_cf"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+d, Ac, d,m"))
+ (match_operand:QI 1 "general_src_operand" "C0,C0, dmn,d"))]
+ "TARGET_COLDFIRE"
+ "@
+ clr%.b %0
+ clr%.b %0
+ move%.b %1,%0
+ move%.b %1,%0"
+ [(set_attr "type" "clr,clr,move,move")])
+
+(define_expand "pushqi1"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2)))
+ (set (mem:QI (plus:SI (reg:SI SP_REG) (const_int 1)))
+ (match_operand:QI 0 "general_operand" ""))]
+ "!TARGET_COLDFIRE"
+ "")
+
+(define_expand "reload_insf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f")
+ (match_operand:SF 1 "general_operand" "mf"))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (emit_move_sequence (operands, SFmode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "reload_outsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "register_operand" "f"))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (emit_move_sequence (operands, SFmode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=rmf")
+ (match_operand:SF 1 "general_operand" "rmfF"))]
+ "!TARGET_COLDFIRE"
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "f%$move%.x %1,%0";
+ else if (ADDRESS_REG_P (operands[1]))
+ return "move%.l %1,%-\;f%$move%.s %+,%0";
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return output_move_const_single (operands);
+ return "f%$move%.s %f1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ {
+ if (ADDRESS_REG_P (operands[0]))
+ return "fmove%.s %1,%-\;move%.l %+,%0";
+ return "fmove%.s %f1,%0";
+ }
+ if (operands[1] == CONST0_RTX (SFmode)
+ /* clr insns on 68000 read before writing. */
+ && ((TARGET_68010 || TARGET_COLDFIRE)
+ || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
+ {
+ if (ADDRESS_REG_P (operands[0]))
+ {
+ /* On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 */
+ if (TUNE_68040_60)
+ return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0";
+ else
+ return "sub%.l %0,%0";
+ }
+ /* moveq is faster on the 68000. */
+ if (DATA_REG_P (operands[0]) && TUNE_68000_10)
+ return "moveq #0,%0";
+ return "clr%.l %0";
+ }
+ return "move%.l %1,%0";
+})
+
+(define_insn "movsf_cf_soft"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r<Q>,g,U")
+ (match_operand:SF 1 "general_operand" "g,r<Q>,U"))]
+ "TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU"
+ "move%.l %1,%0"
+ [(set_attr "type" "move_l")])
+
+;; SFmode MEMs are restricted to modes 2-4 if TARGET_COLDFIRE_FPU.
+;; The move instructions can handle all combinations.
+(define_insn "movsf_cf_hard"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=r<Q>U, f, f,mr,f,r<Q>,f,m")
+        (match_operand:SF 1 "general_operand" " f, r<Q>U,f,rm,F,F, m,f"))]
+ "TARGET_COLDFIRE_FPU"
+{
+  if (which_alternative == 4 || which_alternative == 5)
+    {
+      rtx xoperands[2];
+      REAL_VALUE_TYPE r;
+      long l;
+      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+      REAL_VALUE_TO_TARGET_SINGLE (r, l);
+      xoperands[0] = operands[0];
+      xoperands[1] = GEN_INT (l);
+      if (which_alternative == 5)
+        {
+          if (l == 0)
+            {
+              if (ADDRESS_REG_P (xoperands[0]))
+                output_asm_insn ("sub%.l %0,%0", xoperands);
+              else
+                output_asm_insn ("clr%.l %0", xoperands);
+            }
+          else if (GET_CODE (operands[0]) == MEM
+                   && symbolic_operand (XEXP (operands[0], 0), SImode))
+            output_asm_insn ("move%.l %1,%-;move%.l %+,%0", xoperands);
+          else
+            output_asm_insn ("move%.l %1,%0", xoperands);
+          return "";
+        }
+      if (l != 0)
+        output_asm_insn ("move%.l %1,%-;fsmove%.s %+,%0", xoperands);
+      else
+        output_asm_insn ("clr%.l %-;fsmove%.s %+,%0", xoperands);
+      return "";
+    }
+ if (FP_REG_P (operands[0]))
+ {
+ if (ADDRESS_REG_P (operands[1]))
+ return "move%.l %1,%-;fsmove%.s %+,%0";
+ if (FP_REG_P (operands[1]))
+ return "fsmove%.d %1,%0";
+ return "fsmove%.s %f1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ {
+ if (ADDRESS_REG_P (operands[0]))
+ return "fmove%.s %1,%-;move%.l %+,%0";
+ return "fmove%.s %f1,%0";
+ }
+ if (operands[1] == CONST0_RTX (SFmode))
+ {
+ if (ADDRESS_REG_P (operands[0]))
+ return "sub%.l %0,%0";
+ return "clr%.l %0";
+ }
+ return "move%.l %1,%0";
+})
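+
+;; E.g. (illustrative) alternative 4 loading 1.0f, whose single-precision
+;; image is 0x3f800000, bounces the bits through the stack:
+;;   move.l #1065353216,-(%sp)
+;;   fsmove.s (%sp)+,%fp0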
+
+(define_expand "reload_indf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f")
+ (match_operand:DF 1 "general_operand" "mf"))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (emit_move_sequence (operands, DFmode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "reload_outdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "register_operand" "f"))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (emit_move_sequence (operands, DFmode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+{
+ if (TARGET_COLDFIRE_FPU)
+ if (emit_move_sequence (operands, DFmode, 0))
+ DONE;
+})
+
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=rm,rf,rf,&rof<>")
+ (match_operand:DF 1 "general_operand" "*rf,m,0,*rofE<>"))]
+; [(set (match_operand:DF 0 "nonimmediate_operand" "=rm,&rf,&rof<>")
+; (match_operand:DF 1 "general_operand" "rf,m,rofF<>"))]
+ "!TARGET_COLDFIRE"
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "f%&move%.x %1,%0";
+ if (REG_P (operands[1]))
+ {
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ output_asm_insn ("move%.l %1,%-", operands);
+ return "f%&move%.d %+,%0";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return output_move_const_double (operands);
+ return "f%&move%.d %f1,%0";
+ }
+ else if (FP_REG_P (operands[1]))
+ {
+ if (REG_P (operands[0]))
+ {
+ output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ else
+ return "fmove%.d %f1,%0";
+ }
+ return output_move_double (operands);
+})
+
+(define_insn_and_split "movdf_cf_soft"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,g")
+ (match_operand:DF 1 "general_operand" "g,r"))]
+ "TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ m68k_emit_move_double (operands);
+ DONE;
+})
+
+(define_insn "movdf_cf_hard"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f, <Q>U,r,f,r,r,m,f")
+ (match_operand:DF 1 "general_operand" " f<Q>U,f, f,r,r,m,r,E"))]
+ "TARGET_COLDFIRE_FPU"
+{
+ rtx xoperands[3];
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ switch (which_alternative)
+ {
+ default:
+ return "fdmove%.d %1,%0";
+ case 1:
+ return "fmove%.d %1,%0";
+ case 2:
+ return "fmove%.d %1,%-;move%.l %+,%0;move%.l %+,%R0";
+ case 3:
+ return "move%.l %R1,%-;move%.l %1,%-;fdmove%.d %+,%0";
+ case 4: case 5: case 6:
+ return output_move_double (operands);
+ case 7:
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ xoperands[0] = operands[0];
+ xoperands[1] = GEN_INT (l[0]);
+ xoperands[2] = GEN_INT (l[1]);
+ if (operands[1] == CONST0_RTX (DFmode))
+ output_asm_insn ("clr%.l %-;clr%.l %-;fdmove%.d %+,%0",
+ xoperands);
+ else
+ if (l[1] == 0)
+ output_asm_insn ("clr%.l %-;move%.l %1,%-;fdmove%.d %+,%0",
+ xoperands);
+ else
+ output_asm_insn ("move%.l %2,%-;move%.l %1,%-;fdmove%.d %+,%0",
+ xoperands);
+ return "";
+ }
+})
+
+;; ??? The XFmode patterns are schizophrenic about whether constants are
+;; allowed.  Most but not all have predicates and constraints that disallow
+;; constants. Most but not all have output templates that handle constants.
+;; See also LEGITIMATE_CONSTANT_P.
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ ""
+{
+ /* We can't rewrite operands during reload. */
+ if (! reload_in_progress)
+ {
+ if (CONSTANT_P (operands[1]))
+ {
+ operands[1] = force_const_mem (XFmode, operands[1]);
+ if (! memory_address_p (XFmode, XEXP (operands[1], 0)))
+ operands[1] = adjust_address (operands[1], XFmode, 0);
+ }
+ if (flag_pic && TARGET_PCREL)
+ {
+ /* Don't allow writes to memory except via a register; the
+ m68k doesn't consider PC-relative addresses to be writable. */
+ if (GET_CODE (operands[0]) == MEM
+ && symbolic_operand (XEXP (operands[0], 0), SImode))
+ operands[0] = gen_rtx_MEM (XFmode,
+ force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ }
+})
+
+(define_insn ""
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,!r,!f,!r,m,!r")
+ (match_operand:XF 1 "nonimmediate_operand" "m,f,f,f,r,!r,!r,m"))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "fmove%.x %1,%0";
+ if (REG_P (operands[1]))
+ {
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ output_asm_insn ("move%.l %1,%-", operands);
+ return "fmove%.x %+,%0";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return "fmove%.x %1,%0";
+ return "fmove%.x %f1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ {
+ if (REG_P (operands[0]))
+ {
+ output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ output_asm_insn ("move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ /* Must be memory destination. */
+ return "fmove%.x %f1,%0";
+ }
+ return output_move_double (operands);
+})
+
+(define_insn ""
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=rm,rf,&rof<>")
+ (match_operand:XF 1 "nonimmediate_operand" "rf,m,rof<>"))]
+ "! TARGET_68881 && ! TARGET_COLDFIRE"
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "fmove%.x %1,%0";
+ if (REG_P (operands[1]))
+ {
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ output_asm_insn ("move%.l %1,%-", operands);
+ return "fmove%.x %+,%0";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return "fmove%.x %1,%0";
+ return "fmove%.x %f1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ {
+ if (REG_P (operands[0]))
+ {
+ output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ output_asm_insn ("move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ else
+ return "fmove%.x %f1,%0";
+ }
+ return output_move_double (operands);
+})
+
+(define_insn ""
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=r,g")
+ (match_operand:XF 1 "nonimmediate_operand" "g,r"))]
+ "! TARGET_68881 && TARGET_COLDFIRE"
+ "* return output_move_double (operands);")
+
+(define_expand "movdi"
+ ;; Let's see if it really still needs to handle fp regs, and, if so, why.
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "")
+
+;; movdi can apply to fp regs in some cases
+(define_insn ""
+ ;; Let's see if it really still needs to handle fp regs, and, if so, why.
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r,&ro<>")
+ (match_operand:DI 1 "general_operand" "rF,m,roi<>F"))]
+; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&r,&ro<>,!&rm,!&f")
+; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF"))]
+; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&rf,&ro<>,!&rm,!&f")
+; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF,rfF"))]
+ "!TARGET_COLDFIRE"
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "fmove%.x %1,%0";
+ if (REG_P (operands[1]))
+ {
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ output_asm_insn ("move%.l %1,%-", operands);
+ return "fmove%.d %+,%0";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return output_move_const_double (operands);
+ return "fmove%.d %f1,%0";
+ }
+ else if (FP_REG_P (operands[1]))
+ {
+ if (REG_P (operands[0]))
+ {
+ output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ else
+ return "fmove%.d %f1,%0";
+ }
+ return output_move_double (operands);
+})
+
+(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,g")
+ (match_operand:DI 1 "general_operand" "g,r"))]
+ "TARGET_COLDFIRE"
+ "* return output_move_double (operands);")
+
+;; This goes after the move instructions
+;; because the move instructions are better (require no spilling)
+;; when they can apply. It goes before the add/sub insns
+;; so we will prefer it to them.
+
+(define_insn "pushasi"
+ [(set (match_operand:SI 0 "push_operand" "=m")
+ (match_operand:SI 1 "address_operand" "p"))]
+ ""
+ "pea %a1"
+ [(set_attr "type" "pea")])
+
+;; truncation instructions
+(define_insn "truncsiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
+ (truncate:QI
+ (match_operand:SI 1 "general_src_operand" "doJS,i")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == REG)
+ {
+ /* Must clear condition codes, since the move.l bases them on
+ the entire 32 bits, not just the desired 8 bits. */
+ CC_STATUS_INIT;
+ return "move%.l %1,%0";
+ }
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = adjust_address (operands[1], QImode, 3);
+ return "move%.b %1,%0";
+})
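+
+;; The offset of 3 reflects big-endian layout: the low-order byte of a
+;; 32-bit memory operand is its last byte, so e.g. truncating the long
+;; at 8(%a0) becomes "move.b 11(%a0),%d0".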
+
+(define_insn "trunchiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
+ (truncate:QI
+ (match_operand:HI 1 "general_src_operand" "doJS,i")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == REG
+ && (GET_CODE (operands[1]) == MEM
+ || GET_CODE (operands[1]) == CONST_INT))
+ {
+ /* Must clear condition codes, since the move.w bases them on
+ the entire 16 bits, not just the desired 8 bits. */
+ CC_STATUS_INIT;
+ return "move%.w %1,%0";
+ }
+ if (GET_CODE (operands[0]) == REG)
+ {
+ /* Must clear condition codes, since the move.l bases them on
+ the entire 32 bits, not just the desired 8 bits. */
+ CC_STATUS_INIT;
+ return "move%.l %1,%0";
+ }
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = adjust_address (operands[1], QImode, 1);
+ return "move%.b %1,%0";
+})
+
+(define_insn "truncsihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm,d")
+ (truncate:HI
+ (match_operand:SI 1 "general_src_operand" "roJS,i")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == REG)
+ {
+ /* Must clear condition codes, since the move.l bases them on
+	 the entire 32 bits, not just the desired 16 bits.  */
+ CC_STATUS_INIT;
+ return "move%.l %1,%0";
+ }
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = adjust_address (operands[1], QImode, 2);
+ return "move%.w %1,%0";
+})
+
+;; zero extension instructions
+
+;; two special patterns to match various post_inc/pre_dec patterns
+(define_insn_and_split "*zero_extend_inc"
+ [(set (match_operand 0 "post_inc_operand" "")
+ (zero_extend (match_operand 1 "register_operand" "")))]
+ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT &&
+ GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT &&
+ GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2"
+ "#"
+ ""
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (match_dup 0)
+ (match_dup 1))]
+{
+ operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0);
+})
+
+(define_insn_and_split "*zero_extend_dec"
+ [(set (match_operand 0 "pre_dec_operand" "")
+ (zero_extend (match_operand 1 "register_operand" "")))]
+ "(GET_MODE (operands[0]) != HImode || XEXP (XEXP (operands[0], 0), 0) != stack_pointer_rtx) &&
+ GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT &&
+ GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT &&
+ GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2"
+ "#"
+ ""
+ [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 0)
+ (const_int 0))]
+{
+ operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0);
+})
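+
+;; The stack-pointer exclusion above exists because a byte push through
+;; -(%sp) decrements the stack pointer by 2 to keep it even, so
+;; splitting an HImode push into two QImode pushes would move %sp by 4.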
+
+(define_insn_and_split "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_src_operand" "")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3)
+ (const_int 0))]
+{
+ operands[2] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+})
+
+(define_insn_and_split "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "nonimmediate_src_operand" "")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3)
+ (const_int 0))]
+{
+ operands[2] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+})
+
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+})
+
+(define_insn_and_split "*zero_extendsidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+ "#"
+ ""
+ [(set (match_dup 2)
+ (match_dup 1))
+ (set (match_dup 3)
+ (const_int 0))]
+{
+ operands[2] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+})
+
+(define_insn "*zero_extendhisi2_cf"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
+ "ISA_HAS_MVS_MVZ"
+ "mvz%.w %1,%0"
+ [(set_attr "type" "mvsz")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
+ ""
+ "#")
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "")))]
+ "!TARGET_COLDFIRE"
+ "")
+
+(define_insn "*zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
+ "!TARGET_COLDFIRE"
+ "#")
+
+(define_insn "*zero_extendqisi2_cfv4"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
+ "ISA_HAS_MVS_MVZ"
+ "mvz%.b %1,%0"
+ [(set_attr "type" "mvsz")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
+ ""
+ "#")
+
+;; These two splits handle every zero_extend that isn't matched by
+;; one of the more specific patterns above.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (zero_extend (match_operand 1 "nonimmediate_src_operand" "")))]
+ "!ISA_HAS_MVS_MVZ
+ && reload_completed
+ && reg_mentioned_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 2))
+ (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 4 [(match_dup 0) (match_dup 3)]))]
+{
+ operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]);
+ operands[3] = GEN_INT (GET_MODE_MASK (GET_MODE (operands[1])));
+ operands[4] = gen_rtx_AND (GET_MODE (operands[0]), operands[0], operands[3]);
+})
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (zero_extend (match_operand 1 "nonimmediate_src_operand" "")))]
+ "!ISA_HAS_MVS_MVZ && reload_completed"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (strict_low_part (match_dup 2))
+ (match_dup 1))]
+{
+ operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]);
+})
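+
+;; E.g. (illustrative) zero-extending %d1 byte-to-long without mvz via
+;; the second split:
+;;   clr.l %d0
+;;   move.b %d1,%d0
+;; The first split handles the overlapping case by writing the low part
+;; first and then masking the register with AND.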
+
+;; sign extension instructions
+
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
+ (sign_extend:DI (match_operand:QI 1 "general_src_operand" "rmS")))]
+ ""
+{
+ CC_STATUS_INIT;
+ operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ if (ISA_HAS_MVS_MVZ)
+ return "mvs%.b %1,%2\;smi %0\;extb%.l %0";
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ {
+ if (ADDRESS_REG_P (operands[1]))
+ return "move%.w %1,%2\;extb%.l %2\;smi %0\;extb%.l %0";
+ else
+ return "move%.b %1,%2\;extb%.l %2\;smi %0\;extb%.l %0";
+ }
+ else
+ {
+ if (ADDRESS_REG_P (operands[1]))
+ return "move%.w %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0";
+ else
+ return "move%.b %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0";
+ }
+})
+
+(define_insn "extendhidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
+ (sign_extend:DI
+ (match_operand:HI 1 "general_src_operand" "rmS")))]
+ ""
+{
+ CC_STATUS_INIT;
+ operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ if (ISA_HAS_MVS_MVZ)
+ return "mvs%.w %1,%2\;smi %0\;extb%.l %0";
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "move%.w %1,%2\;ext%.l %2\;smi %0\;extb%.l %0";
+ else
+ return "move%.w %1,%2\;ext%.l %2\;smi %0\;ext%.w %0\;ext%.l %0";
+})
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,o,o,<")
+ (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_src_operand" "rm,rm,r<Q>,rm")))
+ (clobber (match_scratch:SI 2 "=X,d,d,d"))]
+ ""
+{
+ CC_STATUS_INIT;
+
+ if (which_alternative == 0)
+ /* Handle alternative 0. */
+ {
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "move%.l %1,%R0\;smi %0\;extb%.l %0";
+ else
+ return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0";
+ }
+
+ /* Handle alternatives 1, 2 and 3. We don't need to adjust address by 4
+ in alternative 3 because autodecrement will do that for us. */
+ operands[3] = adjust_address (operands[0], SImode,
+ which_alternative == 3 ? 0 : 4);
+ operands[0] = adjust_address (operands[0], SImode, 0);
+
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0";
+ else
+ return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0";
+}
+ [(set_attr "ok_for_coldfire" "yes,no,yes,yes")])
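+
+;; "smi" stores 0xff in its byte destination when the N flag is set and
+;; 0x00 otherwise, so the smi/ext sequences above materialize 0 or -1
+;; (the sign word) in the high half of the result.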
+
+;; Special case when one can avoid register clobbering: copy and test.
+;; Maybe there is a way to make that the general case, by forcing the
+;; result of the SI tree to be in the lower register of the DI target.
+
+(define_insn "extendplussidi"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI (plus:SI (match_operand:SI 1 "general_operand" "%rmn")
+ (match_operand:SI 2 "general_operand" "rmn"))))]
+ ""
+{
+ CC_STATUS_INIT;
+ operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ if (GET_CODE (operands[1]) == CONST_INT
+ && (unsigned) INTVAL (operands[1]) > 8)
+ {
+ rtx tmp = operands[1];
+
+ operands[1] = operands[2];
+ operands[2] = tmp;
+ }
+ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[3]))
+ output_asm_insn ("add%.l %2,%3", operands);
+ else
+ output_asm_insn ("move%.l %2,%3\;add%.l %1,%3", operands);
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "smi %0\;extb%.l %0";
+ else
+ return "smi %0\;ext%.w %0\;ext%.l %0";
+})
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_src_operand" "")))]
+ ""
+ "")
+
+(define_insn "*cfv4_extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
+ "ISA_HAS_MVS_MVZ"
+ "mvs%.w %1,%0"
+ [(set_attr "type" "mvsz")])
+
+(define_insn "*68k_extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=*d,a")
+ (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_src_operand" "0,rmS")))]
+ "!ISA_HAS_MVS_MVZ"
+ "@
+ ext%.l %0
+ move%.w %1,%0"
+ [(set_attr "type" "ext,move")])
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0")))]
+ ""
+ "ext%.w %0"
+ [(set_attr "type" "ext")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "TARGET_68020 || TARGET_COLDFIRE"
+ "")
+
+(define_insn "*cfv4_extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "rms")))]
+ "ISA_HAS_MVS_MVZ"
+ "mvs%.b %1,%0"
+ [(set_attr "type" "mvsz")])
+
+(define_insn "*68k_extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0")))]
+ "TARGET_68020 || (TARGET_COLDFIRE && !ISA_HAS_MVS_MVZ)"
+ "extb%.l %0"
+ [(set_attr "type" "ext")])
+
+;; Conversions between float and double.
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (float_extend:DF
+ (match_operand:SF 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=*fdm,f")
+ (float_extend:DF
+ (match_operand:SF 1 "general_operand" "f,dmF")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
+ {
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ /* Extending float to double in an fp-reg is a no-op.
+ NOTICE_UPDATE_CC has already assumed that the
+ cc will be set. So cancel what it did. */
+ cc_status = cc_prev_status;
+ return "";
+ }
+ return "f%&move%.x %1,%0";
+ }
+ if (FP_REG_P (operands[0]))
+ return "f%&move%.s %f1,%0";
+ if (DATA_REG_P (operands[0]) && FP_REG_P (operands[1]))
+ {
+ output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ return "fmove%.d %f1,%0";
+})
+
+(define_insn "extendsfdf2_cf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f")
+ (float_extend:DF
+ (match_operand:SF 1 "general_operand" "f,<Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
+ {
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ /* Extending float to double in an fp-reg is a no-op.
+ NOTICE_UPDATE_CC has already assumed that the
+ cc will be set. So cancel what it did. */
+ cc_status = cc_prev_status;
+ return "";
+ }
+ return "fdmove%.d %1,%0";
+ }
+ return "fdmove%.s %f1,%0";
+})
+
+;; This cannot output into an f-reg because there is no way to be
+;; sure of truncating in that case.
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (float_truncate:SF
+ (match_operand:DF 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+;; On the '040 we can truncate in a register accurately and easily.
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "general_operand" "fmG")))]
+ "TARGET_68881 && TARGET_68040"
+{
+ if (FP_REG_P (operands[1]))
+ return "f%$move%.x %1,%0";
+ return "f%$move%.d %f1,%0";
+})
+
+(define_insn "truncdfsf2_cf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,d<Q>U")
+ (float_truncate:SF
+ (match_operand:DF 1 "general_operand" "<Q>U,f")))]
+ "TARGET_COLDFIRE_FPU"
+ "@
+ fsmove%.d %1,%0
+ fmove%.s %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "*truncdfsf2_68881"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=dm")
+ (float_truncate:SF
+ (match_operand:DF 1 "general_operand" "f")))]
+ "TARGET_68881"
+ "fmove%.s %f1,%0"
+ [(set_attr "type" "fmove")])
+
+;; Conversion between fixed point and floating point.
+;; Note that among the fix-to-float insns
+;; the ones that start with SImode come first.
+;; That is so that an operand that is a CONST_INT
+;; (and therefore lacks a specific machine mode)
+;; will be recognized as SImode (which is always valid)
+;; rather than as QImode or HImode.
+
+(define_expand "floatsi<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (float:FP (match_operand:SI 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "floatsi<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:SI 1 "general_operand" "dmi")))]
+ "TARGET_68881"
+ "f<FP:round>move%.l %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "floatsi<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:SI 1 "general_operand" "d<Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+ "f<FP:prec>move%.l %1,%0"
+ [(set_attr "type" "fmove")])
+
+
+(define_expand "floathi<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (float:FP (match_operand:HI 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "floathi<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:HI 1 "general_operand" "dmn")))]
+ "TARGET_68881"
+ "fmove%.w %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "floathi<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:HI 1 "general_operand" "d<Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+ "fmove%.w %1,%0"
+ [(set_attr "type" "fmove")])
+
+
+(define_expand "floatqi<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (float:FP (match_operand:QI 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "floatqi<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:QI 1 "general_operand" "dmn")))]
+ "TARGET_68881"
+ "fmove%.b %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "floatqi<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (float:FP (match_operand:QI 1 "general_operand" "d<Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+ "fmove%.b %1,%0"
+ [(set_attr "type" "fmove")])
+
+
+;; New routines to convert floating-point values to integers
+;; to be used on the '040. These should be faster than trapping
+;; into the kernel to emulate fintrz. They should also be faster
+;; than calling the subroutines fixsfsi or fixdfsi.
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ (clobber (match_scratch:SI 2 "=d"))
+ (clobber (match_scratch:SI 3 "=d"))]
+ "TARGET_68881 && TUNE_68040"
+{
+ CC_STATUS_INIT;
+ return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.l %1,%0\;fmovem%.l %2,%!";
+})
+
+(define_insn "fix_truncdfhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
+ (fix:HI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ (clobber (match_scratch:SI 2 "=d"))
+ (clobber (match_scratch:SI 3 "=d"))]
+ "TARGET_68881 && TUNE_68040"
+{
+ CC_STATUS_INIT;
+ return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.w %1,%0\;fmovem%.l %2,%!";
+})
+
+(define_insn "fix_truncdfqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (fix:QI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ (clobber (match_scratch:SI 2 "=d"))
+ (clobber (match_scratch:SI 3 "=d"))]
+ "TARGET_68881 && TUNE_68040"
+{
+ CC_STATUS_INIT;
+ return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.b %1,%0\;fmovem%.l %2,%!";
+})
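+
+;; The sequence used above saves the FP control register (%!), sets its
+;; rounding-control field to round-toward-zero (moveq #16/or sets bit 4,
+;; and #-33 clears bit 5), converts with fmove, then restores the saved
+;; control word.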
+
+;; Convert a float to a float whose value is an integer.
+;; This is the first stage of converting it to an integer type.
+
+(define_expand "ftrunc<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (fix:FP (match_operand:FP 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT && !TUNE_68040"
+ "")
+
+(define_insn "ftrunc<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
+ "TARGET_68881 && !TUNE_68040"
+{
+ if (FP_REG_P (operands[1]))
+ return "fintrz%.x %f1,%0";
+ return "fintrz%.<FP:prec> %f1,%0";
+}
+ [(set_attr "type" "falu")])
+
+(define_insn "ftrunc<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[1]))
+ return "fintrz%.d %f1,%0";
+ return "fintrz%.<FP:prec> %f1,%0";
+}
+ [(set_attr "type" "falu")])
+
+;; Convert a float whose value is an integer
+;; to an actual integer. Second stage of converting float to integer type.
+(define_expand "fix<mode>qi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (fix:QI (match_operand:FP 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "fix<mode>qi2_68881"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (fix:QI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_68881"
+ "fmove%.b %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "fix<mode>qi2_cf"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d<Q>U")
+ (fix:QI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_COLDFIRE_FPU"
+ "fmove%.b %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_expand "fix<mode>hi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (fix:HI (match_operand:FP 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "fix<mode>hi2_68881"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
+ (fix:HI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_68881"
+ "fmove%.w %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "fix<mode>hi2_cf"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d<Q>U")
+ (fix:HI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_COLDFIRE_FPU"
+ "fmove%.w %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_expand "fix<mode>si2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (fix:SI (match_operand:FP 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "fix<mode>si2_68881"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
+ (fix:SI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_68881"
+ "fmove%.l %1,%0"
+ [(set_attr "type" "fmove")])
+
+(define_insn "fix<mode>si2_cf"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d<Q>U")
+ (fix:SI (match_operand:FP 1 "general_operand" "f")))]
+ "TARGET_COLDFIRE_FPU"
+ "fmove%.l %1,%0"
+ [(set_attr "type" "fmove")])
+
+
+;; add instructions
+
+(define_insn "adddi_lshrdi_63"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
+ (plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "rm")
+ (const_int 63))
+ (match_dup 1)))
+ (clobber (match_scratch:SI 2 "=d"))]
+ ""
+{
+ operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ if (REG_P (operands[1]) && REGNO (operands[1]) == REGNO (operands[0]))
+ return
+ "move%.l %1,%2\;add%.l %2,%2\;subx%.l %2,%2\;sub%.l %2,%3\;subx%.l %2,%0";
+ if (GET_CODE (operands[1]) == REG)
+ operands[4] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
+ operands[4] = operands[1];
+ else
+ operands[4] = adjust_address (operands[1], SImode, 4);
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
+ output_asm_insn ("move%.l %4,%3", operands);
+ output_asm_insn ("move%.l %1,%0\;smi %2", operands);
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ output_asm_insn ("extb%.l %2", operands);
+ else
+ output_asm_insn ("ext%.w %2\;ext%.l %2", operands);
+ if (GET_CODE (operands[1]) != MEM
+ || GET_CODE (XEXP (operands[1], 0)) != PRE_DEC)
+ output_asm_insn ("move%.l %4,%3", operands);
+ return "sub%.l %2,%3\;subx%.l %2,%0";
+})
+
+(define_insn "adddi_sexthishl32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d")
+ (plus:DI (ashift:DI (sign_extend:DI
+ (match_operand:HI 1 "general_operand" "rm,rm,rm,rm"))
+ (const_int 32))
+ (match_operand:DI 2 "general_operand" "0,0,0,0")))
+ (clobber (match_scratch:SI 3 "=&d,X,a,?d"))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (ADDRESS_REG_P (operands[0]))
+ return "add%.w %1,%0";
+ else if (ADDRESS_REG_P (operands[3]))
+ return "move%.w %1,%3\;add%.l %3,%0";
+ else
+ return "move%.w %1,%3\;ext%.l %3\;add%.l %3,%0";
+})
+
+(define_insn "*adddi_dilshr32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,o")
+ (plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro,d")
+ (const_int 32))
+ (match_operand:DI 2 "general_operand" "0,0")))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == REG)
+ operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[2] = adjust_address (operands[0], SImode, 4);
+ return "add%.l %1,%2\;negx%.l %0\;neg%.l %0";
+})
+
+(define_insn "*adddi_dilshr32_cf"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (plus:DI (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "ro")
+ (const_int 32))
+ (match_operand:DI 2 "register_operand" "0")))]
+ "TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ return "add%.l %1,%R0\;negx%.l %0\;neg%.l %0";
+})
+
+(define_insn "adddi_dishl32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o")
+;; (plus:DI (match_operand:DI 2 "general_operand" "%0")
+;; (ashift:DI (match_operand:DI 1 "general_operand" "ro")
+;; (const_int 32))))]
+ (plus:DI (ashift:DI (match_operand:DI 1 "general_operand" "ro,d")
+ (const_int 32))
+ (match_operand:DI 2 "general_operand" "0,0")))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[1]) == REG)
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ else
+ operands[1] = adjust_address (operands[1], SImode, 4);
+ return "add%.l %1,%0";
+}
+ [(set_attr "type" "alu_l")])
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d")
+ (plus:DI (match_operand:DI 1 "general_operand" "%0,0,0,0")
+ (match_operand:DI 2 "general_operand" "d,no>,d,a")))
+ (clobber (match_scratch:SI 3 "=&d,&d,X,&d"))]
+ ""
+{
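+  /* The 64-bit sum is formed as add.l on the low-order words followed
+     by addx.l on the high-order words; %R prints the low half of a
+     DImode operand (the word one past it) on this big-endian target.  */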
+ if (DATA_REG_P (operands[0]))
+ {
+ if (DATA_REG_P (operands[2]))
+ return "add%.l %R2,%R0\;addx%.l %2,%0";
+ else if (GET_CODE (operands[2]) == MEM
+ && GET_CODE (XEXP (operands[2], 0)) == POST_INC)
+ return "move%.l %2,%3\;add%.l %2,%R0\;addx%.l %3,%0";
+ else
+ {
+ rtx high, low;
+ rtx xoperands[2];
+
+ if (GET_CODE (operands[2]) == REG)
+ {
+ low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+ high = operands[2];
+ }
+ else if (CONSTANT_P (operands[2]))
+ split_double (operands[2], &high, &low);
+ else
+ {
+ low = adjust_address (operands[2], SImode, 4);
+ high = operands[2];
+ }
+
+ operands[1] = low, operands[2] = high;
+ xoperands[0] = operands[3];
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
+ xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1);
+ else
+ xoperands[1] = operands[2];
+
+ output_asm_insn (output_move_simode (xoperands), xoperands);
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8)
+ return "addq%.l %1,%R0\;addx%.l %3,%0";
+ else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
+ {
+ operands[1] = GEN_INT (-INTVAL (operands[1]));
+ return "subq%.l %1,%R0\;subx%.l %3,%0";
+ }
+ }
+ return "add%.l %1,%R0\;addx%.l %3,%0";
+ }
+ }
+ else
+ {
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ CC_STATUS_INIT;
+ if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ {
+ operands[1] = gen_rtx_MEM (SImode,
+				     plus_constant (XEXP (operands[0], 0), -8));
+ return "move%.l %0,%3\;add%.l %R2,%0\;addx%.l %2,%3\;move%.l %3,%1";
+ }
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ {
+	  operands[1] = XEXP (operands[0], 0);
+ return "add%.l %R2,%0\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%1";
+ }
+ else
+ {
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "add%.l %R2,%1\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%0";
+ }
+ }
+})
+
+(define_insn "addsi_lshrsi_31"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,dm,d<Q>")
+ (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "rm,r<Q>,rm")
+ (const_int 31))
+ (match_dup 1)))]
+ ""
+{
+ operands[2] = operands[0];
+ operands[3] = gen_label_rtx();
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ operands[0] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0));
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ operands[2] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0));
+ }
+ output_asm_insn ("move%.l %1,%0", operands);
+ output_asm_insn ("jpl %l3", operands);
+ output_asm_insn ("addq%.l #1,%2", operands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (operands[3]));
+ return "";
+}
+ [(set_attr "ok_for_coldfire" "no,yes,yes")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (plus:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_src_operand" "")))]
+ ""
+ "")
+
+;; Note that the middle two alternatives are near-duplicates
+;; in order to handle insns generated by reload.
+;; This is needed since they are not themselves reloaded,
+;; so commutativity won't apply to them.
+(define_insn "*addsi3_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=m,?a,?a,d,a")
+ (plus:SI (match_operand:SI 1 "general_operand" "%0,a,rJK,0,0")
+ (match_operand:SI 2 "general_src_operand" "dIKLT,rJK,a,mSrIKLT,mSrIKLs")))]
+  "!TARGET_COLDFIRE"
+ "* return output_addsi3 (operands);")
+
+(define_insn_and_split "*addsi3_5200"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,a, m,r, ?a, ?a,?a,?a")
+ (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0, 0,0, a, a, r, a")
+ (match_operand:SI 2 "general_src_operand" " I, L, JCu,d,mrKi,Cj, r, a, JCu")))]
+ "TARGET_COLDFIRE"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "addq%.l %2,%0";
+
+ case 1:
+ operands[2] = GEN_INT (- INTVAL (operands[2]));
+ return "subq%.l %2,%0";
+
+ case 3:
+ case 4:
+ return "add%.l %2,%0";
+
+ case 5:
+ /* move%.l %2,%0\n\tadd%.l %1,%0 */
+ return "#";
+
+ case 6:
+ return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
+
+ case 7:
+ return MOTOROLA ? "lea (%2,%1.l),%0" : "lea %2@(0,%1:l),%0";
+
+ case 2:
+ case 8:
+ return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
+
+ default:
+ gcc_unreachable ();
+ return "";
+ }
+}
+ "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 5) && !operands_match_p (operands[0], operands[1])"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (match_dup 1)))]
+ ""
+ [(set_attr "type" "aluq_l,aluq_l,lea, alu_l,alu_l,*,lea, lea, lea")
+ (set_attr "opy" "2, 2, *, 2, 2, *,*, *, *")
+ (set_attr "opy_type" "*, *, mem5,*, *, *,mem6,mem6,mem5")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
+ (plus:SI (match_operand:SI 1 "general_operand" "0")
+ (sign_extend:SI
+ (match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))]
+ "!TARGET_COLDFIRE"
+ "add%.w %2,%0")
+
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_src_operand" "dn,rmSn")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ /* If the constant would be a negative number when interpreted as
+ HImode, make it negative. This is usually, but not always, done
+ elsewhere in the compiler. First check for constants out of range,
+ which could confuse us. */
+
+ if (INTVAL (operands[2]) >= 32768)
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 65536);
+
+ if (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) <= 8)
+ return "addq%.w %2,%0";
+ if (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) >= -8)
+ {
+ operands[2] = GEN_INT (- INTVAL (operands[2]));
+ return "subq%.w %2,%0";
+ }
+ /* On the CPU32 it is faster to use two addqw instructions to
+ add a small integer (8 < N <= 16) to a register.
+ Likewise for subqw. */
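+      /* For example, an addend of 12 is emitted below as addq.w #8
+	 followed by addq.w #4.  */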
+ if (TUNE_CPU32 && REG_P (operands[0]))
+ {
+ if (INTVAL (operands[2]) > 8
+ && INTVAL (operands[2]) <= 16)
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
+ return "addq%.w #8,%0\;addq%.w %2,%0";
+ }
+ if (INTVAL (operands[2]) < -8
+ && INTVAL (operands[2]) >= -16)
+ {
+ operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
+ return "subq%.w #8,%0\;subq%.w %2,%0";
+ }
+ }
+ if (ADDRESS_REG_P (operands[0]) && !TUNE_68040)
+ return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
+ }
+ return "add%.w %2,%0";
+})
+
+;; These insns must use MATCH_DUP instead of the more expected
+;; use of a matching constraint because the "output" here is also
+;; an input, so you can't use the matching constraint. That also means
+;; that you can't use the "%", so you need patterns with the matched
+;; operand in both positions.
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (plus:HI (match_dup 0)
+ (match_operand:HI 1 "general_src_operand" "dn,rmSn")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* If the constant would be a negative number when interpreted as
+ HImode, make it negative. This is usually, but not always, done
+ elsewhere in the compiler. First check for constants out of range,
+ which could confuse us. */
+
+ if (INTVAL (operands[1]) >= 32768)
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 65536);
+
+ if (INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) <= 8)
+ return "addq%.w %1,%0";
+ if (INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -8)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return "subq%.w %1,%0";
+ }
+ /* On the CPU32 it is faster to use two addqw instructions to
+ add a small integer (8 < N <= 16) to a register.
+ Likewise for subqw. */
+ if (TUNE_CPU32 && REG_P (operands[0]))
+ {
+ if (INTVAL (operands[1]) > 8
+ && INTVAL (operands[1]) <= 16)
+ {
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 8);
+ return "addq%.w #8,%0\;addq%.w %1,%0";
+ }
+ if (INTVAL (operands[1]) < -8
+ && INTVAL (operands[1]) >= -16)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]) - 8);
+ return "subq%.w #8,%0\;subq%.w %1,%0";
+ }
+ }
+ if (ADDRESS_REG_P (operands[0]) && !TUNE_68040)
+ return MOTOROLA ? "lea (%c1,%0),%0" : "lea %0@(%c1),%0";
+ }
+ return "add%.w %1,%0";
+})
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (plus:HI (match_operand:HI 1 "general_src_operand" "dn,rmSn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* If the constant would be a negative number when interpreted as
+ HImode, make it negative. This is usually, but not always, done
+ elsewhere in the compiler. First check for constants out of range,
+ which could confuse us. */
+
+ if (INTVAL (operands[1]) >= 32768)
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 65536);
+
+ if (INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) <= 8)
+ return "addq%.w %1,%0";
+ if (INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -8)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return "subq%.w %1,%0";
+ }
+ /* On the CPU32 it is faster to use two addqw instructions to
+ add a small integer (8 < N <= 16) to a register.
+ Likewise for subqw. */
+ if (TUNE_CPU32 && REG_P (operands[0]))
+ {
+ if (INTVAL (operands[1]) > 8
+ && INTVAL (operands[1]) <= 16)
+ {
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 8);
+ return "addq%.w #8,%0\;addq%.w %1,%0";
+ }
+ if (INTVAL (operands[1]) < -8
+ && INTVAL (operands[1]) >= -16)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]) - 8);
+ return "subq%.w #8,%0\;subq%.w %1,%0";
+ }
+ }
+ if (ADDRESS_REG_P (operands[0]) && !TUNE_68040)
+ return MOTOROLA ? "lea (%c1,%0),%0" : "lea %0@(%c1),%0";
+ }
+ return "add%.w %1,%0";
+})
+
+(define_insn "addqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
+ (plus:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) >= 128)
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 256);
+
+ if (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) <= 8)
+ return "addq%.b %2,%0";
+ if (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) >= -8)
+ {
+ operands[2] = GEN_INT (- INTVAL (operands[2]));
+ return "subq%.b %2,%0";
+ }
+ }
+ return "add%.b %2,%0";
+})
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (plus:QI (match_dup 0)
+ (match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 128)
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 256);
+
+ if (INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) <= 8)
+ return "addq%.b %1,%0";
+ if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return "subq%.b %1,%0";
+ }
+ }
+ return "add%.b %1,%0";
+})
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (plus:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 128)
+ operands[1] = GEN_INT (INTVAL (operands[1]) - 256);
+
+ if (INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) <= 8)
+ return "addq%.b %1,%0";
+ if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8)
+ {
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return "subq%.b %1,%0";
+ }
+ }
+ return "add%.b %1,%0";
+})
+
+(define_expand "add<mode>3"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (plus:FP (match_operand:FP 1 "general_operand" "")
+ (match_operand:FP 2 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "add<mode>3_floatsi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (plus:FP (float:FP (match_operand:SI 2 "general_operand" "dmi"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+ "f<FP:round>add%.l %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "add<mode>3_floathi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (plus:FP (float:FP (match_operand:HI 2 "general_operand" "dmn"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+ "f<FP:round>add%.w %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "add<mode>3_floatqi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (plus:FP (float:FP (match_operand:QI 2 "general_operand" "dmn"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+ "f<FP:round>add%.b %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "add<mode>3_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (plus:FP (match_operand:FP 1 "general_operand" "%0")
+ (match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:round>add%.x %2,%0";
+ return "f<FP:round>add%.<FP:prec> %f2,%0";
+}
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "add<mode>3_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (plus:FP (match_operand:FP 1 "general_operand" "%0")
+ (match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:prec>add%.d %2,%0";
+ return "f<FP:prec>add%.<FP:prec> %2,%0";
+}
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+;; subtract instructions
+
+(define_insn "subdi_sexthishl32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0")
+ (ashift:DI (sign_extend:DI (match_operand:HI 2 "general_operand" "rm,rm,rm,rm"))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&d,X,a,?d"))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (ADDRESS_REG_P (operands[0]))
+ return "sub%.w %2,%0";
+ else if (ADDRESS_REG_P (operands[3]))
+ return "move%.w %2,%3\;sub%.l %3,%0";
+ else
+ return "move%.w %2,%3\;ext%.l %3\;sub%.l %3,%0";
+})
+
+(define_insn "subdi_dishl32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "+ro")
+ (minus:DI (match_dup 0)
+ (ashift:DI (match_operand:DI 1 "general_operand" "ro")
+ (const_int 32))))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[1]) == REG)
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ else
+ operands[1] = adjust_address (operands[1], SImode, 4);
+ return "sub%.l %1,%0";
+}
+ [(set_attr "type" "alu_l")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0")
+ (match_operand:DI 2 "general_operand" "d,no>,d,a")))
+ (clobber (match_scratch:SI 3 "=&d,&d,X,&d"))]
+ ""
+{
+ if (DATA_REG_P (operands[0]))
+ {
+ if (DATA_REG_P (operands[2]))
+ return "sub%.l %R2,%R0\;subx%.l %2,%0";
+ else if (GET_CODE (operands[2]) == MEM
+ && GET_CODE (XEXP (operands[2], 0)) == POST_INC)
+ {
+ return "move%.l %2,%3\;sub%.l %2,%R0\;subx%.l %3,%0";
+ }
+ else
+ {
+ rtx high, low;
+ rtx xoperands[2];
+
+ if (GET_CODE (operands[2]) == REG)
+ {
+ low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+ high = operands[2];
+ }
+ else if (CONSTANT_P (operands[2]))
+ split_double (operands[2], &high, &low);
+ else
+ {
+ low = adjust_address (operands[2], SImode, 4);
+ high = operands[2];
+ }
+
+ operands[1] = low, operands[2] = high;
+ xoperands[0] = operands[3];
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
+ xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1);
+ else
+ xoperands[1] = operands[2];
+
+ output_asm_insn (output_move_simode (xoperands), xoperands);
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8)
+ return "subq%.l %1,%R0\;subx%.l %3,%0";
+ else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
+ {
+ operands[1] = GEN_INT (-INTVAL (operands[1]));
+ return "addq%.l %1,%R0\;addx%.l %3,%0";
+ }
+ }
+ return "sub%.l %1,%R0\;subx%.l %3,%0";
+ }
+ }
+ else
+ {
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ CC_STATUS_INIT;
+ if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ {
+ operands[1]
+ = gen_rtx_MEM (SImode, plus_constant (XEXP (operands[0], 0), -8));
+ return "move%.l %0,%3\;sub%.l %R2,%0\;subx%.l %2,%3\;move%.l %3,%1";
+ }
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ {
+ operands[1] = XEXP(operands[0], 0);
+ return "sub%.l %R2,%0\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%1";
+ }
+ else
+ {
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "sub%.l %R2,%1\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%0";
+ }
+ }
+})
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=mda,m,d,a")
+ (minus:SI (match_operand:SI 1 "general_operand" "0,0,0,0")
+ (match_operand:SI 2 "general_src_operand" "I,dT,mSrT,mSrs")))]
+ ""
+ "@
+   subq%.l %2,%0
+ sub%.l %2,%0
+ sub%.l %2,%0
+ sub%.l %2,%0"
+ [(set_attr "type" "aluq_l,alu_l,alu_l,alu_l")
+ (set_attr "opy" "2")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
+ (minus:SI (match_operand:SI 1 "general_operand" "0")
+ (sign_extend:SI
+ (match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))]
+ "!TARGET_COLDFIRE"
+ "sub%.w %2,%0")
+
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r")
+ (minus:HI (match_operand:HI 1 "general_operand" "0,0")
+ (match_operand:HI 2 "general_src_operand" "dn,rmSn")))]
+ "!TARGET_COLDFIRE"
+ "sub%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (minus:HI (match_dup 0)
+ (match_operand:HI 1 "general_src_operand" "dn,rmSn")))]
+ "!TARGET_COLDFIRE"
+ "sub%.w %1,%0")
+
+(define_insn "subqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
+ (minus:QI (match_operand:QI 1 "general_operand" "0,0")
+ (match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "sub%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (minus:QI (match_dup 0)
+ (match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "sub%.b %1,%0")
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (minus:FP (match_operand:FP 1 "general_operand" "")
+ (match_operand:FP 2 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "sub<mode>3_floatsi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (minus:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:SI 2 "general_operand" "dmi"))))]
+ "TARGET_68881"
+ "f<FP:round>sub%.l %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "sub<mode>3_floathi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (minus:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:HI 2 "general_operand" "dmn"))))]
+ "TARGET_68881"
+ "f<FP:round>sub%.w %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "sub<mode>3_floatqi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (minus:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:QI 2 "general_operand" "dmn"))))]
+ "TARGET_68881"
+ "f<FP:round>sub%.b %2,%0"
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "sub<mode>3_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (minus:FP (match_operand:FP 1 "general_operand" "0")
+ (match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:round>sub%.x %2,%0";
+ return "f<FP:round>sub%.<FP:prec> %f2,%0";
+}
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+(define_insn "sub<mode>3_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (minus:FP (match_operand:FP 1 "general_operand" "0")
+ (match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:prec>sub%.d %2,%0";
+ return "f<FP:prec>sub%.<FP:prec> %2,%0";
+}
+ [(set_attr "type" "falu")
+ (set_attr "opy" "2")])
+
+;; multiply instructions
+
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
+ (mult:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_src_operand" "dmSn")))]
+ ""
+{
+ return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
+}
+ [(set_attr "type" "mul_w")
+ (set_attr "opy" "2")])
+
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:SI
+ (match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))]
+ ""
+{
+ return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
+}
+ [(set_attr "type" "mul_w")
+ (set_attr "opy" "2")])
+
+(define_insn "*mulhisisi3_s"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "%0"))
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "INTVAL (operands[2]) >= -0x8000 && INTVAL (operands[2]) <= 0x7fff"
+{
+ return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
+}
+ [(set_attr "type" "mul_w")
+ (set_attr "opy" "2")])
+
+(define_expand "mulsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (mult:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ "TARGET_68020 || TARGET_COLDFIRE"
+ "")
+
+(define_insn "*mulsi3_68020"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (match_operand:SI 1 "general_operand" "%0")
+ (match_operand:SI 2 "general_src_operand" "dmSTK")))]
+  "TARGET_68020"
+ "muls%.l %2,%0"
+ [(set_attr "type" "mul_l")
+ (set_attr "opy" "2")])
+
+(define_insn "*mulsi3_cf"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (match_operand:SI 1 "general_operand" "%0")
+ (match_operand:SI 2 "general_operand" "d<Q>")))]
+ "TARGET_COLDFIRE"
+ "muls%.l %2,%0"
+ [(set_attr "type" "mul_l")
+ (set_attr "opy" "2")])
+
+(define_insn "umulhisi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:SI
+ (match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))]
+ ""
+{
+ return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0";
+}
+ [(set_attr "type" "mul_w")
+ (set_attr "opy" "2")])
+
+(define_insn "*mulhisisi3_z"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "%0"))
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 0xffff"
+{
+ return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0";
+}
+ [(set_attr "type" "mul_w")
+ (set_attr "opy" "2")])
+
+;; We need a separate DEFINE_EXPAND for u?mulsidi3 to be able to use the
+;; proper matching constraint. This is because the matching is between
+;; the high-numbered word of the DImode operand[0] and operand[1].
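+;; (On this big-endian target, the subreg at byte offset 4 is the
+;; low-order half of the result; mulu.l/muls.l write their two results
+;; as %high:%low, which the templates below spell "%3:%0".)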
+(define_expand "umulsidi3"
+ [(parallel
+ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (set (subreg:SI (match_dup 0) 0)
+ (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2)))
+ (const_int 32))))])]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "nonimmediate_operand" "dm")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2)))
+ (const_int 32))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "mulu%.l %2,%3:%0")
+
+;; Match immediate case.  For 2.4 only match things < 2^31.
+;; It's tricky with larger values in these patterns since we need to match
+;; values between the two parallel multiplies, between a CONST_DOUBLE and
+;; a CONST_INT.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (match_dup 2))
+ (const_int 32))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE
+ && (unsigned) INTVAL (operands[2]) <= 0x7fffffff"
+ "mulu%.l %2,%3:%0")
+
+(define_expand "mulsidi3"
+ [(parallel
+ [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (set (subreg:SI (match_dup 0) 0)
+ (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (const_int 32))))])]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "nonimmediate_operand" "dm")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (const_int 32))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "muls%.l %2,%3:%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (match_dup 2))
+ (const_int 32))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "muls%.l %2,%3:%0")
+
+(define_expand "umulsi3_highpart"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "general_operand" "")))
+ (const_int 32))))
+ (clobber (match_dup 3))])]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+{
+ operands[3] = gen_reg_rtx (SImode);
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
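+      /* Canonicalize the CONST_INT multiplier as a zero-extended DImode
+	 constant so that it matches const_uint32_operand below.  */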
+ operands[2] = immed_double_const (INTVAL (operands[2]) & 0xffffffff,
+ 0, DImode);
+
+ /* We have to adjust the operand order for the matching constraints. */
+ emit_insn (gen_const_umulsi3_highpart (operands[0], operands[3],
+ operands[1], operands[2]));
+ DONE;
+ }
+})
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "%1"))
+ (zero_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm")))
+ (const_int 32))))
+ (clobber (match_operand:SI 1 "register_operand" "=d"))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "mulu%.l %3,%0:%1")
+
+(define_insn "const_umulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "1"))
+ (match_operand:DI 3 "const_uint32_operand" "n"))
+ (const_int 32))))
+ (clobber (match_operand:SI 1 "register_operand" "=d"))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "mulu%.l %3,%0:%1")
+
+(define_expand "smulsi3_highpart"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "general_operand" "")))
+ (const_int 32))))
+ (clobber (match_dup 3))])]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+{
+ operands[3] = gen_reg_rtx (SImode);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ /* We have to adjust the operand order for the matching constraints. */
+ emit_insn (gen_const_smulsi3_highpart (operands[0], operands[3],
+ operands[1], operands[2]));
+ DONE;
+ }
+})
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "%1"))
+ (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm")))
+ (const_int 32))))
+ (clobber (match_operand:SI 1 "register_operand" "=d"))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "muls%.l %3,%0:%1")
+
+(define_insn "const_smulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "1"))
+ (match_operand:DI 3 "const_sint32_operand" "n"))
+ (const_int 32))))
+ (clobber (match_operand:SI 1 "register_operand" "=d"))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "muls%.l %3,%0:%1")
+
+(define_expand "mul<mode>3"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (mult:FP (match_operand:FP 1 "general_operand" "")
+ (match_operand:FP 2 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "mul<mode>3_floatsi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (mult:FP (float:FP (match_operand:SI 2 "general_operand" "dmi"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>mul%.l %2,%0"
+ : "f<FP:round_mul>mul%.l %2,%0";
+}
+ [(set_attr "type" "fmul")
+ (set_attr "opy" "2")])
+
+(define_insn "mul<mode>3_floathi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (mult:FP (float:FP (match_operand:HI 2 "general_operand" "dmn"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>mul%.w %2,%0"
+ : "f<FP:round_mul>mul%.w %2,%0";
+}
+ [(set_attr "type" "fmul")
+ (set_attr "opy" "2")])
+
+(define_insn "mul<mode>3_floatqi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (mult:FP (float:FP (match_operand:QI 2 "general_operand" "dmn"))
+ (match_operand:FP 1 "general_operand" "0")))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>mul%.b %2,%0"
+ : "f<FP:round_mul>mul%.b %2,%0";
+}
+ [(set_attr "type" "fmul")
+ (set_attr "opy" "2")])
+
+(define_insn "muldf_68881"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f")
+ (mult:DF (match_operand:DF 1 "general_operand" "%0")
+ (match_operand:DF 2 "general_operand" "fmG")))]
+ "TARGET_68881"
+{
+ if (GET_CODE (operands[2]) == CONST_DOUBLE
+ && floating_exact_log2 (operands[2]) && !TUNE_68040_60)
+ {
+ int i = floating_exact_log2 (operands[2]);
+ operands[2] = GEN_INT (i);
+ return "fscale%.l %2,%0";
+ }
+ if (REG_P (operands[2]))
+ return "f%&mul%.x %2,%0";
+ return "f%&mul%.d %f2,%0";
+})
+
+(define_insn "mulsf_68881"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f")
+ (mult:SF (match_operand:SF 1 "general_operand" "%0")
+ (match_operand:SF 2 "general_operand" "fdmF")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[2]))
+ return (TARGET_68040
+ ? "fsmul%.x %2,%0"
+ : "fsglmul%.x %2,%0");
+ return (TARGET_68040
+ ? "fsmul%.s %f2,%0"
+ : "fsglmul%.s %f2,%0");
+})
+
+(define_insn "mulxf3_68881"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f")
+ (mult:XF (match_operand:XF 1 "nonimmediate_operand" "%0")
+ (match_operand:XF 2 "nonimmediate_operand" "fm")))]
+ "TARGET_68881"
+{
+ return "fmul%.x %f2,%0";
+})
+
+(define_insn "fmul<mode>3_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (mult:FP (match_operand:FP 1 "general_operand" "%0")
+ (match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:prec>mul%.d %2,%0";
+ return "f<FP:prec>mul%.<FP:prec> %2,%0";
+}
+ [(set_attr "type" "fmul")
+ (set_attr "opy" "2")])
+
+;; divide instructions
+
+(define_expand "div<mode>3"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (div:FP (match_operand:FP 1 "general_operand" "")
+ (match_operand:FP 2 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "div<mode>3_floatsi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (div:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:SI 2 "general_operand" "dmi"))))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>div%.l %2,%0"
+ : "f<FP:round_mul>div%.l %2,%0";
+})
+
+(define_insn "div<mode>3_floathi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (div:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:HI 2 "general_operand" "dmn"))))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>div%.w %2,%0"
+ : "f<FP:round_mul>div%.w %2,%0";
+})
+
+(define_insn "div<mode>3_floatqi_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (div:FP (match_operand:FP 1 "general_operand" "0")
+ (float:FP (match_operand:QI 2 "general_operand" "dmn"))))]
+ "TARGET_68881"
+{
+ return TARGET_68040
+ ? "f<FP:round>div%.b %2,%0"
+ : "f<FP:round_mul>div%.b %2,%0";
+})
+
+(define_insn "div<mode>3_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (div:FP (match_operand:FP 1 "general_operand" "0")
+ (match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[2]))
+ return (TARGET_68040
+ ? "f<FP:round>div%.x %2,%0"
+ : "f<FP:round_mul>div%.x %2,%0");
+ return (TARGET_68040
+ ? "f<FP:round>div%.<FP:prec> %f2,%0"
+ : "f<FP:round_mul>div%.<FP:prec> %f2,%0");
+})
+
+(define_insn "div<mode>3_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (div:FP (match_operand:FP 1 "general_operand" "0")
+ (match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[2]))
+ return "f<FP:prec>div%.d %2,%0";
+ return "f<FP:prec>div%.<FP:prec> %2,%0";
+}
+ [(set_attr "type" "fdiv")
+ (set_attr "opy" "2")])
+
+;; Remainder instructions.
+
+(define_expand "divmodsi4"
+ [(parallel
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (div:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_src_operand" "")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_68020 || TARGET_CF_HWDIV"
+ "")
+
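+;; The ColdFire hardware divider produces either the quotient (divs.l)
+;; or the remainder (rems.l), not both at once, so the pattern below
+;; emits both instructions only when neither result is unused.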
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (div:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_src_operand" "d<Q>U")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "=&d")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_CF_HWDIV"
+{
+ if (find_reg_note (insn, REG_UNUSED, operands[3]))
+ return "divs%.l %2,%0";
+ else if (find_reg_note (insn, REG_UNUSED, operands[0]))
+ return "rems%.l %2,%3:%0";
+ else
+ return "rems%.l %2,%3:%0\;divs%.l %2,%0";
+}
+ [(set_attr "type" "div_l")
+ (set_attr "opy" "2")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (div:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_src_operand" "dmSTK")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "=d")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_68020"
+{
+ if (find_reg_note (insn, REG_UNUSED, operands[3]))
+ return "divs%.l %2,%0";
+ else
+ return "divsl%.l %2,%3:%0";
+})
+
+(define_expand "udivmodsi4"
+ [(parallel
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (udiv:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_src_operand" "dmSTK")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "=d")
+ (umod:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_68020 || TARGET_CF_HWDIV"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (udiv:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_src_operand" "d<Q>U")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "=&d")
+ (umod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_CF_HWDIV"
+{
+ if (find_reg_note (insn, REG_UNUSED, operands[3]))
+ return "divu%.l %2,%0";
+ else if (find_reg_note (insn, REG_UNUSED, operands[0]))
+ return "remu%.l %2,%3:%0";
+ else
+ return "remu%.l %2,%3:%0\;divu%.l %2,%0";
+}
+ [(set_attr "type" "div_l")
+ (set_attr "opy" "2")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (udiv:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_src_operand" "dmSTK")))
+ (set (match_operand:SI 3 "nonimmediate_operand" "=d")
+ (umod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_68020 && !TARGET_COLDFIRE"
+{
+ if (find_reg_note (insn, REG_UNUSED, operands[3]))
+ return "divu%.l %2,%0";
+ else
+ return "divul%.l %2,%3:%0";
+})
+
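+;; divs.w leaves the 16-bit quotient in the low word of the destination
+;; and the 16-bit remainder in the high word, hence the move/swap pair
+;; emitted below to extract the remainder.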
+(define_insn "divmodhi4"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
+ (div:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "general_src_operand" "dmSKT")))
+ (set (match_operand:HI 3 "nonimmediate_operand" "=d")
+ (mod:HI (match_dup 1) (match_dup 2)))]
+ "!TARGET_COLDFIRE || TARGET_CF_HWDIV"
+{
+ output_asm_insn (MOTOROLA ?
+ "ext%.l %0\;divs%.w %2,%0" :
+ "extl %0\;divs %2,%0",
+ operands);
+  if (!find_reg_note (insn, REG_UNUSED, operands[3]))
+ {
+ CC_STATUS_INIT;
+ return "move%.l %0,%3\;swap %3";
+ }
+ else
+ return "";
+})
+
+(define_insn "udivmodhi4"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
+ (udiv:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "general_src_operand" "dmSKT")))
+ (set (match_operand:HI 3 "nonimmediate_operand" "=d")
+ (umod:HI (match_dup 1) (match_dup 2)))]
+ "!TARGET_COLDFIRE || TARGET_CF_HWDIV"
+{
+ if (ISA_HAS_MVS_MVZ)
+ output_asm_insn (MOTOROLA ?
+ "mvz%.w %0,%0\;divu%.w %2,%0" :
+ "mvz%.w %0,%0\;divu %2,%0",
+ operands);
+ else
+ output_asm_insn (MOTOROLA ?
+ "and%.l #0xFFFF,%0\;divu%.w %2,%0" :
+ "and%.l #0xFFFF,%0\;divu %2,%0",
+ operands);
+
+  if (!find_reg_note (insn, REG_UNUSED, operands[3]))
+ {
+ CC_STATUS_INIT;
+ return "move%.l %0,%3\;swap %3";
+ }
+ else
+ return "";
+})
+
+;; logical-and instructions
+
+;; "anddi3" is mainly here to help combine().
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
+ (and:DI (match_operand:DI 1 "general_operand" "%0,0")
+ (match_operand:DI 2 "general_operand" "dn,don")))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ /* We can get CONST_DOUBLE, but also const1_rtx etc. */
+ if (CONSTANT_P (operands[2]))
+ {
+ rtx hi, lo;
+
+ split_double (operands[2], &hi, &lo);
+
+ switch (INTVAL (hi))
+ {
+ case 0 :
+ output_asm_insn ("clr%.l %0", operands);
+ break;
+ case -1 :
+ break;
+ default :
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = hi;
+ output_asm_insn (output_andsi3 (xoperands), xoperands);
+ }
+ }
+ if (GET_CODE (operands[0]) == REG)
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[0] = adjust_address (operands[0], SImode, 4);
+ switch (INTVAL (lo))
+ {
+ case 0 :
+ output_asm_insn ("clr%.l %0", operands);
+ break;
+ case -1 :
+ break;
+ default :
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = lo;
+ output_asm_insn (output_andsi3 (xoperands), xoperands);
+ }
+ }
+ return "";
+ }
+ if (GET_CODE (operands[0]) != REG)
+ {
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "and%.l %2,%0\;and%.l %R2,%1";
+ }
+ if (GET_CODE (operands[2]) != REG)
+ {
+ operands[1] = adjust_address (operands[2], SImode, 4);
+ return "and%.l %2,%0\;and%.l %1,%R0";
+ }
+ return "and%.l %2,%0\;and%.l %R2,%R0";
+})
+
+;; Prevent AND from being generated with sp as the destination.  No such
+;; instruction exists in the machine, and reload would produce inefficient
+;; code for it.  Since sp is a FIXED_REG, we can't allocate pseudos into it.
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "not_sp_operand" "")
+ (and:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_src_operand" "")))]
+ ""
+ "")
+
+;; produced by split operations after reload finished
+(define_insn "*andsi3_split"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (and:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "i")))]
+ "reload_completed && !TARGET_COLDFIRE"
+{
+ return output_andsi3 (operands);
+})
+
+(define_insn "andsi3_internal"
+ [(set (match_operand:SI 0 "not_sp_operand" "=m,d")
+ (and:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_src_operand" "dKT,dmSM")))]
+ "!TARGET_COLDFIRE"
+{
+ return output_andsi3 (operands);
+})
+
+(define_insn "andsi3_5200"
+ [(set (match_operand:SI 0 "not_sp_operand" "=m,d")
+ (and:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_src_operand" "d,dmsK")))]
+ "TARGET_COLDFIRE"
+{
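+  /* Masking a data register with 0xff or 0xffff is a zero-extension,
+     which cores with mvz (ISA_HAS_MVS_MVZ) do in a single instruction.  */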
+ if (ISA_HAS_MVS_MVZ
+ && DATA_REG_P (operands[0])
+ && GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) == 0x000000ff)
+ return "mvz%.b %0,%0";
+ else if (INTVAL (operands[2]) == 0x0000ffff)
+ return "mvz%.w %0,%0";
+ }
+ return output_andsi3 (operands);
+})
+
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d")
+ (and:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "and%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (and:HI (match_dup 0)
+ (match_operand:HI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "and%.w %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (and:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "and%.w %1,%0")
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
+ (and:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "and%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "and%.b %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (and:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "and%.b %1,%0")
+
+;; inclusive-or instructions
+
+(define_insn "iordi_zext"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
+ (ior:DI (zero_extend:DI (match_operand 1 "general_operand" "dn,dmn"))
+ (match_operand:DI 2 "general_operand" "0,0")))]
+ "!TARGET_COLDFIRE"
+{
+ int byte_mode;
+
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == REG)
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[0] = adjust_address (operands[0], SImode, 4);
+ if (GET_MODE (operands[1]) == SImode)
+ return "or%.l %1,%0";
+ byte_mode = (GET_MODE (operands[1]) == QImode);
+ if (GET_CODE (operands[0]) == MEM)
+ operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode,
+ byte_mode ? 3 : 2);
+ if (byte_mode)
+ return "or%.b %1,%0";
+ else
+ return "or%.w %1,%0";
+})
+
+;; "iordi3" is mainly here to help combine().
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
+ (ior:DI (match_operand:DI 1 "general_operand" "%0,0")
+ (match_operand:DI 2 "general_operand" "dn,don")))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ /* We can get CONST_DOUBLE, but also const1_rtx etc. */
+ if (CONSTANT_P (operands[2]))
+ {
+ rtx hi, lo;
+
+ split_double (operands[2], &hi, &lo);
+
+ switch (INTVAL (hi))
+ {
+ case 0 :
+ break;
+ case -1 :
+ /* FIXME : a scratch register would be welcome here if operand[0]
+ is not a register */
+ output_asm_insn ("move%.l #-1,%0", operands);
+ break;
+ default :
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = hi;
+ output_asm_insn (output_iorsi3 (xoperands), xoperands);
+ }
+ }
+ if (GET_CODE (operands[0]) == REG)
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[0] = adjust_address (operands[0], SImode, 4);
+ switch (INTVAL (lo))
+ {
+ case 0 :
+ break;
+ case -1 :
+ /* FIXME : a scratch register would be welcome here if operand[0]
+ is not a register */
+ output_asm_insn ("move%.l #-1,%0", operands);
+ break;
+ default :
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = lo;
+ output_asm_insn (output_iorsi3 (xoperands), xoperands);
+ }
+ }
+ return "";
+ }
+ if (GET_CODE (operands[0]) != REG)
+ {
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "or%.l %2,%0\;or%.l %R2,%1";
+ }
+ if (GET_CODE (operands[2]) != REG)
+ {
+ operands[1] = adjust_address (operands[2], SImode, 4);
+ return "or%.l %2,%0\;or%.l %1,%R0";
+ }
+ return "or%.l %2,%0\;or%.l %R2,%R0";
+})
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (ior:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_src_operand" "")))]
+ ""
+ "")
+
+(define_insn "iorsi3_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d")
+ (ior:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_src_operand" "dKT,dmSMT")))]
+  "!TARGET_COLDFIRE"
+{
+ return output_iorsi3 (operands);
+})
+
+(define_insn "iorsi3_5200"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d")
+ (ior:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_src_operand" "d,dmsK")))]
+ "TARGET_COLDFIRE"
+{
+ return output_iorsi3 (operands);
+})
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d")
+ (ior:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "or%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (ior:HI (match_dup 0)
+ (match_operand:HI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "or%.w %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
+ (ior:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "or%.w %1,%0")
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
+ (ior:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "or%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (ior:QI (match_dup 0)
+ (match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
+ "!TARGET_COLDFIRE"
+ "or%.b %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
+ (ior:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
+ (match_dup 0)))]
+	  operands[1] = XEXP (operands[0], 0);
+ "or%.b %1,%0")
+
+;; On all 68k models, this makes faster code in a special case.
+;; See also ashlsi_16, ashrsi_16 and lshrsi_16.
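+;; For example, (hi << 16) | (lo & 0xffff) is built below with at most
+;; three instructions: move.w, swap, move.w, instead of shifts and ors.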
+
+(define_insn "iorsi_zexthi_ashl16"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=&d")
+ (ior:SI (zero_extend:SI (match_operand:HI 1 "general_operand" "rmn"))
+ (ashift:SI (match_operand:SI 2 "general_operand" "or")
+ (const_int 16))))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[2]) != REG)
+ operands[2] = adjust_address (operands[2], HImode, 2);
+ if (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != REGNO (operands[0]))
+ output_asm_insn ("move%.w %2,%0", operands);
+  return "swap %0\;move%.w %1,%0";
+})
+
+(define_insn "iorsi_zext"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=o,d")
+ (ior:SI (zero_extend:SI (match_operand 1 "general_operand" "dn,dmn"))
+ (match_operand:SI 2 "general_operand" "0,0")))]
+ "!TARGET_COLDFIRE"
+{
+ int byte_mode;
+
+ CC_STATUS_INIT;
+ byte_mode = (GET_MODE (operands[1]) == QImode);
+ if (GET_CODE (operands[0]) == MEM)
+ operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode,
+ byte_mode ? 3 : 2);
+ if (byte_mode)
+ return "or%.b %1,%0";
+ else
+ return "or%.w %1,%0";
+})
+
+;; xor instructions
+
+;; "xordi3" is mainly here to help combine().
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=od")
+ (xor:DI (match_operand:DI 1 "general_operand" "%0")
+ (match_operand:DI 2 "general_operand" "dn")))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ /* We can get CONST_DOUBLE, but also const1_rtx etc. */
+
+ if (CONSTANT_P (operands[2]))
+ {
+ rtx hi, lo;
+
+ split_double (operands[2], &hi, &lo);
+
+ switch (INTVAL (hi))
+ {
+ case 0 :
+ break;
+ case -1 :
+ output_asm_insn ("not%.l %0", operands);
+ break;
+ default :
+ /* FIXME : a scratch register would be welcome here if
+ -128 <= INTVAL (hi) < -1 */
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = hi;
+ output_asm_insn (output_xorsi3 (xoperands), xoperands);
+ }
+ }
+ if (GET_CODE (operands[0]) == REG)
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[0] = adjust_address (operands[0], SImode, 4);
+ switch (INTVAL (lo))
+ {
+ case 0 :
+ break;
+ case -1 :
+ output_asm_insn ("not%.l %0", operands);
+ break;
+ default :
+ /* FIXME : a scratch register would be welcome here if
+ -128 <= INTVAL (lo) < -1 */
+ /* FIXME : this should be merged with xorsi3 */
+ {
+ rtx xoperands[3];
+
+ xoperands[0] = operands[0];
+ xoperands[2] = lo;
+ output_asm_insn (output_xorsi3 (xoperands), xoperands);
+ }
+ }
+ return "";
+ }
+ if (GET_CODE (operands[0]) != REG)
+ {
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "eor%.l %2,%0\;eor%.l %R2,%1";
+ }
+ if (GET_CODE (operands[2]) != REG)
+ {
+ operands[1] = adjust_address (operands[2], SImode, 4);
+ return "eor%.l %2,%0\;eor%.l %1,%R0";
+ }
+ return "eor%.l %2,%0\;eor%.l %R2,%R0";
+})
+
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (xor:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+ "")
+
+(define_insn "xorsi3_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=do,m")
+ (xor:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "di,dKT")))]
+  "!TARGET_COLDFIRE"
+{
+ return output_xorsi3 (operands);
+})
+
+(define_insn "xorsi3_5200"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,d")
+ (xor:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "d,Ks")))]
+ "TARGET_COLDFIRE"
+{
+ return output_xorsi3 (operands);
+})
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
+ (xor:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "dn")))]
+ "!TARGET_COLDFIRE"
+ "eor%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
+ (xor:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dn")))]
+ "!TARGET_COLDFIRE"
+ "eor%.w %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
+ (xor:HI (match_operand:HI 1 "general_operand" "dn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "eor%.w %1,%0")
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (xor:QI (match_operand:QI 1 "general_operand" "%0")
+ (match_operand:QI 2 "general_operand" "dn")))]
+ "!TARGET_COLDFIRE"
+ "eor%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
+ (xor:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dn")))]
+ "!TARGET_COLDFIRE"
+ "eor%.b %1,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
+ (xor:QI (match_operand:QI 1 "general_operand" "dn")
+ (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "eor%.b %1,%0")
+
+;; negation instructions
+
+(define_expand "negdi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (neg:DI (match_operand:DI 1 "general_operand" "")))]
+ ""
+{
+ if (TARGET_COLDFIRE)
+ emit_insn (gen_negdi2_5200 (operands[0], operands[1]));
+ else
+ emit_insn (gen_negdi2_internal (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "negdi2_internal"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=<,do,!*a")
+ (neg:DI (match_operand:DI 1 "general_operand" "0,0,0")))]
+ "!TARGET_COLDFIRE"
+{
+ if (which_alternative == 0)
+ return "neg%.l %0\;negx%.l %0";
+ if (GET_CODE (operands[0]) == REG)
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[1] = adjust_address (operands[0], SImode, 4);
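+  /* neg cannot operate on an address register, so each half is
+     exchanged into %d0, negated there, and exchanged back.  */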
+ if (ADDRESS_REG_P (operands[0]))
+ return "exg %/d0,%1\;neg%.l %/d0\;exg %/d0,%1\;exg %/d0,%0\;negx%.l %/d0\;exg %/d0,%0";
+ else
+ return "neg%.l %1\;negx%.l %0";
+})
+
+(define_insn "negdi2_5200"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
+ (neg:DI (match_operand:DI 1 "general_operand" "0")))]
+ "TARGET_COLDFIRE"
+{
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "neg%.l %1\;negx%.l %0";
+})
+
+(define_expand "negsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (neg:SI (match_operand:SI 1 "general_operand" "")))]
+ ""
+{
+ if (TARGET_COLDFIRE)
+ emit_insn (gen_negsi2_5200 (operands[0], operands[1]));
+ else
+ emit_insn (gen_negsi2_internal (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "negsi2_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
+ (neg:SI (match_operand:SI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "neg%.l %0"
+ [(set_attr "type" "neg_l")])
+
+(define_insn "negsi2_5200"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (neg:SI (match_operand:SI 1 "general_operand" "0")))]
+ "TARGET_COLDFIRE"
+ "neg%.l %0"
+ [(set_attr "type" "neg_l")])
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
+ (neg:HI (match_operand:HI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "neg%.w %0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
+ (neg:HI (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "neg%.w %0")
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (neg:QI (match_operand:QI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "neg%.b %0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
+ (neg:QI (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "neg%.b %0")
+
+;; If using software floating point, just flip the sign bit.
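+;; For instance (an illustrative C sketch, not generated code):
+;;   union { float f; unsigned long u; } v;
+;;   v.u ^= 0x80000000;  /* flip the IEEE sign bit */
+;; The expander below performs the same XOR on the word holding the sign.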
+
+(define_expand "negsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (neg:SF (match_operand:SF 1 "general_operand" "")))]
+ ""
+{
+ if (!TARGET_HARD_FLOAT)
+ {
+ rtx result;
+ rtx target;
+
+ target = operand_subword_force (operands[0], 0, SFmode);
+ result = expand_binop (SImode, xor_optab,
+ operand_subword_force (operands[1], 0, SFmode),
+ GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ /* Emit a no-op move so there is an insn to carry a REG_EQUAL note. */
+ emit_move_insn (operands[0], operands[0]);
+ DONE;
+ }
+})
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (neg:DF (match_operand:DF 1 "general_operand" "")))]
+ ""
+{
+ if (!TARGET_HARD_FLOAT)
+ {
+ rtx result;
+ rtx target;
+ rtx insns;
+
+ start_sequence ();
+ target = operand_subword (operands[0], 0, 1, DFmode);
+ result = expand_binop (SImode, xor_optab,
+ operand_subword_force (operands[1], 0, DFmode),
+ GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
+ operand_subword_force (operands[1], 1, DFmode));
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insn (insns);
+ DONE;
+ }
+})
+
+(define_expand "negxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (neg:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (!TARGET_68881)
+ {
+ rtx result;
+ rtx target;
+ rtx insns;
+
+ start_sequence ();
+ target = operand_subword (operands[0], 0, 1, XFmode);
+ result = expand_binop (SImode, xor_optab,
+ operand_subword_force (operands[1], 0, XFmode),
+ GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
+ operand_subword_force (operands[1], 1, XFmode));
+ emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
+ operand_subword_force (operands[1], 2, XFmode));
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insn (insns);
+ DONE;
+ }
+})
+
+(define_insn "neg<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+ (neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m<FP:const>,0")))]
+ "TARGET_68881"
+{
+ if (DATA_REG_P (operands[0]))
+ {
+ operands[1] = GEN_INT (31);
+ return "bchg %1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ return "f<FP:round>neg%.x %1,%0";
+ return "f<FP:round>neg%.<FP:prec> %f1,%0";
+})
+
+(define_insn "neg<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+ (neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (DATA_REG_P (operands[0]))
+ {
+ operands[1] = GEN_INT (31);
+ return "bchg %1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ return "f<FP:prec>neg%.d %1,%0";
+ return "f<FP:prec>neg%.<FP:prec> %1,%0";
+})
+
+;; Sqrt instruction for the 68881
+
+(define_expand "sqrt<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "")
+ (sqrt:FP (match_operand:FP 1 "general_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "sqrt<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[1]))
+ return "f<FP:round>sqrt%.x %1,%0";
+ return "f<FP:round>sqrt%.<FP:prec> %1,%0";
+}
+ [(set_attr "type" "fsqrt")])
+
+(define_insn "sqrt<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (FP_REG_P (operands[1]))
+ return "f<FP:prec>sqrt%.d %1,%0";
+ return "f<FP:prec>sqrt%.<FP:prec> %1,%0";
+}
+ [(set_attr "type" "fsqrt")])
+;; Absolute value instructions
+;; If using software floating point, just zero the sign bit.
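+;; The mask is the complement of the sign bit; as an illustrative C
+;; sketch (not generated code):  v.u &= 0x7fffffff;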
+
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (abs:SF (match_operand:SF 1 "general_operand" "")))]
+ ""
+{
+ if (!TARGET_HARD_FLOAT)
+ {
+ rtx result;
+ rtx target;
+
+ target = operand_subword_force (operands[0], 0, SFmode);
+ result = expand_binop (SImode, and_optab,
+ operand_subword_force (operands[1], 0, SFmode),
+ GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ /* Emit a no-op move so there is an insn to carry a REG_EQUAL note. */
+ emit_move_insn (operands[0], operands[0]);
+ DONE;
+ }
+})
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (abs:DF (match_operand:DF 1 "general_operand" "")))]
+ ""
+{
+ if (!TARGET_HARD_FLOAT)
+ {
+ rtx result;
+ rtx target;
+ rtx insns;
+
+ start_sequence ();
+ target = operand_subword (operands[0], 0, 1, DFmode);
+ result = expand_binop (SImode, and_optab,
+ operand_subword_force (operands[1], 0, DFmode),
+ GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
+ operand_subword_force (operands[1], 1, DFmode));
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insn (insns);
+ DONE;
+ }
+})
+
+(define_expand "absxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (abs:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (!TARGET_68881)
+ {
+ rtx result;
+ rtx target;
+ rtx insns;
+
+ start_sequence ();
+ target = operand_subword (operands[0], 0, 1, XFmode);
+ result = expand_binop (SImode, and_optab,
+ operand_subword_force (operands[1], 0, XFmode),
+ GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
+ gcc_assert (result);
+
+ if (result != target)
+ emit_move_insn (result, target);
+
+ emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
+ operand_subword_force (operands[1], 1, XFmode));
+ emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
+ operand_subword_force (operands[1], 2, XFmode));
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insn (insns);
+ DONE;
+ }
+})
+
+(define_insn "abs<mode>2_68881"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+ (abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m<FP:const>,0")))]
+ "TARGET_68881"
+{
+ if (DATA_REG_P (operands[0]))
+ {
+ operands[1] = GEN_INT (31);
+ return "bclr %1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ return "f<FP:round>abs%.x %1,%0";
+ return "f<FP:round>abs%.<FP:prec> %f1,%0";
+})
+
+(define_insn "abs<mode>2_cf"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
+ (abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
+ "TARGET_COLDFIRE_FPU"
+{
+ if (DATA_REG_P (operands[0]))
+ {
+ operands[1] = GEN_INT (31);
+ return "bclr %1,%0";
+ }
+ if (FP_REG_P (operands[1]))
+ return "f<FP:prec>abs%.d %1,%0";
+ return "f<FP:prec>abs%.<FP:prec> %1,%0";
+}
+ [(set_attr "type" "bitrw,fneg")])
+
+;; bit indexing instructions
+
+;; ColdFire ff1 instruction implements clz.
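+;; ff1 scans from the most significant bit and yields the number of
+;; leading zeros (32 for a zero source); e.g. ff1 of 0x00800000 is 8.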
+(define_insn "clzsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (clz:SI (match_operand:SI 1 "register_operand" "0")))]
+ "ISA_HAS_FF1"
+ "ff1 %0"
+ [(set_attr "type" "ext")])
+
+;; one's complement instructions
+
+;; "one_cmpldi2" is mainly here to help combine().
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=dm")
+ (not:DI (match_operand:DI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == REG)
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ operands[1] = operands[0];
+ else
+ operands[1] = adjust_address (operands[0], SImode, 4);
+ return "not%.l %1\;not%.l %0";
+})
+
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (not:SI (match_operand:SI 1 "general_operand" "")))]
+ ""
+{
+ if (TARGET_COLDFIRE)
+ emit_insn (gen_one_cmplsi2_5200 (operands[0], operands[1]));
+ else
+ emit_insn (gen_one_cmplsi2_internal (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "one_cmplsi2_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
+ (not:SI (match_operand:SI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "not%.l %0")
+
+(define_insn "one_cmplsi2_5200"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (not:SI (match_operand:SI 1 "general_operand" "0")))]
+ "TARGET_COLDFIRE"
+ "not%.l %0"
+ [(set_attr "type" "neg_l")])
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
+ (not:HI (match_operand:HI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "not%.w %0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
+ (not:HI (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "not%.w %0")
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (not:QI (match_operand:QI 1 "general_operand" "0")))]
+ "!TARGET_COLDFIRE"
+ "not%.b %0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
+ (not:QI (match_dup 0)))]
+ "!TARGET_COLDFIRE"
+ "not%.b %0")
+
+;; arithmetic shift instructions
+;; We don't need the shift-memory-by-one-bit instruction.
+
+(define_insn "ashldi_extsi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro")
+ (ashift:DI
+ (match_operator:DI 2 "extend_operator"
+ [(match_operand:SI 1 "general_operand" "rm")])
+ (const_int 32)))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == REG)
+ operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[2] = adjust_address (operands[0], SImode, 4);
+ if (ADDRESS_REG_P (operands[0]))
+ return "move%.l %1,%0\;sub%.l %2,%2";
+ else
+ return "move%.l %1,%0\;clr%.l %2";
+})
+
+(define_insn "ashldi_sexthi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,a*d")
+ (ashift:DI (sign_extend:DI (match_operand:HI 1 "general_operand" "rm,rm"))
+ (const_int 32)))
+ (clobber (match_scratch:SI 2 "=a,X"))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ return "clr%.l %0\;move%.w %1,%2\;move%.l %2,%0";
+ else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %0";
+ else
+ {
+ operands[3] = adjust_address (operands[0], SImode, 4);
+ return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %3";
+ }
+ }
+ else if (DATA_REG_P (operands[0]))
+ return "move%.w %1,%0\;ext%.l %0\;clr%.l %R0";
+ else
+ return "move%.w %1,%0\;sub%.l %R0,%R0";
+})
+
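+;; A left shift by one of a 64-bit value is just a 64-bit add of the
+;; value to itself: add.l doubles the low word and leaves the carried-out
+;; bit in X, and addx.l folds that carry into the doubled high word.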
+(define_insn "*ashldi3_const1"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1)))]
+ "!TARGET_COLDFIRE"
+ "add%.l %R0,%R0\;addx%.l %0,%0")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 2)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (const_int 1)))
+ (set (match_dup 0)
+ (ashift:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 3)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (const_int 2)))
+ (set (match_dup 0)
+ (ashift:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 8)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 2)
+ (rotate:SI (match_dup 2) (const_int 8)))
+ (set (match_dup 3)
+ (rotate:SI (match_dup 3) (const_int 8)))
+ (set (strict_low_part (subreg:QI (match_dup 0) 3))
+ (subreg:QI (match_dup 0) 7))
+ (set (strict_low_part (subreg:QI (match_dup 0) 7))
+ (const_int 0))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 16)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 2)
+ (rotate:SI (match_dup 2) (const_int 16)))
+ (set (match_dup 3)
+ (rotate:SI (match_dup 3) (const_int 16)))
+ (set (strict_low_part (subreg:HI (match_dup 0) 2))
+ (subreg:HI (match_dup 0) 6))
+ (set (strict_low_part (subreg:HI (match_dup 0) 6))
+ (const_int 0))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "pre_dec_operand" "")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (const_int 32)))]
+ "reload_completed"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "post_inc_operand" "")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (const_int 32)))]
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (const_int 0))]
+{
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+})
+
+(define_insn_and_split "*ashldi3_const32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro<>")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "ro")
+ (const_int 32)))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2) (const_int 0))]
+ "split_di(operands, 2, operands + 2, operands + 4);")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "reload_completed && !TARGET_COLDFIRE
+ && INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 40"
+ [(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 2)))
+ (set (match_dup 3) (match_dup 4))
+ (set (match_dup 4) (const_int 0))]
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 48)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 2)
+ (rotate:SI (match_dup 2) (const_int 16)))
+ (set (match_dup 3) (const_int 0))
+ (set (strict_low_part (subreg:HI (match_dup 0) 2))
+ (const_int 0))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "reload_completed && !TARGET_COLDFIRE
+ && INTVAL (operands[2]) > 40 && INTVAL (operands[2]) <= 63"
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3)))
+ (set (match_dup 3) (match_dup 4))
+ (set (match_dup 4) (const_int 0))]
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_insn "*ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand 2 "const_int_operand" "n")))]
+ "!TARGET_COLDFIRE
+ && ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3)
+ || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
+ || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63))"
+ "#")
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "!TARGET_COLDFIRE"
+{
+ /* ??? A named pattern like this is not allowed to FAIL based
+ on its operands. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
+ && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
+ && (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63)))
+ FAIL;
+})
+
+;; On most 68k models, this makes faster code in a special case.
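+;; (Shifting left by exactly 16 moves the low word into the high word:
+;; swap exchanges the register halves and clr.w zeros the new low word,
+;; which is far cheaper than lsl.l #16 -- roughly 8 cycles versus about
+;; 40 on a plain 68000; approximate timings.)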
+
+(define_insn "ashlsi_16"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 16)))]
+ "!TUNE_68060"
+{
+ CC_STATUS_INIT;
+ return "swap %0\;clr%.w %0";
+})
+
+;; ashift patterns: use lsl instead of asl, because lsl always clears the
+;; overflow bit, so we must not set CC_NO_OVERFLOW.
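+;; (For example, asl.b #1 on 0x40 gives 0x80 with V set, because the
+;; sign bit changed; lsl.b #1 gives the same result with V clear.)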
+
+;; On the 68000, this makes faster code in a special case.
+
+(define_insn "ashlsi_17_24"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "TUNE_68000_10
+ && INTVAL (operands[2]) > 16
+ && INTVAL (operands[2]) <= 24"
+{
+ CC_STATUS_INIT;
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
+ return "lsl%.w %2,%0\;swap %0\;clr%.w %0";
+})
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "dI")))]
+ ""
+{
+ if (operands[2] == const1_rtx)
+ {
+ cc_status.flags = CC_NO_OVERFLOW;
+ return "add%.l %0,%0";
+ }
+ return "lsl%.l %2,%0";
+})
+
+(define_insn "ashlhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ashift:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsl%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
+ (ashift:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsl%.w %1,%0")
+
+(define_insn "ashlqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ashift:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsl%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
+ (ashift:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsl%.b %1,%0")
+
+;; On most 68k models, this makes faster code in a special case.
+
+(define_insn "ashrsi_16"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 16)))]
+ "!TUNE_68060"
+ "swap %0\;ext%.l %0")
+
+;; On the 68000, this makes faster code in a special case.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "TUNE_68000_10
+ && INTVAL (operands[2]) > 16
+ && INTVAL (operands[2]) <= 24"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
+ return "swap %0\;asr%.w %2,%0\;ext%.l %0";
+})
+
+(define_insn "subreghi1ashrdi_const32"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (subreg:HI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro")
+ (const_int 32)) 6))]
+ ""
+{
+ if (GET_CODE (operands[1]) != REG)
+ operands[1] = adjust_address (operands[1], HImode, 2);
+ return "move%.w %1,%0";
+}
+ [(set_attr "type" "move")])
+
+(define_insn "subregsi1ashrdi_const32"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (subreg:SI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro")
+ (const_int 32)) 4))]
+ ""
+{
+ return "move%.l %1,%0";
+}
+ [(set_attr "type" "move_l")])
+
+(define_insn "*ashrdi3_const1"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1)))]
+ "!TARGET_COLDFIRE"
+{
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "asr%.l #1,%0\;roxr%.l #1,%1";
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 2)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (ashiftrt:DI (match_dup 1) (const_int 1)))
+ (set (match_dup 0)
+ (ashiftrt:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 3)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (ashiftrt:DI (match_dup 1) (const_int 2)))
+ (set (match_dup 0)
+ (ashiftrt:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 8)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (strict_low_part (subreg:QI (match_dup 0) 7))
+ (subreg:QI (match_dup 0) 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_dup 2) (const_int 8)))
+ (set (match_dup 3)
+ (rotatert:SI (match_dup 3) (const_int 8)))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 16)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (strict_low_part (subreg:HI (match_dup 0) 6))
+ (subreg:HI (match_dup 0) 2))
+ (set (match_dup 2)
+ (rotate:SI (match_dup 2) (const_int 16)))
+ (set (match_dup 3)
+ (rotate:SI (match_dup 3) (const_int 16)))
+ (set (match_dup 2)
+ (sign_extend:SI (subreg:HI (match_dup 2) 2)))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_insn "*ashrdi_const32"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_src_operand" "ro")
+ (const_int 32)))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (TARGET_68020)
+ return "move%.l %1,%R0\;smi %0\;extb%.l %0";
+ else
+ return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0";
+})
+
+(define_insn "*ashrdi_const32_mem"
+ [(set (match_operand:DI 0 "memory_operand" "=o,<")
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_src_operand" "ro,ro")
+ (const_int 32)))
+ (clobber (match_scratch:SI 2 "=d,d"))]
+ ""
+{
+ CC_STATUS_INIT;
+ operands[3] = adjust_address (operands[0], SImode,
+ which_alternative == 0 ? 4 : 0);
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0";
+ else
+ return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0";
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 63)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 3) (const_int 31)))
+ (set (match_dup 2)
+ (match_dup 3))]
+ "split_di(operands, 1, operands + 2, operands + 3);")
+
+;; The predicate below must be general_operand, because ashrdi3 allows that.
+(define_insn "ashrdi_const"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand 2 "const_int_operand" "n")))]
+ "!TARGET_COLDFIRE
+ && ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3)
+ || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
+ || INTVAL (operands[2]) == 31
+ || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63))"
+{
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) == 48)
+ return "swap %0\;ext%.l %0\;move%.l %0,%1\;smi %0\;ext%.w %0";
+ if (INTVAL (operands[2]) == 31)
+ return "add%.l %1,%1\;addx%.l %0,%0\;move%.l %0,%1\;subx%.l %0,%0";
+ if (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ output_asm_insn (INTVAL (operands[2]) <= 8 ? "asr%.l %2,%0" :
+ "moveq %2,%1\;asr%.l %1,%0", operands);
+ output_asm_insn ("mov%.l %0,%1\;smi %0", operands);
+ return INTVAL (operands[2]) >= 15 ? "ext%.w %d0" :
+ TARGET_68020 ? "extb%.l %0" : "ext%.w %0\;ext%.l %0";
+ }
+ return "#";
+})
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "!TARGET_COLDFIRE"
+{
+ /* ??? A named pattern like this is not allowed to FAIL based
+ on its operands. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
+ && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
+ && (INTVAL (operands[2]) < 31 || INTVAL (operands[2]) > 63)))
+ FAIL;
+})
+
+;; On all 68k models, this makes faster code in a special case.
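+;; (add.l %0,%0 copies the sign bit into the X/carry bits, and
+;; subx.l %0,%0 computes 0 - 0 - X, i.e. 0 or -1: the sign bit
+;; replicated across the whole register, which is x >> 31 arithmetic.)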
+
+(define_insn "ashrsi_31"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 31)))]
+ ""
+{
+ return "add%.l %0,%0\;subx%.l %0,%0";
+})
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "dI")))]
+ ""
+ "asr%.l %2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "opy" "2")])
+
+(define_insn "ashrhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (ashiftrt:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "asr%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
+ (ashiftrt:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "asr%.w %1,%0")
+
+(define_insn "ashrqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ashiftrt:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "asr%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
+ (ashiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "asr%.b %1,%0")
+
+;; logical shift instructions
+
+;; Commented out because of reload problems in 950612-1.c.
+;;(define_insn ""
+;; [(set (cc0)
+;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro")
+;; (const_int 32)) 4))
+;; (set (match_operand:SI 1 "nonimmediate_operand" "=dm")
+;; (subreg:SI (lshiftrt:DI (match_dup 0)
+;; (const_int 32)) 4))]
+;; ""
+;;{
+;; return "move%.l %0,%1";
+;;})
+;;
+;;(define_insn ""
+;; [(set (cc0)
+;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro")
+;; (const_int 32)) 0))
+;; (set (match_operand:DI 1 "nonimmediate_operand" "=do")
+;; (lshiftrt:DI (match_dup 0)
+;; (const_int 32)))]
+;; ""
+;;{
+;; if (GET_CODE (operands[1]) == REG)
+;; operands[2] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+;; else
+;; operands[2] = adjust_address (operands[1], SImode, 4);
+;; return "move%.l %0,%2\;clr%.l %1";
+;;})
+
+(define_insn "subreg1lshrdi_const32"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (subreg:SI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro")
+ (const_int 32)) 4))]
+ ""
+ "move%.l %1,%0"
+ [(set_attr "type" "move_l")])
+
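+;; A 64-bit logical right shift by one: lsr.l halves the high word and
+;; leaves the shifted-out bit in X, and roxr.l rotates that bit into
+;; the msb of the low word.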
+(define_insn "*lshrdi3_const1"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1)))]
+ "!TARGET_COLDFIRE"
+ "lsr%.l #1,%0\;roxr%.l #1,%R0")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 2)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (lshiftrt:DI (match_dup 1) (const_int 1)))
+ (set (match_dup 0)
+ (lshiftrt:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 3)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (match_dup 0)
+ (lshiftrt:DI (match_dup 1) (const_int 2)))
+ (set (match_dup 0)
+ (lshiftrt:DI (match_dup 0) (const_int 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 8)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (strict_low_part (subreg:QI (match_dup 0) 7))
+ (subreg:QI (match_dup 0) 3))
+ (set (match_dup 2)
+ (lshiftrt:SI (match_dup 2) (const_int 8)))
+ (set (match_dup 3)
+ (rotatert:SI (match_dup 3) (const_int 8)))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 16)))]
+ "reload_completed && !TARGET_COLDFIRE"
+ [(set (strict_low_part (subreg:HI (match_dup 0) 6))
+ (subreg:HI (match_dup 0) 2))
+ (set (strict_low_part (subreg:HI (match_dup 0) 2))
+ (const_int 0))
+ (set (match_dup 3)
+ (rotate:SI (match_dup 3) (const_int 16)))
+ (set (match_dup 2)
+ (rotate:SI (match_dup 2) (const_int 16)))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "pre_dec_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (const_int 32)))]
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (const_int 0))]
+{
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ operands[1] = gen_highpart (SImode, operands[1]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "post_inc_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (const_int 32)))]
+ "reload_completed"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ operands[1] = gen_highpart (SImode, operands[1]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (const_int 32)))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 5))
+ (set (match_dup 4) (const_int 0))]
+ "split_di(operands, 2, operands + 2, operands + 4);")
+
+(define_insn "*lshrdi_const32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro<>")
+ (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro")
+ (const_int 32)))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "reload_completed && !TARGET_COLDFIRE
+ && INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 40"
+ [(set (match_dup 3) (lshiftrt:SI (match_dup 3) (match_dup 2)))
+ (set (match_dup 4) (match_dup 3))
+ (set (match_dup 3) (const_int 0))]
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 48)))]
+ "reload_completed"
+ [(set (match_dup 3) (match_dup 2))
+ (set (strict_low_part (subreg:HI (match_dup 0) 6))
+ (const_int 0))
+ (set (match_dup 2) (const_int 0))
+ (set (match_dup 3)
+ (rotate:SI (match_dup 3) (const_int 16)))]
+{
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "reload_completed && !TARGET_COLDFIRE
+ && INTVAL (operands[2]) > 40 && INTVAL (operands[2]) <= 62"
+ [(set (match_dup 4) (match_dup 2))
+ (set (match_dup 3) (lshiftrt:SI (match_dup 3) (match_dup 4)))
+ (set (match_dup 4) (match_dup 3))
+ (set (match_dup 3) (const_int 0))]
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+})
+
+(define_insn "*lshrdi_const63"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 63)))]
+ ""
+ "add%.l %0,%0\;clr%.l %0\;clr%.l %R1\;addx%.l %R1,%R1")
+
+(define_insn "*lshrdi3_const"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand 2 "const_int_operand" "n")))]
+ "(!TARGET_COLDFIRE
+ && ((INTVAL (operands[2]) >= 2 && INTVAL (operands[2]) <= 3)
+ || INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
+ || (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)))"
+ "#")
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ "!TARGET_COLDFIRE"
+{
+ /* ??? A named pattern like this is not allowed to FAIL based
+ on its operands. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
+ && INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
+ && (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63)))
+ FAIL;
+})
+
+;; On all 68k models, this makes faster code in a special case.
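+;; (For a logical x >> 31 the result is 0 or 1: add.l puts the sign
+;; bit into X, subx.l spreads it to 0 or -1, and neg.l maps that to
+;; 0 or 1.)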
+
+(define_insn "lshrsi_31"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 31)))]
+ ""
+{
+ return "add%.l %0,%0\;subx%.l %0,%0\;neg%.l %0";
+})
+
+;; On most 68k models, this makes faster code in a special case.
+
+(define_insn "lshrsi_16"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 16)))]
+ "!TUNE_68060"
+{
+ CC_STATUS_INIT;
+ return "clr%.w %0\;swap %0";
+})
+
+;; On the 68000, this makes faster code in a special case.
+
+(define_insn "lshrsi_17_24"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "TUNE_68000_10
+ && INTVAL (operands[2]) > 16
+ && INTVAL (operands[2]) <= 24"
+{
+ /* I think lsr%.w sets the CC properly. */
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
+ return "clr%.w %0\;swap %0\;lsr%.w %2,%0";
+})
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "dI")))]
+ ""
+ "lsr%.l %2,%0"
+ [(set_attr "type" "shift")
+ (set_attr "opy" "2")])
+
+(define_insn "lshrhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (lshiftrt:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsr%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
+ (lshiftrt:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsr%.w %1,%0")
+
+(define_insn "lshrqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (lshiftrt:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsr%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
+ (lshiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "lsr%.b %1,%0")
+
+;; rotate instructions
+
+(define_insn "rotlsi_16"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 16)))]
+ ""
+ "swap %0"
+ [(set_attr "type" "shift")])
+
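+;; Assuming the usual m68k constraint letters ('I' 1-8, 'O' 16,
+;; 'N' 24-31), "dINO" admits only rotate counts the code below can
+;; handle: a count of 16 becomes a swap, and a count of 24-31 becomes
+;; a ror by 32 minus the count, which lands back in the 1-8 range the
+;; hardware immediate form allows.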
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "dINO")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 16)
+ return "swap %0";
+ else if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 16)
+ {
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+ return "ror%.l %2,%0";
+ }
+ else
+ return "rol%.l %2,%0";
+})
+
+(define_insn "rotlhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (rotate:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "dIP")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 8)
+ {
+ operands[2] = GEN_INT (16 - INTVAL (operands[2]));
+ return "ror%.w %2,%0";
+ }
+ else
+ return "rol%.w %2,%0";
+})
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
+ (rotate:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dIP")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) >= 8)
+ {
+ operands[1] = GEN_INT (16 - INTVAL (operands[1]));
+ return "ror%.w %1,%0";
+ }
+ else
+ return "rol%.w %1,%0";
+})
+
+(define_insn "rotlqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (rotate:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 4)
+ {
+ operands[2] = GEN_INT (8 - INTVAL (operands[2]));
+ return "ror%.b %2,%0";
+ }
+ else
+ return "rol%.b %2,%0";
+})
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
+ (rotate:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+{
+ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) >= 4)
+ {
+ operands[1] = GEN_INT (8 - INTVAL (operands[1]));
+ return "ror%.b %1,%0";
+ }
+ else
+ return "rol%.b %1,%0";
+})
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "ror%.l %2,%0")
+
+(define_insn "rotrhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (rotatert:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "ror%.w %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
+ (rotatert:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "ror%.w %1,%0")
+
+(define_insn "rotrqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (rotatert:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "ror%.b %2,%0")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
+ (rotatert:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "dI")))]
+ "!TARGET_COLDFIRE"
+ "ror%.b %1,%0")
+
+
+;; Bit set/clear in memory byte.
+
+;; set bit, bit number is int
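+;; e.g. the C fragment  p[0] |= 1 << (n & 7)  becomes a single
+;; "bset %d1,(%a0)": bset on a memory byte uses the bit number mod 8,
+;; which is why only the low byte of the shifted 1 is matched.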
+(define_insn "bsetmemqi"
+ [(set (match_operand:QI 0 "memory_operand" "+m")
+ (ior:QI (subreg:QI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "general_operand" "d")) 3)
+ (match_dup 0)))]
+ ""
+{
+ CC_STATUS_INIT;
+ return "bset %1,%0";
+}
+ [(set_attr "type" "bitrw")])
+
+;; set bit, bit number is (sign/zero)_extended from HImode/QImode
+(define_insn "*bsetmemqi_ext"
+ [(set (match_operand:QI 0 "memory_operand" "+m")
+ (ior:QI (subreg:QI (ashift:SI (const_int 1)
+ (match_operator:SI 2 "extend_operator"
+ [(match_operand 1 "general_operand" "d")])) 3)
+ (match_dup 0)))]
+ ""
+{
+ CC_STATUS_INIT;
+ return "bset %1,%0";
+}
+ [(set_attr "type" "bitrw")])
+
+;; clear bit, bit number is int
+(define_insn "bclrmemqi"
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m")
+ (const_int 1)
+ (minus:SI (const_int 7)
+ (match_operand:SI 1 "general_operand" "d")))
+ (const_int 0))]
+ ""
+{
+ CC_STATUS_INIT;
+ return "bclr %1,%0";
+}
+ [(set_attr "type" "bitrw")])
+
+;; clear bit, bit number is (sign/zero)_extended from HImode/QImode
+(define_insn "*bclrmemqi_ext"
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m")
+ (const_int 1)
+ (minus:SI (const_int 7)
+ (match_operator:SI 2 "extend_operator"
+ [(match_operand 1 "general_operand" "d")])))
+ (const_int 0))]
+ ""
+{
+ CC_STATUS_INIT;
+ return "bclr %1,%0";
+}
+ [(set_attr "type" "bitrw")])
+
+;; Special cases of bit-field insns which we should
+;; recognize in preference to the general case.
+;; These handle aligned 8-bit and 16-bit fields,
+;; which can usually be done with move instructions.
+
+;
+; Special case for 32-bit field in memory. This only occurs when 32-bit
+; alignment of structure members is specified.
+;
+; The move is allowed to be odd byte aligned, because that's still faster
+; than an odd byte aligned bit-field instruction.
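+;
+; For example, a 32-bit field at bit offset 8 becomes a single move.l
+; at byte offset 1 (offset / 8), odd alignment and all.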
+;
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
+ (const_int 32)
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (match_operand:SI 2 "general_src_operand" "rmSi"))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[1]) % 8) == 0
+ && ! mode_dependent_address_p (XEXP (operands[0], 0))"
+{
+ operands[0]
+ = adjust_address (operands[0], SImode, INTVAL (operands[1]) / 8);
+
+ return "move%.l %2,%0";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+do")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "register_operand" "d"))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
+ && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
+ && (GET_CODE (operands[0]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[0], 0)))"
+{
+ if (REG_P (operands[0]))
+ {
+ if (INTVAL (operands[1]) + INTVAL (operands[2]) != 32)
+ return "bfins %3,%0{%b2:%b1}";
+ }
+ else
+ operands[0] = adjust_address (operands[0],
+ INTVAL (operands[1]) == 8 ? QImode : HImode,
+ INTVAL (operands[2]) / 8);
+
+ if (GET_CODE (operands[3]) == MEM)
+ operands[3] = adjust_address (operands[3],
+ INTVAL (operands[1]) == 8 ? QImode : HImode,
+ (32 - INTVAL (operands[1])) / 8);
+
+ if (INTVAL (operands[1]) == 8)
+ return "move%.b %3,%0";
+ return "move%.w %3,%0";
+})
+
+
+;
+; Special case for 32-bit field in memory. This only occurs when 32-bit
+; alignment of structure members is specified.
+;
+; The move is allowed to be odd byte aligned, because that's still faster
+; than an odd byte aligned bit-field instruction.
+;
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (zero_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
+ (const_int 32)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[2]) % 8) == 0
+ && ! mode_dependent_address_p (XEXP (operands[1], 0))"
+{
+ operands[1]
+ = adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8);
+
+ return "move%.l %1,%0";
+})
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=&d")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "do")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (GET_CODE (operands[1]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+{
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
+ return "bfextu %1{%b3:%b2},%0";
+ }
+ else
+ operands[1]
+ = adjust_address (operands[1], SImode, INTVAL (operands[3]) / 8);
+
+ output_asm_insn ("clr%.l %0", operands);
+ if (GET_CODE (operands[0]) == MEM)
+ operands[0] = adjust_address (operands[0],
+ INTVAL (operands[2]) == 8 ? QImode : HImode,
+ (32 - INTVAL (operands[2])) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return "move%.b %1,%0";
+ return "move%.w %1,%0";
+})
+
+;
+; Special case for 32-bit field in memory. This only occurs when 32-bit
+; alignment of structure members is specified.
+;
+; The move is allowed to be odd byte aligned, because that's still faster
+; than an odd byte aligned bit-field instruction.
+;
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (sign_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
+ (const_int 32)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[2]) % 8) == 0
+ && ! mode_dependent_address_p (XEXP (operands[1], 0))"
+{
+ operands[1]
+ = adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8);
+
+ return "move%.l %1,%0";
+})
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "do")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (GET_CODE (operands[1]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+{
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
+ return "bfexts %1{%b3:%b2},%0";
+ }
+ else
+ operands[1]
+ = adjust_address (operands[1],
+ INTVAL (operands[2]) == 8 ? QImode : HImode,
+ INTVAL (operands[3]) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return "move%.b %1,%0\;extb%.l %0";
+ return "move%.w %1,%0\;ext%.l %0";
+})
+
+;; Bit-field instructions, general cases.
+;; "o,d" constraint causes a nonoffsettable memref to match the "o"
+;; so that its address is reloaded.
+
+(define_expand "extv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (sign_extract:SI (match_operand:QI 1 "memory_operand" "o")
+ (match_operand:SI 2 "nonmemory_operand" "dn")
+ (match_operand:SI 3 "nonmemory_operand" "dn")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "bfexts %1{%b3:%b2},%0")
+
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extract:SI (match_operand:QI 1 "memory_operand" "o")
+ (match_operand:SI 2 "nonmemory_operand" "dn")
+ (match_operand:SI 3 "nonmemory_operand" "dn")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) != 32)
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ }
+ else
+ {
+ CC_STATUS_INIT;
+ }
+ return "bfextu %1{%b3:%b2},%0";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
+ (match_operand:SI 1 "nonmemory_operand" "dn")
+ (match_operand:SI 2 "nonmemory_operand" "dn"))
+ (xor:SI (zero_extract:SI (match_dup 0) (match_dup 1) (match_dup 2))
+ (match_operand 3 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD
+ && (INTVAL (operands[3]) == -1
+ || (GET_CODE (operands[1]) == CONST_INT
+ && (~ INTVAL (operands[3]) & ((1 << INTVAL (operands[1])) - 1)) == 0))"
+{
+ CC_STATUS_INIT;
+ return "bfchg %0{%b2:%b1}";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
+ (match_operand:SI 1 "nonmemory_operand" "dn")
+ (match_operand:SI 2 "nonmemory_operand" "dn"))
+ (const_int 0))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ CC_STATUS_INIT;
+ return "bfclr %0{%b2:%b1}";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
+ (match_operand:SI 1 "general_operand" "dn")
+ (match_operand:SI 2 "general_operand" "dn"))
+ (const_int -1))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ CC_STATUS_INIT;
+ return "bfset %0{%b2:%b1}";
+})
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "register_operand" ""))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
+ (match_operand:SI 1 "nonmemory_operand" "dn")
+ (match_operand:SI 2 "nonmemory_operand" "dn"))
+ (match_operand:SI 3 "register_operand" "d"))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "bfins %3,%0{%b2:%b1}")
+
+;; Now recognize bit-field insns that operate on registers
+;; (or at least were intended to do so).
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+ "bfexts %1{%b3:%b2},%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) != 32)
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ }
+ else
+ {
+ CC_STATUS_INIT;
+ }
+ return "bfextu %1{%b3:%b2},%0";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ CC_STATUS_INIT;
+ return "bfclr %0{%b2:%b1}";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int -1))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ CC_STATUS_INIT;
+ return "bfset %0{%b2:%b1}";
+})
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "register_operand" "d"))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+#if 0
+ /* These special cases are now recognized by a specific pattern. */
+ if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[1]) == 16 && INTVAL (operands[2]) == 16)
+ return "move%.w %3,%0";
+ if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[1]) == 24 && INTVAL (operands[2]) == 8)
+ return "move%.b %3,%0";
+#endif
+ return "bfins %3,%0{%b2:%b1}";
+})
+
+;; Special patterns for optimizing bit-field instructions.
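+;;
+;; A one-bit field test is best done with btst.  zero_extract numbers
+;; bits from the msb while btst numbers them from the lsb, hence the
+;; (width - position) conversion passed to output_btst below.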
+
+(define_insn ""
+ [(set (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "general_operand" "dn"))
+ (const_int 0)))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ if (operands[1] == const1_rtx
+ && GET_CODE (operands[2]) == CONST_INT)
+ {
+ int width = GET_CODE (operands[0]) == REG ? 31 : 7;
+ return output_btst (operands,
+ GEN_INT (width - INTVAL (operands[2])),
+ operands[0], insn, 1000);
+ /* Pass 1000 as SIGNPOS argument so that btst will
+ not think we are testing the sign bit for an `and'
+ and assume that nonzero implies a negative result. */
+ }
+ if (INTVAL (operands[1]) != 32)
+ cc_status.flags = CC_NOT_NEGATIVE;
+ return "bftst %0{%b2:%b1}";
+})
+
+
+;;; Now handle the register cases.
+(define_insn ""
+ [(set (cc0)
+ (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "general_operand" "dn"))
+ (const_int 0)))]
+ "TARGET_68020 && TARGET_BITFIELD"
+{
+ if (operands[1] == const1_rtx
+ && GET_CODE (operands[2]) == CONST_INT)
+ {
+ int width = GET_CODE (operands[0]) == REG ? 31 : 7;
+ return output_btst (operands, GEN_INT (width - INTVAL (operands[2])),
+ operands[0], insn, 1000);
+ /* Pass 1000 as SIGNPOS argument so that btst will
+ not think we are testing the sign bit for an `and'
+ and assume that nonzero implies a negative result. */
+ }
+ if (INTVAL (operands[1]) != 32)
+ cc_status.flags = CC_NOT_NEGATIVE;
+ return "bftst %0{%b2:%b1}";
+})
+
+(define_insn "scc0_di"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
+ (match_operator 1 "ordered_comparison_operator"
+ [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))]
+ "! TARGET_COLDFIRE"
+{
+ return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]);
+})
+
+(define_insn "scc0_di_5200"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d")
+ (match_operator 1 "ordered_comparison_operator"
+ [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))]
+ "TARGET_COLDFIRE"
+{
+ return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]);
+})
+
+(define_insn "scc_di"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,dm")
+ (match_operator 1 "ordered_comparison_operator"
+ [(match_operand:DI 2 "general_operand" "ro,r")
+ (match_operand:DI 3 "general_operand" "r,ro")]))]
+ "! TARGET_COLDFIRE"
+{
+ return output_scc_di (operands[1], operands[2], operands[3], operands[0]);
+})
+
+(define_insn "scc_di_5200"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d")
+ (match_operator 1 "ordered_comparison_operator"
+ [(match_operand:DI 2 "general_operand" "ro,r")
+ (match_operand:DI 3 "general_operand" "r,ro")]))]
+ "TARGET_COLDFIRE"
+{
+ return output_scc_di (operands[1], operands[2], operands[3], operands[0]);
+})
+
+;; Note that operand 0 of an SCC insn is supported in the hardware as
+;; memory, but we cannot allow it to be in memory in case the address
+;; needs to be reloaded.
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (eq:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("seq %0", "fseq %0", "seq %0");
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ne:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("sne %0", "fsne %0", "sne %0");
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (gt:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("sgt %0", "fsgt %0", 0);
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (gtu:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ return "shi %0";
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (lt:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("slt %0", "fslt %0", "smi %0");
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ltu:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ return "scs %0";
+})
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ge:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("sge %0", "fsge %0", "spl %0");
+})
+
+(define_insn "*scc"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (geu:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ return "scc %0";
+}
+ [(set_attr "type" "scc")])
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (le:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ OUTPUT_JUMP ("sle %0", "fsle %0", 0);
+})
+
+(define_insn "*sls"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (leu:QI (cc0) (const_int 0)))]
+ ""
+{
+ cc_status = cc_prev_status;
+ return "sls %0";
+}
+ [(set_attr "type" "scc")])
+
+(define_insn "*sordered_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ordered:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsor %0";
+})
+
+(define_insn "*sunordered_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (unordered:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsun %0";
+})
+
+(define_insn "*suneq_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (uneq:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsueq %0";
+})
+
+(define_insn "*sunge_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (unge:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsuge %0";
+})
+
+(define_insn "*sungt_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ungt:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsugt %0";
+})
+
+(define_insn "*sunle_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (unle:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsule %0";
+})
+
+(define_insn "*sunlt_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (unlt:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsult %0";
+})
+
+(define_insn "*sltgt_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (ltgt:QI (cc0) (const_int 0)))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsogl %0";
+})
+
+(define_insn "*fsogt_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (not:QI (unle:QI (cc0) (const_int 0))))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsogt %0";
+})
+
+(define_insn "*fsoge_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (not:QI (unlt:QI (cc0) (const_int 0))))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsoge %0";
+})
+
+(define_insn "*fsolt_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (not:QI (unge:QI (cc0) (const_int 0))))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsolt %0";
+})
+
+(define_insn "*fsole_1"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (not:QI (ungt:QI (cc0) (const_int 0))))]
+ "TARGET_68881 && !TUNE_68060"
+{
+ cc_status = cc_prev_status;
+ return "fsole %0";
+})
+
+;; Basic conditional jump instructions.
+
+(define_insn "beq0_di"
+ [(set (pc)
+ (if_then_else (eq (match_operand:DI 0 "general_operand" "d*ao,<>")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ","))
+ (pc)))
+ (clobber (match_scratch:SI 2 "=d,d"))]
+ ""
+{
+ CC_STATUS_INIT;
+ if (which_alternative == 1)
+ return "move%.l %0,%2\;or%.l %0,%2\;jeq %l1";
+ if ((cc_prev_status.value1
+ && rtx_equal_p (cc_prev_status.value1, operands[0]))
+ || (cc_prev_status.value2
+ && rtx_equal_p (cc_prev_status.value2, operands[0])))
+ {
+ cc_status = cc_prev_status;
+ return "jeq %l1";
+ }
+ if (GET_CODE (operands[0]) == REG)
+ operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[3] = adjust_address (operands[0], SImode, 4);
+ if (! ADDRESS_REG_P (operands[0]))
+ {
+ if (reg_overlap_mentioned_p (operands[2], operands[0]))
+ {
+ if (reg_overlap_mentioned_p (operands[2], operands[3]))
+ return "or%.l %0,%2\;jeq %l1";
+ else
+ return "or%.l %3,%2\;jeq %l1";
+ }
+ return "move%.l %0,%2\;or%.l %3,%2\;jeq %l1";
+ }
+ operands[4] = gen_label_rtx();
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ output_asm_insn ("tst%.l %0\;jne %l4\;tst%.l %3\;jeq %l1", operands);
+ else
+ output_asm_insn ("cmp%.w #0,%0\;jne %l4\;cmp%.w #0,%3\;jeq %l1", operands);
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (operands[4]));
+ return "";
+})
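+
+;; For example, with the DI operand in the %d0/%d1 register pair and
+;; %d2 allocated as the scratch, the pattern above emits a sequence
+;; like (illustrative):
+;;
+;;   move.l %d0,%d2
+;;   or.l %d1,%d2
+;;   jeq .L1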
+
+(define_insn "bne0_di"
+ [(set (pc)
+ (if_then_else (ne (match_operand:DI 0 "general_operand" "do,*a")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ","))
+ (pc)))
+ (clobber (match_scratch:SI 2 "=d,X"))]
+ ""
+{
+ if ((cc_prev_status.value1
+ && rtx_equal_p (cc_prev_status.value1, operands[0]))
+ || (cc_prev_status.value2
+ && rtx_equal_p (cc_prev_status.value2, operands[0])))
+ {
+ cc_status = cc_prev_status;
+ return "jne %l1";
+ }
+ CC_STATUS_INIT;
+ if (GET_CODE (operands[0]) == REG)
+ operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ else
+ operands[3] = adjust_address (operands[0], SImode, 4);
+ if (!ADDRESS_REG_P (operands[0]))
+ {
+ if (reg_overlap_mentioned_p (operands[2], operands[0]))
+ {
+ if (reg_overlap_mentioned_p (operands[2], operands[3]))
+ return "or%.l %0,%2\;jne %l1";
+ else
+ return "or%.l %3,%2\;jne %l1";
+ }
+ return "move%.l %0,%2\;or%.l %3,%2\;jne %l1";
+ }
+ if (TARGET_68020 || TARGET_COLDFIRE)
+ return "tst%.l %0\;jne %l1\;tst%.l %3\;jne %l1";
+ else
+ return "cmp%.w #0,%0\;jne %l1\;cmp%.w #0,%3\;jne %l1";
+})
+
+(define_insn "bge0_di"
+ [(set (pc)
+ (if_then_else (ge (match_operand:DI 0 "general_operand" "ro")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_prev_status.value1
+ && rtx_equal_p (cc_prev_status.value1, operands[0]))
+ || (cc_prev_status.value2
+ && rtx_equal_p (cc_prev_status.value2, operands[0])))
+ {
+ cc_status = cc_prev_status;
+ return cc_status.flags & CC_REVERSED ? "jle %l1" : "jpl %l1";
+ }
+ CC_STATUS_INIT;
+ if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0]))
+ output_asm_insn("tst%.l %0", operands);
+ else
+ {
+ /* On an address reg, cmpw may replace cmpl. */
+ output_asm_insn("cmp%.w #0,%0", operands);
+ }
+ return "jpl %l1";
+})
+
+(define_insn "blt0_di"
+ [(set (pc)
+ (if_then_else (lt (match_operand:DI 0 "general_operand" "ro")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_prev_status.value1
+ && rtx_equal_p (cc_prev_status.value1, operands[0]))
+ || (cc_prev_status.value2
+ && rtx_equal_p (cc_prev_status.value2, operands[0])))
+ {
+ cc_status = cc_prev_status;
+ return cc_status.flags & CC_REVERSED ? "jgt %l1" : "jmi %l1";
+ }
+ CC_STATUS_INIT;
+ if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0]))
+ output_asm_insn("tst%.l %0", operands);
+ else
+ {
+ /* On an address reg, cmpw may replace cmpl. */
+ output_asm_insn("cmp%.w #0,%0", operands);
+ }
+ return "jmi %l1";
+})
+
+(define_insn "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0");
+}
+ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+
+(define_insn "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0");
+}
+ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+
+(define_insn "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jgt %l0", "fjgt %l0", 0);
+}
+ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+
+(define_insn "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jhi %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jlt %l0", "fjlt %l0", "jmi %l0");
+}
+ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))])
+
+(define_insn "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jcs %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jge %l0", "fjge %l0", "jpl %l0");
+})
+
+(define_insn "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jcc %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "ble"
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jle %l0", "fjle %l0", 0);
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jls %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "bordered"
+ [(set (pc)
+ (if_then_else (ordered (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjor %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bunordered"
+ [(set (pc)
+ (if_then_else (unordered (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjun %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "buneq"
+ [(set (pc)
+ (if_then_else (uneq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjueq %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bunge"
+ [(set (pc)
+ (if_then_else (unge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjuge %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bungt"
+ [(set (pc)
+ (if_then_else (ungt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjugt %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bunle"
+ [(set (pc)
+ (if_then_else (unle (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjule %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bunlt"
+ [(set (pc)
+ (if_then_else (unlt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjult %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "bltgt"
+ [(set (pc)
+ (if_then_else (ltgt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjogl %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+;; Negated conditional jump instructions.
+
+(define_insn "*beq_rev"
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0");
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bne_rev"
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0");
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bgt_rev"
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jle %l0", "fjngt %l0", 0);
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bgtu_rev"
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jls %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*blt_rev"
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jge %l0", "fjnlt %l0", "jpl %l0");
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bltu_rev"
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jcc %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bge_rev"
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jlt %l0", "fjnge %l0", "jmi %l0");
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bgeu_rev"
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jcs %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*ble_rev"
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ OUTPUT_JUMP ("jgt %l0", "fjnle %l0", 0);
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bleu_rev"
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
+ {
+ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE;
+ return 0;
+ }
+
+ return "jhi %l0";
+}
+ [(set_attr "type" "bcc")])
+
+(define_insn "*bordered_rev"
+ [(set (pc)
+ (if_then_else (ordered (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjun %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bunordered_rev"
+ [(set (pc)
+ (if_then_else (unordered (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjor %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*buneq_rev"
+ [(set (pc)
+ (if_then_else (uneq (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjogl %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bunge_rev"
+ [(set (pc)
+ (if_then_else (unge (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjolt %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bungt_rev"
+ [(set (pc)
+ (if_then_else (ungt (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjole %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bunle_rev"
+ [(set (pc)
+ (if_then_else (unle (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjogt %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bunlt_rev"
+ [(set (pc)
+ (if_then_else (unlt (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjoge %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+(define_insn "*bltgt_rev"
+ [(set (pc)
+ (if_then_else (ltgt (cc0) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ gcc_assert (cc_prev_status.flags & CC_IN_68881);
+ return "fjueq %l0";
+}
+ [(set_attr "type" "fbcc")])
+
+;; Unconditional and other jump instructions
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jra %l0"
+ [(set_attr "type" "bra")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "" ""))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+{
+#ifdef CASE_VECTOR_PC_RELATIVE
+ operands[0] = gen_rtx_PLUS (SImode, pc_rtx,
+ gen_rtx_SIGN_EXTEND (SImode, operands[0]));
+#endif
+})
+
+;; Jump to variable address from dispatch table of absolute addresses.
+(define_insn "*tablejump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "a"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+{
+ return MOTOROLA ? "jmp (%0)" : "jmp %0@";
+}
+ [(set_attr "type" "jmp")])
+
+;; Jump to variable address from dispatch table of relative addresses.
+(define_insn ""
+ [(set (pc)
+ (plus:SI (pc)
+ (sign_extend:SI (match_operand:HI 0 "register_operand" "r"))))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+{
+#ifdef ASM_RETURN_CASE_JUMP
+ ASM_RETURN_CASE_JUMP;
+#else
+ if (TARGET_COLDFIRE)
+ {
+ if (ADDRESS_REG_P (operands[0]))
+ return MOTOROLA ? "jmp (2,pc,%0.l)" : "jmp pc@(2,%0:l)";
+ else if (MOTOROLA)
+ return "ext%.l %0\;jmp (2,pc,%0.l)";
+ else
+ return "extl %0\;jmp pc@(2,%0:l)";
+ }
+ else
+ return MOTOROLA ? "jmp (2,pc,%0.w)" : "jmp pc@(2,%0:w)";
+#endif
+})
+
+;; Decrement-and-branch insns.
+(define_insn "*dbne_hi"
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:HI 0 "nonimmediate_operand" "+d*g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (DATA_REG_P (operands[0]))
+ return "dbra %0,%l1";
+ if (GET_CODE (operands[0]) == MEM)
+ return "subq%.w #1,%0\;jcc %l1";
+ return "subq%.w #1,%0\;cmp%.w #-1,%0\;jne %l1";
+})
+
+(define_insn "*dbne_si"
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:SI 0 "nonimmediate_operand" "+d*g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_COLDFIRE"
+{
+ CC_STATUS_INIT;
+ if (DATA_REG_P (operands[0]))
+ return "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jcc %l1";
+ if (GET_CODE (operands[0]) == MEM)
+ return "subq%.l #1,%0\;jcc %l1";
+ return "subq%.l #1,%0\;cmp%.l #-1,%0\;jne %l1";
+})
+
+;; Two dbra patterns that use REG_NOTES info generated by strength_reduce.
+
+(define_insn "*dbge_hi"
+ [(set (pc)
+ (if_then_else
+ (ge (plus:HI (match_operand:HI 0 "nonimmediate_operand" "+d*am")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)"
+{
+ CC_STATUS_INIT;
+ if (DATA_REG_P (operands[0]))
+ return "dbra %0,%l1";
+ if (GET_CODE (operands[0]) == MEM)
+ return "subq%.w #1,%0\;jcc %l1";
+ return "subq%.w #1,%0\;cmp%.w #-1,%0\;jne %l1";
+})
+
+(define_expand "decrement_and_branch_until_zero"
+ [(parallel [(set (pc)
+ (if_then_else
+ (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))])]
+ ""
+ "")
+
+(define_insn "*dbge_si"
+ [(set (pc)
+ (if_then_else
+ (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+d*am")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)"
+{
+ CC_STATUS_INIT;
+ if (DATA_REG_P (operands[0]))
+ return "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jcc %l1";
+ if (GET_CODE (operands[0]) == MEM)
+ return "subq%.l #1,%0\;jcc %l1";
+ return "subq%.l #1,%0\;cmp%.l #-1,%0\;jne %l1";
+})
+
+(define_expand "sibcall"
+ [(call (match_operand:QI 0 "memory_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+{
+ operands[0] = m68k_legitimize_sibcall_address (operands[0]);
+})
+
+(define_insn "*sibcall"
+ [(call (mem:QI (match_operand:SI 0 "sibcall_operand" ""))
+ (match_operand:SI 1 "general_operand" ""))]
+ "SIBLING_CALL_P (insn)"
+{
+ return output_sibcall (operands[0]);
+})
+
+(define_expand "sibcall_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+{
+ operands[1] = m68k_legitimize_sibcall_address (operands[1]);
+})
+
+(define_insn "*sibcall_value"
+ [(set (match_operand 0 "" "=rf,rf")
+ (call (mem:QI (match_operand:SI 1 "sibcall_operand" ""))
+ (match_operand:SI 2 "general_operand" "")))]
+ "SIBLING_CALL_P (insn)"
+{
+ operands[0] = operands[1];
+ return output_sibcall (operands[0]);
+})
+
+;; Call subroutine with no return value.
+(define_expand "call"
+ [(call (match_operand:QI 0 "memory_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ;; Operand 1 not really used on the m68000.
+ ""
+{
+ operands[0] = m68k_legitimize_call_address (operands[0]);
+})
+
+(define_insn "*call"
+ [(call (mem:QI (match_operand:SI 0 "call_operand" "a,W"))
+ (match_operand:SI 1 "general_operand" "g,g"))]
+ ;; Operand 1 not really used on the m68000.
+ "!SIBLING_CALL_P (insn)"
+{
+ return output_call (operands[0]);
+}
+ [(set_attr "type" "jsr")])
+
+;; Call subroutine, returning value in operand 0
+;; (which must be a hard register).
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ;; Operand 2 not really used on the m68000.
+ ""
+{
+ operands[1] = m68k_legitimize_call_address (operands[1]);
+})
+
+(define_insn "*non_symbolic_call_value"
+ [(set (match_operand 0 "" "=rf,rf")
+ (call (mem:QI (match_operand:SI 1 "non_symbolic_call_operand" "a,W"))
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ;; Operand 2 not really used on the m68000.
+ "!SIBLING_CALL_P (insn)"
+ "jsr %a1"
+ [(set_attr "type" "jsr")
+ (set_attr "opx" "1")])
+
+(define_insn "*symbolic_call_value_jsr"
+ [(set (match_operand 0 "" "=rf,rf")
+ (call (mem:QI (match_operand:SI 1 "symbolic_operand" "a,W"))
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ;; Operand 2 not really used on the m68000.
+ "!SIBLING_CALL_P (insn) && m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_JSR"
+{
+ operands[0] = operands[1];
+ return m68k_symbolic_call;
+}
+ [(set_attr "type" "jsr")
+ (set_attr "opx" "1")])
+
+(define_insn "*symbolic_call_value_bsr"
+ [(set (match_operand 0 "" "=rf,rf")
+ (call (mem:QI (match_operand:SI 1 "symbolic_operand" "a,W"))
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ;; Operand 2 not really used on the m68000.
+ "!SIBLING_CALL_P (insn)
+ && (m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_BSR_C
+ || m68k_symbolic_call_var == M68K_SYMBOLIC_CALL_BSR_P)"
+{
+ operands[0] = operands[1];
+ return m68k_symbolic_call;
+}
+ [(set_attr "type" "bsr")
+ (set_attr "opx" "1")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ "NEEDS_UNTYPED_CALL"
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+})
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
+ ""
+ "")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "type" "nop")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+{
+ m68k_expand_prologue ();
+ DONE;
+})
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+{
+ m68k_expand_epilogue (false);
+ DONE;
+})
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+{
+ m68k_expand_epilogue (true);
+ DONE;
+})
+
+;; Used for frameless functions which save no regs and allocate no locals.
+(define_expand "return"
+ [(return)]
+ "m68k_use_return_insn ()"
+ "")
+
+(define_insn "*return"
+ [(return)]
+ ""
+{
+ switch (m68k_get_function_kind (current_function_decl))
+ {
+ case m68k_fk_interrupt_handler:
+ return "rte";
+
+ case m68k_fk_interrupt_thread:
+ return "sleep";
+
+ default:
+ if (crtl->args.pops_args)
+ {
+ operands[0] = GEN_INT (crtl->args.pops_args);
+ return "rtd %0";
+ }
+ else
+ return "rts";
+ }
+}
+ [(set_attr "type" "rts")])
+
+(define_insn "*m68k_store_multiple"
+ [(match_parallel 0 "" [(match_operand 1 "")])]
+ "m68k_movem_pattern_p (operands[0], NULL, 0, true)"
+{
+ return m68k_output_movem (operands, operands[0], 0, true);
+})
+
+(define_insn "*m68k_store_multiple_automod"
+ [(match_parallel 0 ""
+ [(set (match_operand:SI 1 "register_operand" "=a")
+ (plus:SI (match_operand:SI 2 "register_operand" "1")
+ (match_operand:SI 3 "const_int_operand")))])]
+ "m68k_movem_pattern_p (operands[0], operands[1], INTVAL (operands[3]), true)"
+{
+ return m68k_output_movem (operands, operands[0], INTVAL (operands[3]), true);
+})
+
+(define_insn "*m68k_load_multiple"
+ [(match_parallel 0 "" [(match_operand 1 "")])]
+ "m68k_movem_pattern_p (operands[0], NULL, 0, false)"
+{
+ return m68k_output_movem (operands, operands[0], 0, false);
+})
+
+(define_insn "*m68k_load_multiple_automod"
+ [(match_parallel 0 ""
+ [(set (match_operand:SI 1 "register_operand" "=a")
+ (plus:SI (match_operand:SI 2 "register_operand" "1")
+ (match_operand:SI 3 "const_int_operand")))])]
+ "m68k_movem_pattern_p (operands[0], operands[1],
+ INTVAL (operands[3]), false)"
+{
+ return m68k_output_movem (operands, operands[0],
+ INTVAL (operands[3]), false);
+})
+
+(define_expand "link"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand")
+ (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (set (match_dup 2)
+ (match_dup 0))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 1 "const_int_operand")))])]
+ "TARGET_68020 || INTVAL (operands[1]) >= -0x8004"
+{
+ operands[2] = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, -4));
+})
+
+(define_insn "*link"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (set (mem:SI (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (match_dup 0))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 1 "const_int_operand")))]
+ "TARGET_68020 || INTVAL (operands[1]) >= -0x8004"
+{
+ operands[1] = GEN_INT (INTVAL (operands[1]) + 4);
+ if (!MOTOROLA)
+ return "link %0,%1";
+ else if (INTVAL (operands[1]) >= -0x8000)
+ return "link.w %0,%1";
+ else
+ return "link.l %0,%1";
+}
+ [(set_attr "type" "link")])
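+
+;; Illustrative: with %a6 as the frame pointer and 16 bytes of locals,
+;; the insn above emits "link.w %a6,#-16" in Motorola syntax (the
+;; output template adds 4 to the RTL offset to account for the
+;; register push).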
+
+(define_expand "unlink"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand")
+ (match_dup 1))
+ (set (reg:SI SP_REG)
+ (plus:SI (match_dup 0)
+ (const_int 4)))])]
+ ""
+{
+ operands[1] = gen_frame_mem (SImode, copy_rtx (operands[0]));
+})
+
+(define_insn "*unlink"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (mem:SI (match_dup 0)))
+ (set (reg:SI SP_REG)
+ (plus:SI (match_dup 0)
+ (const_int 4)))]
+ ""
+ "unlk %0"
+ [(set_attr "type" "unlk")])
+
+(define_insn "load_got"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(const_int 0)] UNSPEC_GOT))]
+ ""
+{
+ if (TARGET_ID_SHARED_LIBRARY)
+ {
+ operands[1] = gen_rtx_REG (Pmode, PIC_REG);
+ return MOTOROLA ? "move.l %?(%1),%0" : "movel %1@(%?), %0";
+ }
+ else if (MOTOROLA)
+ {
+ if (TARGET_COLDFIRE)
+ /* Load the full 32-bit PC-relative offset of
+ _GLOBAL_OFFSET_TABLE_ into the PIC register, then use it to
+ calculate the absolute value. The offset and "lea"
+ operation word together occupy 6 bytes. */
+ return ("move.l #_GLOBAL_OFFSET_TABLE_@GOTPC, %0\n\t"
+ "lea (-6, %%pc, %0), %0");
+ else
+ return "lea (%%pc, _GLOBAL_OFFSET_TABLE_@GOTPC), %0";
+ }
+ else
+ return ("movel #_GLOBAL_OFFSET_TABLE_, %0\n\t"
+ "lea %%pc@(0,%0:l),%0");
+})
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ ""
+ "jmp %a0"
+ [(set_attr "type" "jmp")])
+
+;; This should not be used unless the add/sub insns can't be.
+
+(define_insn "*lea"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
+ (match_operand:QI 1 "address_operand" "p"))]
+ ""
+ "lea %a1,%0")
+
+;; This is the first machine-dependent peephole optimization.
+;; It is useful when a floating value is returned from a function call
+;; and then is moved into an FP register.
+;; But it is mainly intended to test the support for these optimizations.
+
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" ""))]
+ "FP_REG_P (operands[0]) && !FP_REG_P (operands[1])"
+ [(set (mem:SI (reg:SI SP_REG)) (match_dup 1))
+ (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 2))
+ (set (match_dup 0) (mem:DF (post_inc:SI (reg:SI SP_REG))))]
+ "split_di(operands + 1, 1, operands + 1, operands + 2);")
+
+;; Optimize a stack-adjust followed by a push of an argument.
+;; This is said to happen frequently with -msoft-float
+;; when there are consecutive library calls.
+
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (set (match_operand:SF 0 "push_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ "!reg_mentioned_p (stack_pointer_rtx, operands[0])"
+ [(set (match_dup 0) (match_dup 1))]
+ "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);")
+
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "const_int_operand" "")))
+ (set (match_operand:SF 1 "push_operand" "")
+ (match_operand:SF 2 "general_operand" ""))]
+ "INTVAL (operands[0]) > 4
+ && !reg_mentioned_p (stack_pointer_rtx, operands[2])"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 0)))
+ (set (match_dup 1) (match_dup 2))]
+{
+ operands[0] = GEN_INT (INTVAL (operands[0]) - 4);
+ operands[1] = replace_equiv_address (operands[1], stack_pointer_rtx);
+})
+
+;; Speed up a stack adjust followed by a full-word fixed-point push.
+;; Constant operands need special care, as replacing a "pea X.w" with
+;; "move.l #X,(%sp)" is often not a win.
+
+;; Already done by the previous csa pass, left as reference.
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (set (match_operand:SI 0 "push_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ "!reg_mentioned_p (stack_pointer_rtx, operands[1])"
+ [(set (match_dup 0) (match_dup 1))]
+ "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);")
+
+;; Try to use moveq, after stack push has been changed into a simple move.
+(define_peephole2
+ [(match_scratch:SI 2 "d")
+ (set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "GET_CODE (XEXP (operands[0], 0)) != PRE_DEC
+ && INTVAL (operands[1]) != 0
+ && IN_RANGE (INTVAL (operands[1]), -0x80, 0x7f)
+ && !valid_mov3q_const (INTVAL (operands[1]))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))])
+
+;; This sequence adds an instruction, but is two bytes shorter.
+(define_peephole2
+ [(match_scratch:SI 2 "d")
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 12)))
+ (set (match_operand:SI 0 "push_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) != 0
+ && IN_RANGE (INTVAL (operands[1]), -0x80, 0x7f)
+ && !valid_mov3q_const (INTVAL (operands[1]))"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8)))
+ (set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);")
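+
+;; Illustrative effect, assuming %d0 is the free data scratch register:
+;;
+;;   lea (12,%sp),%sp              addq.l #8,%sp
+;;   move.l #42,-(%sp)      =>     moveq #42,%d0
+;;                                 move.l %d0,(%sp)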
+
+;; Changing pea X.w into a move.l is no real win here.
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "const_int_operand" "")))
+ (set (match_operand:SI 1 "push_operand" "")
+ (match_operand:SI 2 "general_operand" ""))]
+ "INTVAL (operands[0]) > 4
+ && !reg_mentioned_p (stack_pointer_rtx, operands[2])
+ && !(CONST_INT_P (operands[2]) && INTVAL (operands[2]) != 0
+ && IN_RANGE (INTVAL (operands[2]), -0x8000, 0x7fff)
+ && !valid_mov3q_const (INTVAL (operands[2])))"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 0)))
+ (set (match_dup 1) (match_dup 2))]
+{
+ operands[0] = GEN_INT (INTVAL (operands[0]) - 4);
+ operands[1] = replace_equiv_address (operands[1], stack_pointer_rtx);
+})
+
+;; Speed up pushing a single byte or two bytes while leaving four bytes
+;; of stack space (the exact behavior differs slightly between m680x0
+;; and ColdFire).
+
+(define_peephole2
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (set (match_operand:QI 0 "memory_operand" "")
+ (match_operand:QI 1 "register_operand" ""))]
+ "!reg_mentioned_p (stack_pointer_rtx, operands[1])
+ && GET_CODE (XEXP (operands[0], 0)) == PLUS
+ && rtx_equal_p (XEXP (XEXP (operands[0], 0), 0), stack_pointer_rtx)
+ && CONST_INT_P (XEXP (XEXP (operands[0], 0), 1))
+ && INTVAL (XEXP (XEXP (operands[0], 0), 1)) == 3"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
+ operands[0] = adjust_automodify_address (operands[0], SImode, addr, -3);
+ operands[1] = simplify_gen_subreg (SImode, operands[1], QImode, 0);
+})
+
+(define_peephole2
+ [(set (match_operand:QI 0 "push_operand" "")
+ (match_operand:QI 1 "register_operand" ""))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -3)))]
+ "!reg_mentioned_p (stack_pointer_rtx, operands[1])"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_automodify_address (operands[0], SImode,
+ XEXP (operands[0], 0), -3);
+ operands[1] = simplify_gen_subreg (SImode, operands[1], QImode, 0);
+})
+
+(define_peephole2
+ [(set (match_operand:HI 0 "push_operand" "")
+ (match_operand:HI 1 "register_operand" ""))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2)))]
+ "!reg_mentioned_p (stack_pointer_rtx, operands[1])"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_automodify_address (operands[0], SImode,
+ XEXP (operands[0], 0), -2);
+ operands[1] = simplify_gen_subreg (SImode, operands[1], HImode, 0);
+})
+
+;; Optimize a series of strict_low_part assignments
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (const_int 0))
+ (set (strict_low_part (match_operand:HI 1 "register_operand" ""))
+ (match_operand:HI 2 "general_operand" ""))]
+ "REGNO (operands[0]) == REGNO (operands[1])
+ && strict_low_part_peephole_ok (HImode, insn, operands[0])"
+ [(set (strict_low_part (match_dup 1)) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (const_int 0))
+ (set (strict_low_part (match_operand:QI 1 "register_operand" ""))
+ (match_operand:QI 2 "general_operand" ""))]
+ "REGNO (operands[0]) == REGNO (operands[1])
+ && strict_low_part_peephole_ok (QImode, insn, operands[0])"
+ [(set (strict_low_part (match_dup 1)) (match_dup 2))]
+ "")
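+
+;; Illustrative effect of the peepholes above: a full-register clear
+;; that only feeds a strict_low_part write, e.g.
+;;
+;;   clr.l %d0
+;;   move.w %d1,%d0        =>      move.w %d1,%d0
+;;
+;; is dropped when strict_low_part_peephole_ok shows it is redundant.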
+
+;; dbCC peepholes
+;;
+;; Turns
+;; loop:
+;; [ ... ]
+;; jCC label ; abnormal loop termination
+;; dbra dN, loop ; normal loop termination
+;;
+;; Into
+;; loop:
+;; [ ... ]
+;; dbCC dN, loop
+;; jCC label
+;;
+;; Which moves the jCC condition outside the inner loop for free.
+;;
+
+(define_peephole
+ [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:HI 0 "register_operand" "")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))])]
+ "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
+{
+ CC_STATUS_INIT;
+ output_dbcc_and_branch (operands);
+ return "";
+})
+
+(define_peephole
+ [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:SI 0 "register_operand" "")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))])]
+ "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
+{
+ CC_STATUS_INIT;
+ output_dbcc_and_branch (operands);
+ return "";
+})
+
+(define_peephole
+ [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ge (plus:HI (match_operand:HI 0 "register_operand" "")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))])]
+ "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
+{
+ CC_STATUS_INIT;
+ output_dbcc_and_branch (operands);
+ return "";
+})
+
+(define_peephole
+ [(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ge (plus:SI (match_operand:SI 0 "register_operand" "")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))])]
+ "!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
+{
+ CC_STATUS_INIT;
+ output_dbcc_and_branch (operands);
+ return "";
+})
+
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f")
+ (float_extend:XF (match_operand:SF 1 "general_operand" "f,rmF")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
+ {
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+	  /* Extending float to extended precision in an fp-reg is a no-op.
+ NOTICE_UPDATE_CC has already assumed that the
+ cc will be set. So cancel what it did. */
+ cc_status = cc_prev_status;
+ return "";
+ }
+ return "f%$move%.x %1,%0";
+ }
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "f%$move%.x %1,%0";
+ else if (ADDRESS_REG_P (operands[1]))
+ return "move%.l %1,%-\;f%$move%.s %+,%0";
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return output_move_const_single (operands);
+ return "f%$move%.s %f1,%0";
+ }
+ return "fmove%.x %f1,%0";
+})
+
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f")
+ (float_extend:XF
+ (match_operand:DF 1 "general_operand" "f,rmE")))]
+ "TARGET_68881"
+{
+ if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
+ {
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+	  /* Extending double to extended precision in an fp-reg is a no-op.
+ NOTICE_UPDATE_CC has already assumed that the
+ cc will be set. So cancel what it did. */
+ cc_status = cc_prev_status;
+ return "";
+ }
+ return "fmove%.x %1,%0";
+ }
+ if (FP_REG_P (operands[0]))
+ {
+ if (REG_P (operands[1]))
+ {
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ output_asm_insn ("move%.l %1,%-", xoperands);
+ output_asm_insn ("move%.l %1,%-", operands);
+ return "f%&move%.d %+,%0";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ return output_move_const_double (operands);
+ return "f%&move%.d %f1,%0";
+ }
+ return "fmove%.x %f1,%0";
+})
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,!r")
+ (float_truncate:DF
+ (match_operand:XF 1 "general_operand" "f,f")))]
+ "TARGET_68881"
+{
+ if (REG_P (operands[0]))
+ {
+ output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return "move%.l %+,%0";
+ }
+ return "fmove%.d %f1,%0";
+})
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=dm")
+ (float_truncate:SF
+ (match_operand:XF 1 "general_operand" "f")))]
+ "TARGET_68881"
+ "fmove%.s %f1,%0")
+
+(define_insn "sin<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (unspec:FP
+ [(match_operand:FP 1 "general_operand" "f<FP:dreg>m")] UNSPEC_SIN))]
+ "TARGET_68881 && flag_unsafe_math_optimizations"
+{
+ if (FP_REG_P (operands[1]))
+ return "fsin%.x %1,%0";
+ else
+ return "fsin%.<FP:prec> %1,%0";
+})
+
+(define_insn "cos<mode>2"
+ [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
+ (unspec:FP
+ [(match_operand:FP 1 "general_operand" "f<FP:dreg>m")] UNSPEC_COS))]
+ "TARGET_68881 && flag_unsafe_math_optimizations"
+{
+ if (FP_REG_P (operands[1]))
+ return "fcos%.x %1,%0";
+ else
+ return "fcos%.<FP:prec> %1,%0";
+})
+
+;; Unconditional traps are assumed to have (const_int 1) for the condition.
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 7))]
+ ""
+ "trap #7"
+ [(set_attr "type" "trap")])
+
+(define_expand "ctrapdi4"
+ [(trap_if (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 3 "const1_operand" ""))]
+ "TARGET_68020"
+{
+ if (operands[2] == const0_rtx)
+ emit_insn (gen_tstdi (operands[1]));
+ else
+ emit_insn (gen_cmpdi (operands[1], operands[2]));
+ operands[1] = cc0_rtx;
+ operands[2] = const0_rtx;
+})
+
+(define_expand "ctrapsi4"
+ [(set (cc0)
+ (compare (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (trap_if (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 3 "const1_operand" ""))]
+ "TARGET_68020"
+ "")
+
+(define_expand "ctraphi4"
+ [(set (cc0)
+ (compare (match_operand:HI 1 "nonimmediate_src_operand" "")
+ (match_operand:HI 2 "general_src_operand" "")))
+ (trap_if (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 3 "const1_operand" ""))]
+ "TARGET_68020"
+ "")
+
+(define_expand "ctrapqi4"
+ [(set (cc0)
+ (compare (match_operand:QI 1 "nonimmediate_src_operand" "")
+ (match_operand:QI 2 "general_src_operand" "")))
+ (trap_if (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 3 "const1_operand" ""))]
+ "TARGET_68020"
+ "")
+
+(define_insn "*conditional_trap"
+ [(trap_if (match_operator 0 "ordered_comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 1 "const1_operand" "I"))]
+ "TARGET_68020 && ! flags_in_68881 ()"
+{
+ switch (GET_CODE (operands[0]))
+ {
+ case EQ: return "trapeq";
+ case NE: return "trapne";
+ case GT: return "trapgt";
+ case GTU: return "traphi";
+ case LT: return "traplt";
+ case LTU: return "trapcs";
+ case GE: return "trapge";
+ case GEU: return "trapcc";
+ case LE: return "traple";
+ case LEU: return "trapls";
+ default: gcc_unreachable ();
+ }
+})
+
+;; These are to prevent the scheduler from moving stores to the frame
+;; before the stack adjustment.
+(define_insn "stack_tie"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_TIE))]
+ ""
+ ""
+ [(set_attr "type" "ignore")])
+
+;; Instruction that occupies one word in the ColdFire instruction buffer.
+;; This instruction is used only within the scheduler and should not
+;; appear in the instruction stream.
+(define_insn "ib"
+ [(unspec [(const_int 0)] UNSPEC_IB)]
+ ""
+ "#"
+ [(set_attr "type" "ib")])
+
+(include "cf.md")
diff --git a/gcc/config/m68k/m68k.opt b/gcc/config/m68k/m68k.opt
new file mode 100644
index 000000000..d5aa9fa76
--- /dev/null
+++ b/gcc/config/m68k/m68k.opt
@@ -0,0 +1,188 @@
+; Options for the Motorola 68000 port of the compiler.
+
+; Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+m5200
+Target RejectNegative
+Generate code for a 520X
+
+m5206e
+Target RejectNegative
+Generate code for a 5206e
+
+m528x
+Target RejectNegative
+Generate code for a 528x
+
+m5307
+Target RejectNegative
+Generate code for a 5307
+
+m5407
+Target RejectNegative
+Generate code for a 5407
+
+m68000
+Target RejectNegative
+Generate code for a 68000
+
+m68010
+Target RejectNegative
+Generate code for a 68010
+
+m68020
+Target RejectNegative
+Generate code for a 68020
+
+m68020-40
+Target RejectNegative
+Generate code for a 68040, without any new instructions
+
+m68020-60
+Target RejectNegative
+Generate code for a 68060, without any new instructions
+
+m68030
+Target RejectNegative
+Generate code for a 68030
+
+m68040
+Target RejectNegative
+Generate code for a 68040
+
+m68060
+Target RejectNegative
+Generate code for a 68060
+
+m68302
+Target RejectNegative
+Generate code for a 68302
+
+m68332
+Target RejectNegative
+Generate code for a 68332
+
+; Has no effect on gcc
+m68851
+Target
+Generate code for a 68851
+
+m68881
+Target RejectNegative Mask(HARD_FLOAT)
+Generate code that uses 68881 floating-point instructions
+
+malign-int
+Target Report Mask(ALIGN_INT)
+Align variables on a 32-bit boundary
+
+march=
+Target RejectNegative Joined
+Specify the name of the target architecture
+
+mbitfield
+Target Report Mask(BITFIELD)
+Use the bit-field instructions
+
+mc68000
+Target RejectNegative
+Generate code for a 68000
+
+mc68020
+Target RejectNegative
+Generate code for a 68020
+
+mcfv4e
+Target RejectNegative
+Generate code for a ColdFire v4e
+
+mcpu=
+Target RejectNegative Joined
+Specify the target CPU
+
+mcpu32
+Target RejectNegative
+Generate code for a cpu32
+
+mdiv
+Target Report Mask(CF_HWDIV)
+Use hardware division instructions on ColdFire
+
+mfidoa
+Target RejectNegative
+Generate code for a Fido A
+
+mhard-float
+Target RejectNegative Mask(HARD_FLOAT) MaskExists
+Generate code that uses hardware floating-point instructions
+
+mid-shared-library
+Target Report Mask(ID_SHARED_LIBRARY)
+Enable ID-based shared library
+
+mnobitfield
+Target RejectNegative InverseMask(BITFIELD)
+Do not use the bit-field instructions
+
+mnortd
+Target RejectNegative InverseMask(RTD)
+Use normal calling convention
+
+mnoshort
+Target RejectNegative InverseMask(SHORT)
+Consider type 'int' to be 32 bits wide
+
+mpcrel
+Target Report Mask(PCREL)
+Generate pc-relative code
+
+mrtd
+Target Report Mask(RTD)
+Use a different calling convention that uses 'rtd'
+
+msep-data
+Target Report Mask(SEP_DATA)
+Enable separate data segment
+
+mshared-library-id=
+Target RejectNegative Joined UInteger
+ID of shared library to build
+
+mshort
+Target Report Mask(SHORT)
+Consider type 'int' to be 16 bits wide
+
+msoft-float
+Target RejectNegative InverseMask(HARD_FLOAT)
+Generate code with library calls for floating point
+
+mstrict-align
+Target Report Mask(STRICT_ALIGNMENT)
+Do not use unaligned memory references
+
+mtune=
+Target RejectNegative Joined
+Tune for the specified target CPU or architecture
+
+mxgot
+Target Report Mask(XGOT)
+Support more than 8192 GOT entries on ColdFire
+
+mxtls
+Target Report Mask(XTLS)
+Support TLS segment larger than 64K
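+
+; Illustrative usage (toolchain prefix hypothetical): select a ColdFire
+; CPU and enable its hardware divide instructions:
+;   m68k-elf-gcc -mcpu=5407 -mdiv -O2 foo.c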
diff --git a/gcc/config/m68k/m68kelf.h b/gcc/config/m68k/m68kelf.h
new file mode 100644
index 000000000..d3fc41a4e
--- /dev/null
+++ b/gcc/config/m68k/m68kelf.h
@@ -0,0 +1,164 @@
+/* m68kelf support, derived from m68kv4.h */
+
+/* Target definitions for GNU compiler for mc680x0 running System V.4
+ Copyright (C) 1991, 1993, 2000, 2002, 2003, 2004, 2007, 2010
+ Free Software Foundation, Inc.
+
+ Written by Ron Guilmette (rfg@netcom.com) and Fred Fish (fnf@cygnus.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef SWBEG_ASM_OP
+#define SWBEG_ASM_OP "\t.swbeg\t"
+#endif
+
+/* Here are three prefixes that are used by asm_fprintf to
+ facilitate customization for alternate assembler syntaxes.
+ Machines with no likelihood of an alternate syntax need not
+ define these and need not use asm_fprintf. */
+
+/* The prefix for register names. Note that REGISTER_NAMES
+ is supposed to include this prefix. Also note that this is NOT an
+   fprintf format string; it is a literal string.  */
+
+#undef REGISTER_PREFIX
+#define REGISTER_PREFIX "%"
+
+/* The prefix for local (compiler generated) labels.
+ These labels will not appear in the symbol table. */
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* config/m68k.md has an explicit reference to the program counter;
+   prefix it with the register prefix.  */
+
+#define ASM_RETURN_CASE_JUMP \
+ do { \
+ if (TARGET_COLDFIRE) \
+ { \
+ if (ADDRESS_REG_P (operands[0])) \
+ return "jmp %%pc@(2,%0:l)"; \
+ else \
+ return "ext%.l %0\n\tjmp %%pc@(2,%0:l)"; \
+ } \
+ else \
+ return "jmp %%pc@(2,%0:w)"; \
+ } while (0)
+
+/* This is how to output an assembler line that says to advance the
+ location counter to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+do { \
+ if ((LOG) > 0) \
+ fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG)); \
+} while (0)
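+
+/* Illustrative: with LOG == 2, and assuming the usual ALIGN_ASM_OP of
+   "\t.align\t", this emits ".align 4", i.e. the alignment is given as
+   a byte count rather than as a power of two.  */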
+
+/* Register in which the address for storing a structure value is
+   passed to a function.  The default in m68k.h is a1.  For m68k/SVR4
+   it is a0.  */
+
+#undef M68K_STRUCT_VALUE_REGNUM
+#define M68K_STRUCT_VALUE_REGNUM A0_REG
+
+/* The static chain regnum defaults to a0, but we use that for
+   structure return, so we have to use a1 for the static chain.  */
+
+#undef STATIC_CHAIN_REGNUM
+#define STATIC_CHAIN_REGNUM A1_REG
+#undef M68K_STATIC_CHAIN_REG_NAME
+#define M68K_STATIC_CHAIN_REG_NAME REGISTER_PREFIX "a1"
+
+#define ASM_COMMENT_START "|"
+
+/* Define how the m68k registers should be numbered for Dwarf output.
+ The numbering provided here should be compatible with the native
+ SVR4 SDB debugger in the m68k/SVR4 reference port, where d0-d7
+   are 0-7, a0-a7 are 8-15, and fp0-fp7 are 16-23.  */
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+#if 0
+/* The SVR4 m68k assembler chokes on `comm i,1,1', which asks for
+   1-byte alignment.  Not generating alignment for COMMON seems to be
+   safer until the assembler is fixed.  */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+/* Same problem with this one. */
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#endif
+
+#undef ASM_OUTPUT_COMMON
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(SIZE)))
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(SIZE)))
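+
+/* These emit directives of the form (illustrative):
+
+     .comm foo,4
+     .lcomm bar,8  */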
+
+/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
+ keep switch tables in the text section. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* On m68k SVR4, using swbeg is the standard way to output switch
+   tables.  */
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
+ fprintf ((FILE), "%s&%d\n", SWBEG_ASM_OP, XVECLEN (PATTERN (TABLE), 1));
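+
+/* For an eight-entry dispatch table this emits ".swbeg &8"
+   (illustrative).  */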
+/* end of stuff from m68kv4.h */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin.o%s"
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as
+ uninitialized global data. If not defined, and neither
+ `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
+ uninitialized global data will be output in the data section if
+ `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
+ used. */
+#ifndef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP "\t.section\t.bss"
+#endif
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+#endif
diff --git a/gcc/config/m68k/m68kemb.h b/gcc/config/m68k/m68kemb.h
new file mode 100644
index 000000000..5d917f91c
--- /dev/null
+++ b/gcc/config/m68k/m68kemb.h
@@ -0,0 +1,53 @@
+/* Definitions of target machine for GNU compiler. "embedded" 68XXX.
+ This is meant to be included after m68k.h.
+ Copyright (C) 1994, 1995, 1998, 1999, 2004, 2006
+ Free Software Foundation, Inc. */
+
+/* Override the SVR4 ABI for this target. */
+
+#define PTRDIFF_TYPE "long int"
+#define SIZE_TYPE "long unsigned int"
+
+/* In order for bitfields to work on a 68000, or with -mnobitfield, we must
+ define either PCC_BITFIELD_TYPE_MATTERS or STRUCTURE_SIZE_BOUNDARY.
+ Defining STRUCTURE_SIZE_BOUNDARY results in structure packing problems,
+ so we define PCC_BITFIELD_TYPE_MATTERS. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
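+
+/* Illustrative consequence: with PCC_BITFIELD_TYPE_MATTERS, the
+   declared type of a bit-field affects the alignment of its enclosing
+   structure, so
+
+     struct s { char c; int f : 4; };
+
+   gets the alignment of int, as if it contained a plain int member.  */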
+
+/* Don't default to pcc-struct-return, so that we can return small structures
+ and unions in registers, which is slightly more efficient. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#undef FUNCTION_VALUE
+#define FUNCTION_VALUE(VALTYPE,FUNC) LIBCALL_VALUE (TYPE_MODE (VALTYPE))
+
+#undef LIBCALL_VALUE
+#define LIBCALL_VALUE(MODE) \
+ m68k_libcall_value (MODE)
+
+#undef FUNCTION_VALUE_REGNO_P
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == D0_REG || (TARGET_68881 && (N) == FP0_REG))
+
+#undef NEEDS_UNTYPED_CALL
+#define NEEDS_UNTYPED_CALL 1
+
+/* Target OS builtins. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__embedded__"); \
+ } \
+ while (0)
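+
+/* Code being compiled for this target can then test the builtin,
+   e.g. (illustrative):
+
+     #ifdef __embedded__
+       ...
+     #endif  */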
+
+/* Override the default LIB_SPEC from gcc.c. We don't currently support
+ profiling, or libg.a. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "-lc"
+
+/* Make this null, since we want crt0.o to come from the linker
+   script.  */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
diff --git a/gcc/config/m68k/math-68881.h b/gcc/config/m68k/math-68881.h
new file mode 100644
index 000000000..6d9f8b2d4
--- /dev/null
+++ b/gcc/config/m68k/math-68881.h
@@ -0,0 +1,529 @@
+/******************************************************************\
+* *
+* <math-68881.h> last modified: 23 May 1992. *
+* *
+* Copyright (C) 1989 by Matthew Self. *
+* You may freely distribute verbatim copies of this software *
+* provided that this copyright notice is retained in all copies. *
+* You may distribute modifications to this software under the *
+* conditions above if you also clearly note such modifications *
+* with their author and date. *
+* *
+* Note: errno is not set to EDOM when domain errors occur for *
+* most of these functions. Rather, it is assumed that the *
+* 68881's OPERR exception will be enabled and handled *
+* appropriately by the operating system. Similarly, overflow *
+* and underflow do not set errno to ERANGE. *
+* *
+* Send bugs to Matthew Self (self@bayes.arc.nasa.gov). *
+* *
+\******************************************************************/
+
+/* This file is NOT a part of GCC, just distributed with it. */
+
+/* If you find this in GCC,
+ please send bug reports to bug-gcc@prep.ai.mit.edu. */
+
+/* Changed by Richard Stallman:
+ May 1993, add conditional to prevent multiple inclusion.
+ % inserted before a #.
+ New function `hypot' added.
+ Nans written in hex to avoid 0rnan.
+ May 1992, use %! for fpcr register. Break lines before function names.
+ December 1989, add parens around `&' in pow.
+ November 1990, added alternate definition of HUGE_VAL for Sun. */
+
+/* Changed by Jim Wilson:
+ September 1993, Use #undef before HUGE_VAL instead of #ifdef/#endif. */
+
+/* Changed by Ian Lance Taylor:
+ September 1994, use extern inline instead of static inline. */
+
+#ifndef __math_68881
+#define __math_68881
+
+#include <errno.h>
+
+#undef HUGE_VAL
+#ifdef __sun__
+/* The Sun assembler fails to handle the hex constant in the usual defn. */
+#define HUGE_VAL \
+({ \
+ static union { int i[2]; double d; } u = { {0x7ff00000, 0} }; \
+ u.d; \
+})
+#else
+#define HUGE_VAL \
+({ \
+ double huge_val; \
+ \
+ __asm ("fmove%.d #0x7ff0000000000000,%0" /* Infinity */ \
+ : "=f" (huge_val) \
+ : /* no inputs */); \
+ huge_val; \
+})
+#endif
+
+__inline extern double
+sin (double x)
+{
+ double value;
+
+ __asm ("fsin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+cos (double x)
+{
+ double value;
+
+ __asm ("fcos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+tan (double x)
+{
+ double value;
+
+ __asm ("ftan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+asin (double x)
+{
+ double value;
+
+ __asm ("fasin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+acos (double x)
+{
+ double value;
+
+ __asm ("facos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atan (double x)
+{
+ double value;
+
+ __asm ("fatan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atan2 (double y, double x)
+{
+ double pi, pi_over_2;
+
+ __asm ("fmovecr%.x #0,%0" /* extended precision pi */
+ : "=f" (pi)
+ : /* no inputs */ );
+ __asm ("fscale%.b #-1,%0" /* no loss of accuracy */
+ : "=f" (pi_over_2)
+ : "0" (pi));
+ if (x > 0)
+ {
+ if (y > 0)
+ {
+ if (x > y)
+ return atan (y / x);
+ else
+ return pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (x > -y)
+ return atan (y / x);
+ else
+ return - pi_over_2 - atan (x / y);
+ }
+ }
+ else
+ {
+ if (y < 0)
+ {
+ if (-x > -y)
+ return - pi + atan (y / x);
+ else
+ return - pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (-x > y)
+ return pi + atan (y / x);
+ else if (y > 0)
+ return pi_over_2 - atan (x / y);
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ }
+}
+
+__inline extern double
+sinh (double x)
+{
+ double value;
+
+ __asm ("fsinh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+cosh (double x)
+{
+ double value;
+
+ __asm ("fcosh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+tanh (double x)
+{
+ double value;
+
+ __asm ("ftanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atanh (double x)
+{
+ double value;
+
+ __asm ("fatanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+exp (double x)
+{
+ double value;
+
+ __asm ("fetox%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+expm1 (double x)
+{
+ double value;
+
+ __asm ("fetoxm1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log (double x)
+{
+ double value;
+
+ __asm ("flogn%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log1p (double x)
+{
+ double value;
+
+ __asm ("flognp1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log10 (double x)
+{
+ double value;
+
+ __asm ("flog10%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+sqrt (double x)
+{
+ double value;
+
+ __asm ("fsqrt%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+hypot (double x, double y)
+{
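+ /* Note: this direct form can overflow or underflow in the intermediate
+ x*x + y*y even when the true result is representable. */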
+ return sqrt (x*x + y*y);
+}
+
+__inline extern double
+pow (double x, double y)
+{
+ if (x > 0)
+ return exp (y * log (x));
+ else if (x == 0)
+ {
+ if (y > 0)
+ return 0.0;
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ else
+ {
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (y));
+ if (y == temp)
+ {
+ int i = (int) y;
+
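+ /* Parity check: the cast assumes the integral y fits in an int; its
+ low bit then decides the sign of the result for negative x. */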
+ if ((i & 1) == 0) /* even */
+ return exp (y * log (-x));
+ else
+ return - exp (y * log (-x));
+ }
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+}
+
+__inline extern double
+fabs (double x)
+{
+ double value;
+
+ __asm ("fabs%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+ceil (double x)
+{
+ int rounding_mode, round_up;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
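+ /* The 68881 FPCR selects the rounding mode in bits 4-5: 00 = to
+ nearest, 01 = toward zero, 10 = toward -inf, 11 = toward +inf.
+ Setting both bits rounds toward +infinity, which implements ceil. */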
+ round_up = rounding_mode | 0x30;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_up));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+floor (double x)
+{
+ int rounding_mode, round_down;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
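+ /* Set the FPCR rounding-mode field (bits 4-5) to 10, round toward
+ minus infinity, which implements floor. */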
+ round_down = (rounding_mode & ~0x10) | 0x20;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_down));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+rint (double x)
+{
+ int rounding_mode, round_nearest;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
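+ /* Clear the FPCR rounding-mode field (bits 4-5); 00 selects
+ round-to-nearest. */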
+ round_nearest = rounding_mode & ~0x30;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_nearest));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+fmod (double x, double y)
+{
+ double value;
+
+ __asm ("fmod%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline extern double
+drem (double x, double y)
+{
+ double value;
+
+ __asm ("frem%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline extern double
+scalb (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline extern double
+logb (double x)
+{
+ double exponent;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (exponent)
+ : "f" (x));
+ return exponent;
+}
+
+__inline extern double
+ldexp (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline extern double
+frexp (double x, int *exp)
+{
+ double float_exponent;
+ int int_exponent;
+ double mantissa;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (float_exponent) /* integer-valued float */
+ : "f" (x));
+ int_exponent = (int) float_exponent;
+ __asm ("fgetman%.x %1,%0"
+ : "=f" (mantissa) /* 1.0 <= mantissa < 2.0 */
+ : "f" (x));
+ if (mantissa != 0)
+ {
+ __asm ("fscale%.b #-1,%0"
+ : "=f" (mantissa) /* mantissa /= 2.0 */
+ : "0" (mantissa));
+ int_exponent += 1;
+ }
+ *exp = int_exponent;
+ return mantissa;
+}
+
+__inline extern double
+modf (double x, double *ip)
+{
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (x));
+ *ip = temp;
+ return x - temp;
+}
+
+#endif /* not __math_68881 */
diff --git a/gcc/config/m68k/netbsd-elf.h b/gcc/config/m68k/netbsd-elf.h
new file mode 100644
index 000000000..1238d26e7
--- /dev/null
+++ b/gcc/config/m68k/netbsd-elf.h
@@ -0,0 +1,315 @@
+/* Definitions of target machine for GNU compiler,
+ for m68k (including m68010) NetBSD platforms using the
+ ELF object format.
+ Copyright (C) 2002, 2003, 2004, 2006, 2007, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+ This file is derived from <m68k/m68kv4.h>, <m68k/m68kelf.h>,
+ and <m68k/linux.h>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ builtin_define ("__m68k__"); \
+ builtin_define ("__SVR4_ABI__"); \
+ builtin_define ("__motorola__"); \
+ if (TARGET_HARD_FLOAT) \
+ builtin_define ("__HAVE_FPU__"); \
+ } \
+ while (0)
+
+/* Don't try using XFmode on the 68010. */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_68020 ? 80 : 64)
+
+#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#ifdef __mc68010__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80
+#endif
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+
+#undef TARGET_VERSION
+#define TARGET_VERSION \
+ fprintf (stderr, \
+ TARGET_68010 \
+ ? " (NetBSD/68010 ELF)" \
+ : " (NetBSD/m68k ELF)");
+
+
+/* Provide a CPP_SPEC appropriate for NetBSD m68k targets. Currently we
+ handle the GCC option '-posix' and indicate whether or not use of
+ the FPU is allowed. */
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+
+/* Provide an ASM_SPEC appropriate for NetBSD m68k ELF targets. We need
+ to pass PIC code generation options. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu_spec) %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
+
+/* Provide a LINK_SPEC appropriate for a NetBSD/m68k ELF target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+
+#define NETBSD_ENTRY_POINT "_start"
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function only. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+do \
+ { \
+ asm_fprintf (FILE, "\tlea (%LLP%d,%Rpc),%Ra1\n", (LABELNO)); \
+ if (flag_pic) \
+ fprintf (FILE, "\tbsr.l __mcount@PLTPC\n"); \
+ else \
+ fprintf (FILE, "\tjbsr __mcount\n"); \
+ } \
+while (0)
+
+
+/* Make gcc agree with <machine/ansi.h> */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+
+/* XXX
+ Here is a bunch of stuff lifted from m68kelf.h. We don't use that
+ file directly, because it has a lot of baggage we don't want. */
+
+
+/* The prefix for register names. Note that REGISTER_NAMES
+ is supposed to include this prefix. Also note that this is NOT an
+ fprintf format string; it is a literal string. */
+
+#undef REGISTER_PREFIX
+#define REGISTER_PREFIX "%"
+
+
+/* The prefix for local (compiler-generated) labels.
+ These labels will not appear in the symbol table. */
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START "|"
+
+
+/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
+ keep switch tables in the text section. */
+
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+
+/* Use the default action for outputting the case label. */
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_RETURN_CASE_JUMP \
+ do { \
+ if (TARGET_COLDFIRE) \
+ { \
+ if (ADDRESS_REG_P (operands[0])) \
+ return "jmp %%pc@(2,%0:l)"; \
+ else \
+ return "ext%.l %0\n\tjmp %%pc@(2,%0:l)"; \
+ } \
+ else \
+ return "jmp %%pc@(2,%0:w)"; \
+ } while (0)
+
+
+/* This is how to output an assembler line that says to advance the
+ location counter to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+do \
+ { \
+ if ((LOG) > 0) \
+ fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG)); \
+ } \
+while (0)
+
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as uninitialized global
+ data. */
+
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(SIZE)))
+
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(SIZE)))
+
+
+/* XXX
+ This is the end of the chunk lifted from m68kelf.h */
+
+
+/* XXX
+ The following chunk is more or less lifted from m68kv4.h.
+ We'd like to just #include that file, but it has not yet
+ been converted to the new include style.
+
+ Should there be a m68kv4-abi.h ?? */
+
+
+/* Register in which the address for storing a structure value is passed
+ to a function. The default in m68k.h is a1. For m68k/SVR4 it is a0. */
+
+#undef M68K_STRUCT_VALUE_REGNUM
+#define M68K_STRUCT_VALUE_REGNUM A0_REG
+
+
+/* Register in which the static chain is passed to a function. The
+ default in m68k.h is a0, but that is already the struct value
+ regnum. Make it a1 instead. */
+
+#undef STATIC_CHAIN_REGNUM
+#define STATIC_CHAIN_REGNUM A1_REG
+#undef M68K_STATIC_CHAIN_REG_NAME
+#define M68K_STATIC_CHAIN_REG_NAME REGISTER_PREFIX "a1"
+
+
+/* Now to renumber registers for dbx and gdb.
+ We use the Sun-3 convention, which is:
+ floating point registers have numbers 18 to 25, not
+ 16 to 23 as they do in the compiler. */
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 16 ? (REGNO) : (REGNO) + 2)
+
+
+/* 1 if N is a possible register number for a function value. For
+ m68k/SVR4 allow d0, a0, or fp0 as return registers, for integral,
+ pointer, or floating types, respectively. Reject fp0 if not using
+ a 68881 coprocessor. */
+
+#undef FUNCTION_VALUE_REGNO_P
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == D0_REG || (N) == A0_REG || (TARGET_68881 && (N) == FP0_REG))
+
+
+/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
+ more than one register. */
+
+#undef NEEDS_UNTYPED_CALL
+#define NEEDS_UNTYPED_CALL 1
+
+
+/* Define how to generate (in the callee) the output value of a
+ function and how to find (in the caller) the value returned by a
+ function. VALTYPE is the data type of the value (as a tree). If
+ the precise function being called is known, FUNC is its
+ FUNCTION_DECL; otherwise, FUNC is 0. For m68k/SVR4 generate the
+ result in d0, a0, or fp0 as appropriate. */
+
+#undef FUNCTION_VALUE
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ m68k_function_value (VALTYPE, FUNC)
+
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE.
+ For m68k/SVR4 look for integer values in d0, pointer values in d0
+ (returned in both d0 and a0), and floating values in fp0. */
+
+#undef LIBCALL_VALUE
+#define LIBCALL_VALUE(MODE) \
+ m68k_libcall_value (MODE)
+
+
+/* Boundary (in *bits*) on which stack pointer should be aligned.
+ The m68k/SVR4 convention is to keep the stack pointer longword aligned. */
+
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY 32
+
+
+/* Alignment of field after `int : 0' in a structure.
+ For m68k/SVR4, this is the next longword boundary. */
+
+#undef EMPTY_FIELD_BOUNDARY
+#define EMPTY_FIELD_BOUNDARY 32
+
+
+/* No data type wants to be aligned rounder than this.
+ For m68k/SVR4, some types (doubles for example) are aligned on 8 byte
+ boundaries. */
+
+#undef BIGGEST_ALIGNMENT
+#define BIGGEST_ALIGNMENT 64
+
+
+/* The svr4 ABI for the m68k says that records and unions are returned
+ in memory. */
+
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* XXX
+ This is the end of the chunk lifted from m68kv4.h */
diff --git a/gcc/config/m68k/openbsd.h b/gcc/config/m68k/openbsd.h
new file mode 100644
index 000000000..8478855df
--- /dev/null
+++ b/gcc/config/m68k/openbsd.h
@@ -0,0 +1,89 @@
+/* Configuration file for an m68k OpenBSD target.
+ Copyright (C) 1999, 2002, 2003, 2007, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Target OS builtins. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__unix__"); \
+ builtin_define ("__OpenBSD__"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=OpenBSD"); \
+ } \
+ while (0)
+
+/* Define __HAVE_68881__ in preprocessor, unless -msoft-float is specified.
+ This will control the use of inline 68881 insns in certain macros. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{!msoft-float:-D__HAVE_68881__ -D__HAVE_FPU__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu_spec) %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef WINT_TYPE
+#define WINT_TYPE "int"
+
+/* Storage layout. */
+
+/* Every structure or union's size must be a multiple of 2 bytes. */
+#define STRUCTURE_SIZE_BOUNDARY 16
+
+/* Specific options for DBX Output. */
+
+/* This is BSD, so it wants DBX format. */
+#define DBX_DEBUGGING_INFO 1
+
+/* Do not break .stabs pseudos into continuations. */
+#define DBX_CONTIN_LENGTH 0
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+#define DBX_CONTIN_CHAR '?'
+
+/* Stack & calling: aggregate returns. */
+
+/* ??? This is traditional, but quite possibly wrong. It appears to
+ disagree with gdb. */
+#define PCC_STATIC_STRUCT_RETURN 1
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. */
+#define DWARF2_UNWIND_INFO 0
diff --git a/gcc/config/m68k/predicates.md b/gcc/config/m68k/predicates.md
new file mode 100644
index 000000000..6ca261fb9
--- /dev/null
+++ b/gcc/config/m68k/predicates.md
@@ -0,0 +1,246 @@
+;; Predicate definitions for Motorola 68000.
+;; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Special case of a general operand that's used as a source
+;; operand. Use this to permit reads from PC-relative memory when
+;; -mpcrel is specified.
+
+(define_predicate "general_src_operand"
+ (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem")
+{
+ if (TARGET_PCREL
+ && GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF
+ || GET_CODE (XEXP (op, 0)) == CONST))
+ return 1;
+ return general_operand (op, mode);
+})
+
+;; Special case of a nonimmediate operand that's used as a source. Use
+;; this to permit reads from PC-relative memory when -mpcrel is
+;; specified.
+
+(define_predicate "nonimmediate_src_operand"
+ (match_code "subreg,reg,mem")
+{
+ if (TARGET_PCREL && GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF
+ || GET_CODE (XEXP (op, 0)) == CONST))
+ return 1;
+ return nonimmediate_operand (op, mode);
+})
+
+;; Special case of a memory operand that's used as a source. Use this
+;; to permit reads from PC-relative memory when -mpcrel is specified.
+
+(define_predicate "memory_src_operand"
+ (match_code "subreg,mem")
+{
+ if (TARGET_PCREL && GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF
+ || GET_CODE (XEXP (op, 0)) == CONST))
+ return 1;
+ return memory_operand (op, mode);
+})
+
+;; Similar to general_operand, but exclude stack_pointer_rtx.
+
+(define_predicate "not_sp_operand"
+ (match_code "subreg,reg,mem")
+{
+ return op != stack_pointer_rtx && nonimmediate_operand (op, mode);
+})
+
+;; Predicate that accepts only a pc-relative address. This is needed
+;; because pc-relative addresses don't satisfy the predicate
+;; "general_src_operand".
+
+(define_predicate "pcrel_address"
+ (match_code "symbol_ref,label_ref,const"))
+
+;; Accept integer operands in the range 0..0xffffffff. We have to
+;; check the range carefully since this predicate is used in DImode
+;; contexts. Also, we need some extra crud to make it work when
+;; hosted on 64-bit machines.
+
+(define_predicate "const_uint32_operand"
+ (match_code "const_int,const_double")
+{
+ /* It doesn't make sense to ask this question with a mode that is
+ not larger than 32 bits. */
+ gcc_assert (GET_MODE_BITSIZE (mode) > 32);
+
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* All allowed constants will fit a CONST_INT. */
+ return (GET_CODE (op) == CONST_INT
+ && (INTVAL (op) >= 0 && INTVAL (op) <= 0xffffffffL));
+#else
+ return (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_HIGH (op) == 0));
+#endif
+})
+
+;; Accept integer operands in the range -0x80000000..0x7fffffff. We
+;; have to check the range carefully since this predicate is used in
+;; DImode contexts.
+
+(define_predicate "const_sint32_operand"
+ (match_code "const_int")
+{
+ /* It doesn't make sense to ask this question with a mode that is
+ not larger than 32 bits. */
+ gcc_assert (GET_MODE_BITSIZE (mode) > 32);
+
+ /* All allowed constants will fit a CONST_INT. */
+ return (GET_CODE (op) == CONST_INT
+ && (INTVAL (op) >= (-0x7fffffff - 1) && INTVAL (op) <= 0x7fffffff));
+})
+
+;; Return true if X is a valid comparison operator for the dbcc
+;; instruction. Note it rejects floating point comparison
+;; operators. (In the future we could use Fdbcc). It also rejects
+;; some comparisons when CC_NO_OVERFLOW is set.
+
+(define_predicate "valid_dbcc_comparison_p"
+ (and (match_code "eq,ne,gtu,ltu,geu,leu,gt,lt,ge,le")
+ (match_test "valid_dbcc_comparison_p_2 (op, mode)")))
+
+(define_predicate "m68k_cstore_comparison_operator"
+ (if_then_else (match_test "TARGET_68881")
+ (match_operand 0 "comparison_operator")
+ (match_operand 0 "ordered_comparison_operator")))
+
+;; Check for sign_extend or zero_extend. Used for bit-count operands.
+
+(define_predicate "extend_operator"
+ (match_code "sign_extend,zero_extend"))
+
+;; Returns true if OP is either a symbol reference or a sum of a
+;; symbol reference and a constant. This predicate is for "raw"
+;; symbol references not yet processed by legitimize*_address,
+;; hence we do not handle UNSPEC_{XGOT, TLS, XTLS} here.
+
+(define_predicate "symbolic_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return true;
+
+ case CONST:
+ op = XEXP (op, 0);
+ return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+
+#if 0 /* Deleted, with corresponding change in m68k.h,
+ so as to fit the specs. No CONST_DOUBLE is ever symbolic. */
+ case CONST_DOUBLE:
+ return GET_MODE (op) == mode;
+#endif
+
+ default:
+ return false;
+ }
+})
+
+;; A constant that can be used as the address in a call insn.
+(define_predicate "const_call_operand"
+ (ior (match_operand 0 "const_int_operand")
+ (and (match_test "m68k_symbolic_call != NULL")
+ (match_operand 0 "symbolic_operand"))))
+
+;; An operand that can be used as the address in a call insn.
+(define_predicate "call_operand"
+ (ior (match_operand 0 "const_call_operand")
+ (match_operand 0 "register_operand")))
+
+;; A constant that can be used as the address in a sibcall insn.
+(define_predicate "const_sibcall_operand"
+ (ior (match_operand 0 "const_int_operand")
+ (and (match_test "m68k_symbolic_jump != NULL")
+ (match_operand 0 "symbolic_operand"))))
+
+;; An operand that can be used as the address in a sibcall insn.
+(define_predicate "sibcall_operand"
+ (ior (match_operand 0 "const_sibcall_operand")
+ (and (match_code "reg")
+ (match_test "REGNO (op) == STATIC_CHAIN_REGNUM"))))
+
+;; A memory operand whose address is a post-increment.
+
+(define_predicate "post_inc_operand"
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) == POST_INC")))
+
+;; A memory operand whose address is a pre-decrement.
+
+(define_predicate "pre_dec_operand"
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) == PRE_DEC")))
+
+;; A zero constant.
+(define_predicate "const0_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; A one constant (operand for conditional_trap).
+(define_predicate "const1_operand"
+ (and (match_code "const_int")
+ (match_test "op == const1_rtx")))
+
+;; A valid operand for a HImode or QImode conditional operation.
+;; ColdFire has tst patterns, but not cmp patterns.
+(define_predicate "m68k_subword_comparison_operand"
+ (if_then_else (match_test "TARGET_COLDFIRE")
+ (and (match_code "const_int")
+ (match_test "op == const0_rtx"))
+ (match_operand 0 "general_src_operand")))
+
+;; An operand for movsi_const0 pattern.
+(define_predicate "movsi_const0_operand"
+ (and (match_operand 0 "nonimmediate_operand")
+ (match_test "(TARGET_68010 || TARGET_COLDFIRE)
+ || !(MEM_P (op) && MEM_VOLATILE_P (op))")))
+
+;; A non-symbolic call operand.
+;; We need to special case 'const_int' to ignore its mode while matching.
+(define_predicate "non_symbolic_call_operand"
+ (and (match_operand 0 "call_operand")
+ (ior (and (match_code "const_int")
+ (match_test "!symbolic_operand (op, mode)"))
+ (match_test "!symbolic_operand (op,mode)"))))
+
+;; Special case of general_src_operand, which rejects a few fp
+;; constants (which we prefer in registers) before reload.
+
+(define_predicate "fp_src_operand"
+ (match_operand 0 "general_src_operand")
+{
+ return !CONSTANT_P (op)
+ || (TARGET_68881
+ && (!standard_68881_constant_p (op)
+ || reload_in_progress
+ || reload_completed));
+})
diff --git a/gcc/config/m68k/print-sysroot-suffix.sh b/gcc/config/m68k/print-sysroot-suffix.sh
new file mode 100644
index 000000000..3cf1d8eb4
--- /dev/null
+++ b/gcc/config/m68k/print-sysroot-suffix.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# This script takes the following arguments:
+#
+# - the target sysroot
+# - the value of $(MULTILIB_MATCHES)
+# - the value of $(MULTILIB_OPTIONS)
+#
+# It uses these arguments to construct a definition of SYSROOT_SUFFIX_SPEC,
+# which it prints to the standard output. For each multilib directory FOO,
+# the script checks whether $sysroot has a subdirectory FOO; if so, it
+# uses /FOO as the suffix for all compatible command-line options, and
+# otherwise it adds no suffix for them. These suffixes are concatenated,
+# with one subspec for each space-separated entry in $(MULTILIB_OPTIONS).
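+#
+# For illustration (hypothetical values): with a sysroot containing an
+# "m68000" subdirectory, $(MULTILIB_OPTIONS) of "mcpu=68000" and no
+# matches, the script would print:
+#   #undef SYSROOT_SUFFIX_SPEC
+#   #define SYSROOT_SUFFIX_SPEC "%{mcpu=68000:/m68000}"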
+set -e
+sysroot=$1
+matches=$2
+options=$3
+
+# For each multilib option OPT, add to $substs a sed command of the
+# form "-e 's/OPT/OPT/'".
+substs=""
+for option in `echo "$options" | tr '/' ' '`
+do
+ substs="$substs -e 's/$option/$option/g'"
+done
+
+# For each ALIAS=CANONICAL entry in $MULTILIB_MATCHES, look for sed
+# arguments in $substs of the form "-e 's/CANONICAL/.../'". Replace
+# such entries with "-e 's/CANONICAL/ALIAS|.../'". Both the ALIAS and
+# CANONICAL parts of $MULTILIB_MATCHES use '?' to stand for '='.
+#
+# After this loop, a command of the form "echo FOO | eval sed $substs"
+# will replace a canonical option FOO with a %{...}-style spec pattern.
+for match in $matches
+do
+ canonical=`echo "$match" | sed -e 's/=.*//' -e 's/?/=/g'`
+ alias=`echo "$match" | sed -e 's/.*=//' -e 's/?/=/g'`
+ substs=`echo "$substs" | sed -e "s,s/$canonical/,&$alias|,"`
+done
+
+# Build up the final SYSROOT_SUFFIX_SPEC in $spec.
+spec=
+for combo in $options
+do
+ # See which option alternatives in $combo have their own sysroot
+ # directory. Create a subspec of the form "%{PAT1:/DIR1;...;PATn:DIRn}"
+ # from each such option OPTi, where DIRi is the directory associated
+ # with OPTi and PATi is the result of passing OPTi through $substs.
+ subspec=
+ for option in `echo "$combo" | tr '/' ' '`
+ do
+ dir=`echo "$option" | sed 's/cpu=//'`
+ if test -d "$sysroot/$dir"; then
+ test -z "$subspec" || subspec="$subspec;"
+ subspec="$subspec"`echo "$option" | eval sed $substs`":/$dir"
+ fi
+ done
+ # Concatenate all the subspecs.
+ test -z "$subspec" || spec="$spec%{$subspec}"
+done
+if test -n "$spec"; then
+ echo "#undef SYSROOT_SUFFIX_SPEC"
+ echo "#define SYSROOT_SUFFIX_SPEC \"$spec\""
+fi
diff --git a/gcc/config/m68k/rtemself.h b/gcc/config/m68k/rtemself.h
new file mode 100644
index 000000000..20861fbfe
--- /dev/null
+++ b/gcc/config/m68k/rtemself.h
@@ -0,0 +1,33 @@
+/* Definitions for rtems targeting a Motorola m68k using elf.
+ Copyright (C) 1999, 2000, 2002 National Research Council of Canada.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Contributed by Charles-Antoine Gauthier (charles.gauthier@nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Target OS builtins. */
+#undef TARGET_OS_CPP_BUILTINS /* Defined in m68kemb.h. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("mc68000"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_define ("__rtems__"); \
+ builtin_assert ("system=rtems"); \
+ } \
+ while (0)
diff --git a/gcc/config/m68k/t-cf b/gcc/config/m68k/t-cf
new file mode 100644
index 000000000..7bf8e11ee
--- /dev/null
+++ b/gcc/config/m68k/t-cf
@@ -0,0 +1,7 @@
+# Select only ColdFire-specific CPUs.
+
+M68K_MLIB_CPU += && (CPU ~ "^mcf")
+M68K_ARCH := cf
+# Do not stamp the multilibs with a MAC type, as we never use those
+# instructions in compiler-generated code.
+MULTILIB_EXTRA_OPTS += Wa,-mno-mac
diff --git a/gcc/config/m68k/t-crtstuff b/gcc/config/m68k/t-crtstuff
new file mode 100644
index 000000000..a8bdb502d
--- /dev/null
+++ b/gcc/config/m68k/t-crtstuff
@@ -0,0 +1,10 @@
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crti.o crtn.o
+
+# Add flags here as required.
+CRTSTUFF_T_CFLAGS =
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/m68k/crti.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(MULTILIB_CFLAGS) -c -o $(T)crti.o $(srcdir)/config/m68k/crti.s
+$(T)crtn.o: $(srcdir)/config/m68k/crtn.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(MULTILIB_CFLAGS) -c -o $(T)crtn.o $(srcdir)/config/m68k/crtn.s
diff --git a/gcc/config/m68k/t-floatlib b/gcc/config/m68k/t-floatlib
new file mode 100644
index 000000000..2039d1d0d
--- /dev/null
+++ b/gcc/config/m68k/t-floatlib
@@ -0,0 +1,31 @@
+# Copyright (C) 2007 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = m68k/lb1sf68.asm
+LIB1ASMFUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
+ _double _float _floatex \
+ _eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
+ _eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2
+
+LIB2FUNCS_EXTRA = fpgnulib.c xfgnulib.c
+
+fpgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
+ cp $(srcdir)/config/m68k/fpgnulib.c fpgnulib.c
+xfgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
+ echo '#define EXTFLOAT' > xfgnulib.c
+ cat $(srcdir)/config/m68k/fpgnulib.c >> xfgnulib.c
diff --git a/gcc/config/m68k/t-linux b/gcc/config/m68k/t-linux
new file mode 100644
index 000000000..d61f73bea
--- /dev/null
+++ b/gcc/config/m68k/t-linux
@@ -0,0 +1,33 @@
+# Copyright (C) 2008, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o
+
+# Only include multilibs for 680x0 CPUs with an MMU.
+M68K_MLIB_CPU += && (CPU ~ "^m680") && (FLAGS ~ "FL_MMU")
+
+ifeq ($(M68K_ARCH),m68k)
+MULTIARCH_DIRNAME = $(call if_multiarch,m68k-linux-gnu)
+endif
+
+# This rule uses MULTILIB_MATCHES to generate a definition of
+# SYSROOT_SUFFIX_SPEC.
+sysroot-suffix.h: $(srcdir)/config/m68k/print-sysroot-suffix.sh
+ $(SHELL) $(srcdir)/config/m68k/print-sysroot-suffix.sh \
+ "$(SYSTEM_HEADER_DIR)/../.." "$(MULTILIB_MATCHES)" \
+ "$(MULTILIB_OPTIONS)" > $@
diff --git a/gcc/config/m68k/t-m68k b/gcc/config/m68k/t-m68k
new file mode 100644
index 000000000..cbff34d65
--- /dev/null
+++ b/gcc/config/m68k/t-m68k
@@ -0,0 +1,4 @@
+# Select only 680x0-specific CPUs.
+
+M68K_MLIB_CPU += && (CPU !~ "^mcf")
+M68K_ARCH := m68k
diff --git a/gcc/config/m68k/t-m68kbare b/gcc/config/m68k/t-m68kbare
new file mode 100644
index 000000000..0cbaead7d
--- /dev/null
+++ b/gcc/config/m68k/t-m68kbare
@@ -0,0 +1,4 @@
+# Add soft-float multilibs.
+M68K_MLIB_DIRNAMES += softfp
+M68K_MLIB_OPTIONS += msoft-float
+
diff --git a/gcc/config/m68k/t-m68kelf b/gcc/config/m68k/t-m68kelf
new file mode 100644
index 000000000..bea01dc4f
--- /dev/null
+++ b/gcc/config/m68k/t-m68kelf
@@ -0,0 +1,4 @@
+# from ../t-svr4
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o
+# no pic for now
+#CRTSTUFF_T_CFLAGS=-fpic
diff --git a/gcc/config/m68k/t-mlibs b/gcc/config/m68k/t-mlibs
new file mode 100644
index 000000000..dcf681c32
--- /dev/null
+++ b/gcc/config/m68k/t-mlibs
@@ -0,0 +1,115 @@
+# multilibs -*- mode:Makefile -*-
+#
+# Copyright (C) 2007 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# An awk command to extract lines from the m68k-devices.def file that
+# match $1 and then print the string defined by $2. Leading and
+# trailing whitespace is removed. $1 & $2 can make use of
+# CPU -- the cpu identifier (has leading 'm'/'mcf')
+# FLAGS -- the cpu capability flags
+# CPU_NAME -- the cpu name (has no leading m/mcf)
+# MLIB -- the multilib cpu name (no leading m/mcf)
+# This is intended to be used as $(call M68K_AWK,predicate,string)
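+# For example (illustrative), $(call M68K_AWK, CPU ~ "^mcf", MLIB) prints
+# the multilib name of each ColdFire device.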
+M68K_AWK = $(strip $(shell $(AWK) 'BEGIN { FS="[ \t]*[,()][ \t]*"; ORS=" " }; \
+ /^M68K_DEVICE/ { CPU=$$3; FLAGS=$$8; \
+ CPU_NAME=substr($$2,2,length($$2)-2); \
+ MLIB=substr($$5,2,length($$5)-2); \
+ if ($1) print $2 }' $(srcdir)/config/m68k/m68k-devices.def))
+
+# Add a multilib for each distinct architecture. M68K_MLIB_CPU, if defined,
+# adds additional restrictions.
+M68K_MLIB_CPUS := $(call M68K_AWK,\
+ (CPU_NAME == MLIB) $(M68K_MLIB_CPU), \
+ "m"MLIB)
+
+# Make the default cpu the default multilib.
+M68K_MLIB_DEFAULT := $(call M68K_AWK, CPU == "$(target_cpu_default)", MLIB)
+
+ifeq ($(filter m$(M68K_MLIB_DEFAULT),$(M68K_MLIB_CPUS)),)
+$(error Error default cpu '$(target_cpu_default)' is not in multilib set '$(M68K_MLIB_CPUS)')
+endif
+
+# Sed arguments that convert mcpu=* arguments into canonical forms.
+# We want to use the legacy m68* options instead of the new -mcpu=68*
+# options when compiling multilibs because the former are recognised
+# by older binutils.
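+# For example, "mcpu=68020" becomes "m68020" and "mcpu=cpu32" becomes
+# "mcpu32".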
+CANONICALIZE_OPTIONS = -e 's|mcpu=68|m68|g' -e 's|mcpu=cpu32|mcpu32|g'
+
+MULTILIB_DIRNAMES := $(filter-out m$(M68K_MLIB_DEFAULT),$(M68K_MLIB_CPUS))
+MULTILIB_OPTIONS := $(shell echo $(MULTILIB_DIRNAMES:m%=mcpu=%) \
+ | sed -e 's| |/|g' $(CANONICALIZE_OPTIONS))
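+# For example (illustrative), dirnames "m68020 m5206" yield the option
+# set "m68020/mcpu=5206" after canonicalization.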
+
+# Add subtarget specific options & dirs.
+MULTILIB_DIRNAMES += $(M68K_MLIB_DIRNAMES)
+MULTILIB_OPTIONS += $(M68K_MLIB_OPTIONS)
+
+MULTILIB_MATCHES :=
+
+ifneq ($(M68K_ARCH),cf)
+# Map the new-style options to the legacy m68k ones.
+MULTILIB_MATCHES += m68000=mcpu?68000 m68000=march?68000 m68000=mc68000 \
+ m68000=m68302 \
+ m68020=mcpu?68020 m68020=march?68020 m68020=mc68020 \
+ m68030=mcpu?68030 m68030=march?68030 \
+ m68040=mcpu?68040 m68040=march?68040 \
+ m68060=mcpu?68060 m68060=march?68060 \
+ mcpu32=mcpu?cpu32 mcpu32=march?cpu32 mcpu32=m68332
+endif
+
+ifneq ($(M68K_ARCH),m68k)
+# Map the legacy ColdFire options to the new ones.
+MULTILIB_MATCHES += mcpu?5206=m5200 mcpu?5206e=m5206e mcpu?5208=m528x \
+ mcpu?5307=m5300 mcpu?5307=m5307 \
+ mcpu?5407=m5400 mcpu?5407=m5407 \
+ mcpu?5475=mcfv4e
+# Map -march=* options to the representative -mcpu=* option.
+MULTILIB_MATCHES += mcpu?5206e=march?isaa mcpu?5208=march?isaaplus \
+ mcpu?5407=march?isab
+endif
+
+# Match non-representative -mcpu options to their representative option.
+MULTILIB_MATCHES += \
+ $(call M68K_AWK, \
+ (CPU_NAME != MLIB) $(M68K_MLIB_CPU), \
+ (match(MLIB, "^68") || MLIB == "cpu32" \
+ ? "m"MLIB"=mcpu?"CPU_NAME \
+ : "mcpu?"MLIB"=mcpu?"CPU_NAME))
+
+MULTILIB_EXCEPTIONS :=
+
+ifeq ($(firstword $(M68K_MLIB_OPTIONS)),msoft-float)
+# Exclude soft-float multilibs for targets that default to soft-float anyway.
+MULTILIB_EXCEPTIONS += $(call M68K_AWK,\
+ (CPU_NAME == MLIB) $(M68K_MLIB_CPU) \
+ && (((CPU ~ "^mcf") && !match(FLAGS, "FL_CF_FPU")) \
+ || CPU == "cpu32" \
+ || CPU == "m68000"), \
+ "mcpu="MLIB"/msoft-float*")
+endif
+
+# Remove the default CPU from the explicit exceptions.
+MULTILIB_EXCEPTIONS := \
+ $(patsubst mcpu=$(M68K_MLIB_DEFAULT)/%,%,$(MULTILIB_EXCEPTIONS))
+
+# Convert all options to canonical form.
+MULTILIB_EXCEPTIONS := $(shell echo $(MULTILIB_EXCEPTIONS) | \
+ sed $(CANONICALIZE_OPTIONS))
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/m68k/t-openbsd b/gcc/config/m68k/t-openbsd
new file mode 100644
index 000000000..b295608de
--- /dev/null
+++ b/gcc/config/m68k/t-openbsd
@@ -0,0 +1,4 @@
+# gdb gets confused if pic code is linked with non-pic code.
+# We cope by building all variants of libgcc.
+M68K_MLIB_OPTIONS += fpic/fPIC
+M68K_MLIB_DIRNAMES += fpic fPIC
diff --git a/gcc/config/m68k/t-rtems b/gcc/config/m68k/t-rtems
new file mode 100644
index 000000000..0997afebc
--- /dev/null
+++ b/gcc/config/m68k/t-rtems
@@ -0,0 +1,9 @@
+# Custom multilibs for RTEMS
+M68K_MLIB_CPU += && (match(MLIB, "^68") \
+ || MLIB == "cpu32" \
+ || MLIB == "5206" \
+ || MLIB == "5208" \
+ || MLIB == "5307" \
+ || MLIB == "5329" \
+ || MLIB == "5407" \
+ || MLIB == "5475")
diff --git a/gcc/config/m68k/t-slibgcc-elf-ver b/gcc/config/m68k/t-slibgcc-elf-ver
new file mode 100644
index 000000000..6aac37cc0
--- /dev/null
+++ b/gcc/config/m68k/t-slibgcc-elf-ver
@@ -0,0 +1,3 @@
+# Bump the version number of the shared libgcc library
+
+SHLIB_SOVERSION = 2
diff --git a/gcc/config/m68k/t-uclinux b/gcc/config/m68k/t-uclinux
new file mode 100644
index 000000000..e1711a344
--- /dev/null
+++ b/gcc/config/m68k/t-uclinux
@@ -0,0 +1,36 @@
+# Copyright (C) 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# crti and crtn are provided by uClibc.
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o
+
+# Include multilibs for CPUs without an MMU or with FL_UCLINUX
+M68K_MLIB_CPU += && (!match(FLAGS, "FL_MMU") || match(FLAGS, "FL_UCLINUX"))
+
+# Add multilibs for execute-in-place and shared-library code.
+M68K_MLIB_OPTIONS += msep-data/mid-shared-library
+M68K_MLIB_DIRNAMES += msep-data mid-shared-library
+
+# This rule uses MULTILIB_MATCHES to generate a definition of
+# SYSROOT_SUFFIX_SPEC.
+sysroot-suffix.h: $(srcdir)/config/m68k/print-sysroot-suffix.sh
+ $(SHELL) $(srcdir)/config/m68k/print-sysroot-suffix.sh \
+ "$(SYSTEM_HEADER_DIR)/../.." "$(MULTILIB_MATCHES)" \
+ "$(MULTILIB_OPTIONS)" > $@
+
+generated_files += sysroot-suffix.h
diff --git a/gcc/config/m68k/uclinux-oldabi.h b/gcc/config/m68k/uclinux-oldabi.h
new file mode 100644
index 000000000..7ef202efb
--- /dev/null
+++ b/gcc/config/m68k/uclinux-oldabi.h
@@ -0,0 +1,70 @@
+/* Definitions of target machine for GCC. m68k/ColdFire based uClinux system
+ using ELF objects with special linker post-processing to produce FLAT
+ executables.
+
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* The old uClinux ABI used 80-bit "long double"s for ColdFire too. */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 80
+#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80
+
+/* Undo the definition of STARTFILE_SPEC from m68kelf.h so we'll
+ pick the default from gcc.c (just link crt0.o from multilib dir). */
+#undef STARTFILE_SPEC
+
+/* Override the default LIB_SPEC from gcc.c. We don't currently support
+ profiling or libg.a. */
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{mid-shared-library:-R libc.gdb%s -elf2flt -shared-lib-id 0} -lc \
+"
+
+/* we don't want a .eh_frame section. */
+#define EH_FRAME_IN_DATA_SECTION
+
+/* ??? Quick hack to get constructors working. Make this look more like a
+ COFF target, so the existing dejagnu/libgloss support works. A better
+ solution would be to make the necessary dejagnu and libgloss changes so
+ that we can use the normal ELF constructor mechanism. */
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+/* Bring in standard linux defines */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("mc68000"); \
+ builtin_define ("__uClinux__"); \
+ builtin_define_std ("linux"); \
+ builtin_define_std ("unix"); \
+ builtin_define ("__gnu_linux__"); \
+ builtin_assert ("system=linux"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ if (TARGET_ID_SHARED_LIBRARY) \
+ builtin_define ("__ID_SHARED_LIBRARY__"); \
+ } \
+ while (0)
+
diff --git a/gcc/config/m68k/uclinux.h b/gcc/config/m68k/uclinux.h
new file mode 100644
index 000000000..1b21cafa0
--- /dev/null
+++ b/gcc/config/m68k/uclinux.h
@@ -0,0 +1,72 @@
+/* Definitions of target machine for GCC. m68k/ColdFire based uClinux system
+ using ELF objects with special linker post-processing to produce FLAT
+ executables.
+
+ Copyright (C) 2003, 2007, 2010 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (68k uClinux)");
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+"%{mshared-library-id=0|!mshared-library-id=*: crt1.o%s ;: Scrt1.o%s} \
+ crti.o%s crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* Override the default LIB_SPEC from gcc.c. We don't currently support
+ profiling or libg.a. */
+#undef LIB_SPEC
+#define LIB_SPEC \
+"%{mid-shared-library:%{!static-libc:-R libc.gdb%s}} %{pthread:-lpthread} -lc"
+
+/* Default to using -elf2flt with no options. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+"%{!elf2flt*:-elf2flt} \
+ %{mid-shared-library: \
+ %{mshared-library-id=*:-shared-lib-id %*;:-shared-lib-id 0}}"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ LINUX_TARGET_OS_CPP_BUILTINS (); \
+ builtin_define ("__uClinux__"); \
+ if (TARGET_ID_SHARED_LIBRARY) \
+ { \
+ builtin_define ("__ID_SHARED_LIBRARY__"); \
+ /* Shared libraries and executables do not share \
+ typeinfo names. */ \
+ builtin_define ("__GXX_MERGED_TYPEINFO_NAMES=0"); \
+ builtin_define ("__GXX_TYPEINFO_EQUALITY_INLINE=0"); \
+ } \
+ } \
+ while (0)
+
+/* -msep-data is the default PIC mode on this target. */
+#define DRIVER_SELF_SPECS \
+ "%{fpie|fPIE|fpic|fPIC:%{!msep-data:%{!mid-shared-library: -msep-data}}}"
+
+/* The uclinux binary format relies on relocations against a segment being
+ within that segment. Conservatively apply this rule to individual
+ sections. */
+#undef M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
+#define M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 1
diff --git a/gcc/config/m68k/uclinux.opt b/gcc/config/m68k/uclinux.opt
new file mode 100644
index 000000000..537649952
--- /dev/null
+++ b/gcc/config/m68k/uclinux.opt
@@ -0,0 +1,36 @@
+; m68k/ColdFire uClinux options.
+
+; Copyright (C) 2011
+; Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; See the GCC internals manual (options.texi) for a description of
+; this file's format.
+
+; Please try to keep this file in ASCII collating order.
+
+elf2flt
+Driver
+
+elf2flt=
+Driver JoinedOrMissing
+
+static-libc
+Driver
+
+; This comment is to ensure we retain the blank line above.