From 554fd8c5195424bdbcabf5de30fdc183aba391bd Mon Sep 17 00:00:00 2001 From: upstream source tree Date: Sun, 15 Mar 2015 20:14:05 -0400 Subject: obtained gcc-4.6.4.tar.bz2 from upstream website; verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository. --- gcc/config/lm32/lm32.md | 996 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 996 insertions(+) create mode 100644 gcc/config/lm32/lm32.md (limited to 'gcc/config/lm32/lm32.md') diff --git a/gcc/config/lm32/lm32.md b/gcc/config/lm32/lm32.md new file mode 100644 index 000000000..7539cb065 --- /dev/null +++ b/gcc/config/lm32/lm32.md @@ -0,0 +1,996 @@ +;; Machine description of the Lattice Mico32 architecture for GNU C compiler. +;; Contributed by Jon Beniston + +;; Copyright (C) 2009, 2010 Free Software Foundation, Inc. + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Include predicate and constraint definitions +(include "predicates.md") +(include "constraints.md") + + +;; Register numbers +(define_constants + [(RA_REGNUM 29) ; return address register. 
+ ] +) + +;; LM32 specific volatile operations +(define_constants + [(UNSPECV_BLOCKAGE 1)] ; prevent scheduling across pro/epilog boundaries +) + +;; LM32 specific operations +(define_constants + [(UNSPEC_GOT 2) + (UNSPEC_GOTOFF_HI16 3) + (UNSPEC_GOTOFF_LO16 4)] +) + +;; --------------------------------- +;; instruction types +;; --------------------------------- + +(define_attr "type" + "unknown,load,store,arith,compare,shift,multiply,divide,call,icall,ubranch,uibranch,cbranch" + (const_string "unknown")) + +;; --------------------------------- +;; instruction lengths +;; --------------------------------- + +; All instructions are 4 bytes +; Except for branches that are out of range, and have to be implemented +; as two instructions +(define_attr "length" "" + (cond [ + (eq_attr "type" "cbranch") + (if_then_else + (lt (abs (minus (match_dup 2) (pc))) + (const_int 32768) + ) + (const_int 4) + (const_int 8) + ) + ] + (const_int 4)) +) + +;; --------------------------------- +;; scheduling +;; --------------------------------- + +(define_automaton "lm32") + +(define_cpu_unit "x" "lm32") +(define_cpu_unit "m" "lm32") +(define_cpu_unit "w" "lm32") + +(define_insn_reservation "singlecycle" 1 + (eq_attr "type" "store,arith,call,icall,ubranch,uibranch,cbranch") + "x") + +(define_insn_reservation "twocycle" 2 + (eq_attr "type" "compare,shift,divide") + "x,m") + +(define_insn_reservation "threecycle" 3 + (eq_attr "type" "load,multiply") + "x,m,w") + +;; --------------------------------- +;; mov +;; --------------------------------- + +(define_expand "movqi" + [(set (match_operand:QI 0 "general_operand" "") + (match_operand:QI 1 "general_operand" ""))] + "" + " +{ + if (can_create_pseudo_p ()) + { + if (GET_CODE (operand0) == MEM) + { + /* Source operand for store must be in a register. */ + operands[1] = force_reg (QImode, operands[1]); + } + } +}") + +(define_expand "movhi" + [(set (match_operand:HI 0 "general_operand" "") + (match_operand:HI 1 "general_operand" ""))] + "" + " +{ + if (can_create_pseudo_p ()) + { + if (GET_CODE (operands[0]) == MEM) + { + /* Source operand for store must be in a register. */ + operands[1] = force_reg (HImode, operands[1]); + } + } +}") + +(define_expand "movsi" + [(set (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "" + " +{ + if (can_create_pseudo_p ()) + { + if (GET_CODE (operands[0]) == MEM + || (GET_CODE (operands[0]) == SUBREG + && GET_CODE (SUBREG_REG (operands[0])) == MEM)) + { + /* Source operand for store must be in a register. 
*/ + operands[1] = force_reg (SImode, operands[1]); + } + } + + if (flag_pic && symbolic_operand (operands[1], SImode)) + { + if (GET_CODE (operands[1]) == LABEL_REF + || (GET_CODE (operands[1]) == SYMBOL_REF + && SYMBOL_REF_LOCAL_P (operands[1]) + && !SYMBOL_REF_WEAK (operands[1]))) + { + emit_insn (gen_movsi_gotoff_hi16 (operands[0], operands[1])); + emit_insn (gen_addsi3 (operands[0], + operands[0], + pic_offset_table_rtx)); + emit_insn (gen_movsi_gotoff_lo16 (operands[0], + operands[0], + operands[1])); + } + else + emit_insn (gen_movsi_got (operands[0], operands[1])); + crtl->uses_pic_offset_table = 1; + DONE; + } + else if (flag_pic && GET_CODE (operands[1]) == CONST) + { + rtx op = XEXP (operands[1], 0); + if (GET_CODE (op) == PLUS) + { + rtx arg0 = XEXP (op, 0); + rtx arg1 = XEXP (op, 1); + if (GET_CODE (arg0) == LABEL_REF + || (GET_CODE (arg0) == SYMBOL_REF + && SYMBOL_REF_LOCAL_P (arg0) + && !SYMBOL_REF_WEAK (arg0))) + { + emit_insn (gen_movsi_gotoff_hi16 (operands[0], arg0)); + emit_insn (gen_addsi3 (operands[0], + operands[0], + pic_offset_table_rtx)); + emit_insn (gen_movsi_gotoff_lo16 (operands[0], + operands[0], + arg0)); + } + else + emit_insn (gen_movsi_got (operands[0], arg0)); + emit_insn (gen_addsi3 (operands[0], operands[0], arg1)); + crtl->uses_pic_offset_table = 1; + DONE; + } + } + else if (!flag_pic && reloc_operand (operands[1], GET_MODE (operands[1]))) + { + emit_insn (gen_rtx_SET (SImode, operands[0], gen_rtx_HIGH (SImode, operands[1]))); + emit_insn (gen_rtx_SET (SImode, operands[0], gen_rtx_LO_SUM (SImode, operands[0], operands[1]))); + DONE; + } + else if (GET_CODE (operands[1]) == CONST_INT) + { + if (!(satisfies_constraint_K (operands[1]) + || satisfies_constraint_L (operands[1]) + || satisfies_constraint_U (operands[1]))) + { + emit_insn (gen_movsi_insn (operands[0], + GEN_INT (INTVAL (operands[1]) & ~0xffff))); + emit_insn (gen_iorsi3 (operands[0], + operands[0], + GEN_INT (INTVAL (operands[1]) & 0xffff))); + DONE; + } + } +}") + +(define_expand "movmemsi" + [(parallel [(set (match_operand:BLK 0 "general_operand" "") + (match_operand:BLK 1 "general_operand" "")) + (use (match_operand:SI 2 "" "")) + (use (match_operand:SI 3 "const_int_operand" ""))])] + "" +{ + if (!lm32_expand_block_move (operands)) + FAIL; + DONE; +}) + +;; --------------------------------- +;; load/stores/moves +;; --------------------------------- + +(define_insn "movsi_got" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOT))] + "flag_pic" + "lw %0, (gp+got(%1))" + [(set_attr "type" "load")] +) + +(define_insn "movsi_gotoff_hi16" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF_HI16))] + "flag_pic" + "orhi %0, r0, gotoffhi16(%1)" + [(set_attr "type" "load")] +) + +(define_insn "movsi_gotoff_lo16" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(plus:SI (match_operand:SI 1 "register_operand" "0") + (match_operand 2 "" ""))] UNSPEC_GOTOFF_LO16))] + "flag_pic" + "addi %0, %1, gotofflo16(%2)" + [(set_attr "type" "arith")] +) + +(define_insn "*movsi_lo_sum" + [(set (match_operand:SI 0 "register_operand" "=r") + (lo_sum:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "reloc_operand" "i")))] + "!flag_pic" + "ori %0, %0, lo(%2)" + [(set_attr "type" "arith")] +) + +(define_insn "*movqi_insn" + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,m,r") + (match_operand:QI 1 "general_operand" "m,r,r,J,n"))] + "lm32_move_ok (QImode, operands)" + 
"@ + lbu %0, %1 + or %0, %1, r0 + sb %0, %1 + sb %0, r0 + addi %0, r0, %1" + [(set_attr "type" "load,arith,store,store,arith")] +) + +(define_insn "*movhi_insn" + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,m,r,r") + (match_operand:HI 1 "general_operand" "m,r,r,J,K,L"))] + "lm32_move_ok (HImode, operands)" + "@ + lhu %0, %1 + or %0, %1, r0 + sh %0, %1 + sh %0, r0 + addi %0, r0, %1 + ori %0, r0, %1" + [(set_attr "type" "load,arith,store,store,arith,arith")] +) + +(define_insn "movsi_insn" + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,m,m,r,r,r,r,r") + (match_operand:SI 1 "movsi_rhs_operand" "m,r,r,J,K,L,U,S,Y"))] + "lm32_move_ok (SImode, operands)" + "@ + lw %0, %1 + or %0, %1, r0 + sw %0, %1 + sw %0, r0 + addi %0, r0, %1 + ori %0, r0, %1 + orhi %0, r0, hi(%1) + mva %0, gp(%1) + orhi %0, r0, hi(%1)" + [(set_attr "type" "load,arith,store,store,arith,arith,arith,arith,arith")] +) + +;; --------------------------------- +;; sign and zero extension +;; --------------------------------- + +(define_insn "*extendqihi2" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m,r")))] + "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)" + "@ + lb %0, %1 + sextb %0, %1" + [(set_attr "type" "load,arith")] +) + +(define_insn "zero_extendqihi2" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "m,r")))] + "" + "@ + lbu %0, %1 + andi %0, %1, 0xff" + [(set_attr "type" "load,arith")] +) + +(define_insn "*extendqisi2" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m,r")))] + "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)" + "@ + lb %0, %1 + sextb %0, %1" + [(set_attr "type" "load,arith")] +) + +(define_insn "zero_extendqisi2" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "m,r")))] + "" + "@ + lbu %0, %1 + andi %0, %1, 0xff" + [(set_attr "type" "load,arith")] +) + +(define_insn "*extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "m,r")))] + "TARGET_SIGN_EXTEND_ENABLED || (GET_CODE (operands[1]) != REG)" + "@ + lh %0, %1 + sexth %0, %1" + [(set_attr "type" "load,arith")] +) + +(define_insn "zero_extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "m,r")))] + "" + "@ + lhu %0, %1 + andi %0, %1, 0xffff" + [(set_attr "type" "load,arith")] +) + +;; --------------------------------- +;; compare +;; --------------------------------- + +(define_expand "cstoresi4" + [(set (match_operand:SI 0 "register_operand") + (match_operator:SI 1 "ordered_comparison_operator" + [(match_operand:SI 2 "register_operand") + (match_operand:SI 3 "register_or_int_operand")]))] + "" +{ + lm32_expand_scc (operands); + DONE; +}) + +(define_insn "*seq" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (eq:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "" + "@ + cmpe %0, %z1, %2 + cmpei %0, %z1, %2" + [(set_attr "type" "compare")] +) + +(define_insn "*sne" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ne:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "" + "@ + cmpne %0, %z1, %2 + cmpnei %0, %z1, %2" + [(set_attr 
"type" "compare")] +) + +(define_insn "*sgt" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (gt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "" + "@ + cmpg %0, %z1, %2 + cmpgi %0, %z1, %2" + [(set_attr "type" "compare")] +) + +(define_insn "*sge" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ge:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "" + "@ + cmpge %0, %z1, %2 + cmpgei %0, %z1, %2" + [(set_attr "type" "compare")] +) + +(define_insn "*sgtu" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (gtu:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "" + "@ + cmpgu %0, %z1, %2 + cmpgui %0, %z1, %2" + [(set_attr "type" "compare")] +) + +(define_insn "*sgeu" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (geu:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "" + "@ + cmpgeu %0, %z1, %2 + cmpgeui %0, %z1, %2" + [(set_attr "type" "compare")] +) + +;; --------------------------------- +;; unconditional branch +;; --------------------------------- + +(define_insn "jump" + [(set (pc) (label_ref (match_operand 0 "" "")))] + "" + "bi %0" + [(set_attr "type" "ubranch")] +) + +(define_insn "indirect_jump" + [(set (pc) (match_operand:SI 0 "register_operand" "r"))] + "" + "b %0" + [(set_attr "type" "uibranch")] +) + +;; --------------------------------- +;; conditional branch +;; --------------------------------- + +(define_expand "cbranchsi4" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(match_operand:SI 1 "register_operand") + (match_operand:SI 2 "nonmemory_operand")]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" + " +{ + lm32_expand_conditional_branch (operands); + DONE; +}") + +(define_insn "*beq" + [(set (pc) + (if_then_else (eq:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? "be %z0,%z1,%2" + : "bne %z0,%z1,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +(define_insn "*bne" + [(set (pc) + (if_then_else (ne:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? "bne %z0,%z1,%2" + : "be %z0,%z1,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +(define_insn "*bgt" + [(set (pc) + (if_then_else (gt:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? "bg %z0,%z1,%2" + : "bge %z1,%z0,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +(define_insn "*bge" + [(set (pc) + (if_then_else (ge:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? 
"bge %z0,%z1,%2" + : "bg %z1,%z0,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +(define_insn "*bgtu" + [(set (pc) + (if_then_else (gtu:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? "bgu %z0,%z1,%2" + : "bgeu %z1,%z0,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +(define_insn "*bgeu" + [(set (pc) + (if_then_else (geu:SI (match_operand:SI 0 "register_or_zero_operand" "rJ") + (match_operand:SI 1 "register_or_zero_operand" "rJ")) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" +{ + return get_attr_length (insn) == 4 + ? "bgeu %z0,%z1,%2" + : "bgu %z1,%z0,8\n\tbi %2"; +} + [(set_attr "type" "cbranch")]) + +;; --------------------------------- +;; call +;; --------------------------------- + +(define_expand "call" + [(parallel [(call (match_operand 0 "" "") + (match_operand 1 "" "")) + (clobber (reg:SI RA_REGNUM)) + ])] + "" + " +{ + rtx addr = XEXP (operands[0], 0); + if (!CONSTANT_ADDRESS_P (addr)) + XEXP (operands[0], 0) = force_reg (Pmode, addr); +}") + +(define_insn "*call" + [(call (mem:SI (match_operand:SI 0 "call_operand" "r,s")) + (match_operand 1 "" "")) + (clobber (reg:SI RA_REGNUM))] + "" + "@ + call %0 + calli %0" + [(set_attr "type" "call,icall")] +) + +(define_expand "call_value" + [(parallel [(set (match_operand 0 "" "") + (call (match_operand 1 "" "") + (match_operand 2 "" ""))) + (clobber (reg:SI RA_REGNUM)) + ])] + "" + " +{ + rtx addr = XEXP (operands[1], 0); + if (!CONSTANT_ADDRESS_P (addr)) + XEXP (operands[1], 0) = force_reg (Pmode, addr); +}") + +(define_insn "*call_value" + [(set (match_operand 0 "register_operand" "=r,r") + (call (mem:SI (match_operand:SI 1 "call_operand" "r,s")) + (match_operand 2 "" ""))) + (clobber (reg:SI RA_REGNUM))] + "" + "@ + call %1 + calli %1" + [(set_attr "type" "call,icall")] +) + +(define_insn "return_internal" + [(use (match_operand:SI 0 "register_operand" "r")) + (return)] + "" + "b %0" + [(set_attr "type" "uibranch")] +) + +(define_insn "return" + [(return)] + "lm32_can_use_return ()" + "ret" + [(set_attr "type" "uibranch")] +) + +;; --------------------------------- +;; switch/case statements +;; --------------------------------- + +(define_expand "tablejump" + [(set (pc) (match_operand 0 "register_operand" "")) + (use (label_ref (match_operand 1 "" "")))] + "" + " +{ + rtx target = operands[0]; + if (flag_pic) + { + /* For PIC, the table entry is relative to the start of the table. 
*/ + rtx label = gen_reg_rtx (SImode); + target = gen_reg_rtx (SImode); + emit_move_insn (label, gen_rtx_LABEL_REF (SImode, operands[1])); + emit_insn (gen_addsi3 (target, operands[0], label)); + } + emit_jump_insn (gen_tablejumpsi (target, operands[1])); + DONE; +}") + +(define_insn "tablejumpsi" + [(set (pc) (match_operand:SI 0 "register_operand" "r")) + (use (label_ref (match_operand 1 "" "")))] + "" + "b %0" + [(set_attr "type" "ubranch")] +) + +;; --------------------------------- +;; arithmetic +;; --------------------------------- + +(define_insn "addsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (plus:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "" + "@ + add %0, %z1, %2 + addi %0, %z1, %2" + [(set_attr "type" "arith")] +) + +(define_insn "subsi3" + [(set (match_operand:SI 0 "register_operand" "=r") + (minus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ") + (match_operand:SI 2 "register_or_zero_operand" "rJ")))] + "" + "sub %0, %z1, %z2" + [(set_attr "type" "arith")] +) + +(define_insn "mulsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (mult:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_K_operand" "r,K")))] + "TARGET_MULTIPLY_ENABLED" + "@ + mul %0, %z1, %2 + muli %0, %z1, %2" + [(set_attr "type" "multiply")] +) + +(define_insn "udivsi3" + [(set (match_operand:SI 0 "register_operand" "=r") + (udiv:SI (match_operand:SI 1 "register_or_zero_operand" "rJ") + (match_operand:SI 2 "register_operand" "r")))] + "TARGET_DIVIDE_ENABLED" + "divu %0, %z1, %2" + [(set_attr "type" "divide")] +) + +(define_insn "umodsi3" + [(set (match_operand:SI 0 "register_operand" "=r") + (umod:SI (match_operand:SI 1 "register_or_zero_operand" "rJ") + (match_operand:SI 2 "register_operand" "r")))] + "TARGET_DIVIDE_ENABLED" + "modu %0, %z1, %2" + [(set_attr "type" "divide")] +) + +;; --------------------------------- +;; negation and inversion +;; --------------------------------- + +(define_insn "negsi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (neg:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")))] + "" + "sub %0, r0, %z1" + [(set_attr "type" "arith")] +) + +(define_insn "one_cmplsi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (not:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")))] + "" + "not %0, %z1" + [(set_attr "type" "arith")] +) + +;; --------------------------------- +;; logical +;; --------------------------------- + +(define_insn "andsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (and:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "" + "@ + and %0, %z1, %2 + andi %0, %z1, %2" + [(set_attr "type" "arith")] +) + +(define_insn "iorsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ior:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "" + "@ + or %0, %z1, %2 + ori %0, %z1, %2" + [(set_attr "type" "arith")] +) + +(define_insn "xorsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (xor:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "" + "@ + xor %0, %z1, %2 + xori %0, %z1, %2" + [(set_attr "type" "arith")] +) + +(define_insn "*norsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (not:SI (ior:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + 
(match_operand:SI 2 "register_or_L_operand" "r,L"))))] + "" + "@ + nor %0, %z1, %2 + nori %0, %z1, %2" + [(set_attr "type" "arith")] +) + +(define_insn "*xnorsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (not:SI (xor:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L"))))] + "" + "@ + xnor %0, %z1, %2 + xnori %0, %z1, %2" + [(set_attr "type" "arith")] +) + +;; --------------------------------- +;; shifts +;; --------------------------------- + +(define_expand "ashlsi3" + [(set (match_operand:SI 0 "register_operand" "") + (ashift:SI (match_operand:SI 1 "register_or_zero_operand" "") + (match_operand:SI 2 "register_or_L_operand" "")))] + "" +{ + if (!TARGET_BARREL_SHIFT_ENABLED) + { + if (!optimize_size + && satisfies_constraint_L (operands[2]) + && INTVAL (operands[2]) <= 8) + { + int i; + int shifts = INTVAL (operands[2]); + rtx one = GEN_INT (1); + + if (shifts == 0) + emit_move_insn (operands[0], operands[1]); + else + emit_insn (gen_addsi3 (operands[0], operands[1], operands[1])); + for (i = 1; i < shifts; i++) + emit_insn (gen_addsi3 (operands[0], operands[0], operands[0])); + DONE; + } + else + FAIL; + } +}) + +(define_insn "*ashlsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ashift:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "TARGET_BARREL_SHIFT_ENABLED" + "@ + sl %0, %z1, %2 + sli %0, %z1, %2" + [(set_attr "type" "shift")] +) + +(define_expand "ashrsi3" + [(set (match_operand:SI 0 "register_operand" "") + (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "") + (match_operand:SI 2 "register_or_L_operand" "")))] + "" +{ + if (!TARGET_BARREL_SHIFT_ENABLED) + { + if (!optimize_size + && satisfies_constraint_L (operands[2]) + && INTVAL (operands[2]) <= 8) + { + int i; + int shifts = INTVAL (operands[2]); + rtx one = GEN_INT (1); + + if (shifts == 0) + emit_move_insn (operands[0], operands[1]); + else + emit_insn (gen_ashrsi3_1bit (operands[0], operands[1], one)); + for (i = 1; i < shifts; i++) + emit_insn (gen_ashrsi3_1bit (operands[0], operands[0], one)); + DONE; + } + else + FAIL; + } +}) + +(define_insn "*ashrsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "TARGET_BARREL_SHIFT_ENABLED" + "@ + sr %0, %z1, %2 + sri %0, %z1, %2" + [(set_attr "type" "shift")] +) + +(define_insn "ashrsi3_1bit" + [(set (match_operand:SI 0 "register_operand" "=r") + (ashiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ") + (match_operand:SI 2 "constant_M_operand" "M")))] + "!TARGET_BARREL_SHIFT_ENABLED" + "sri %0, %z1, %2" + [(set_attr "type" "shift")] +) + +(define_expand "lshrsi3" + [(set (match_operand:SI 0 "register_operand" "") + (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "") + (match_operand:SI 2 "register_or_L_operand" "")))] + "" +{ + if (!TARGET_BARREL_SHIFT_ENABLED) + { + if (!optimize_size + && satisfies_constraint_L (operands[2]) + && INTVAL (operands[2]) <= 8) + { + int i; + int shifts = INTVAL (operands[2]); + rtx one = GEN_INT (1); + + if (shifts == 0) + emit_move_insn (operands[0], operands[1]); + else + emit_insn (gen_lshrsi3_1bit (operands[0], operands[1], one)); + for (i = 1; i < shifts; i++) + emit_insn (gen_lshrsi3_1bit (operands[0], operands[0], one)); + DONE; + } + else + FAIL; + } +}) + +(define_insn "*lshrsi3" + [(set (match_operand:SI 0 
"register_operand" "=r,r") + (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ") + (match_operand:SI 2 "register_or_L_operand" "r,L")))] + "TARGET_BARREL_SHIFT_ENABLED" + "@ + sru %0, %z1, %2 + srui %0, %z1, %2" + [(set_attr "type" "shift")] +) + +(define_insn "lshrsi3_1bit" + [(set (match_operand:SI 0 "register_operand" "=r") + (lshiftrt:SI (match_operand:SI 1 "register_or_zero_operand" "rJ") + (match_operand:SI 2 "constant_M_operand" "M")))] + "!TARGET_BARREL_SHIFT_ENABLED" + "srui %0, %z1, %2" + [(set_attr "type" "shift")] +) + +;; --------------------------------- +;; function entry / exit +;; --------------------------------- + +(define_expand "prologue" + [(const_int 1)] + "" + " +{ + lm32_expand_prologue (); + DONE; +}") + +(define_expand "epilogue" + [(return)] + "" + " +{ + lm32_expand_epilogue (); + DONE; +}") + +;; --------------------------------- +;; nop +;; --------------------------------- + +(define_insn "nop" + [(const_int 0)] + "" + "nop" + [(set_attr "type" "arith")] +) + +;; --------------------------------- +;; blockage +;; --------------------------------- + +;; used to stop the scheduler from +;; scheduling code across certain boundaries + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] + "" + "" + [(set_attr "length" "0")] +) -- cgit v1.2.3