summaryrefslogtreecommitdiff
path: root/gcc/config/v850
diff options
context:
space:
mode:
authorupstream source tree <ports@midipix.org>2015-03-15 20:14:05 -0400
committerupstream source tree <ports@midipix.org>2015-03-15 20:14:05 -0400
commit554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree976dc5ab7fddf506dadce60ae936f43f58787092 /gcc/config/v850
downloadcbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2
cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz
obtained gcc-4.6.4.tar.bz2 from upstream website; upstream
verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository.
Diffstat (limited to 'gcc/config/v850')
-rw-r--r--gcc/config/v850/constraints.md108
-rw-r--r--gcc/config/v850/lib1funcs.asm2330
-rw-r--r--gcc/config/v850/predicates.md501
-rw-r--r--gcc/config/v850/t-v850114
-rw-r--r--gcc/config/v850/t-v850e112
-rw-r--r--gcc/config/v850/v850-c.c273
-rw-r--r--gcc/config/v850/v850-modes.def29
-rw-r--r--gcc/config/v850/v850-protos.h73
-rw-r--r--gcc/config/v850/v850.c3226
-rw-r--r--gcc/config/v850/v850.h987
-rw-r--r--gcc/config/v850/v850.md2667
-rw-r--r--gcc/config/v850/v850.opt106
12 files changed, 10526 insertions, 0 deletions
diff --git a/gcc/config/v850/constraints.md b/gcc/config/v850/constraints.md
new file mode 100644
index 000000000..eecdab3d4
--- /dev/null
+++ b/gcc/config/v850/constraints.md
@@ -0,0 +1,108 @@
+;; Constraint definitions for V850.
+;; Copyright (C) 2011 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; "e" is for instructions that operate on a register pair and hence
+;; need an even-numbered first register.
+(define_register_constraint "e" "EVEN_REGS"
+ "@internal")
+
+;; Integer constraints.
+(define_constraint "I"
+ "Integer constant 0."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "J"
+ "A signed 5-bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -16 && ival <= 15")))
+
+(define_constraint "K"
+ "A signed 16-bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -32768 && ival <= 32767")))
+
+;; Both arms require the low 16 bits of ival to be clear, i.e. the
+;; constant is exactly representable by loading its high halfword.
+(define_constraint "L"
+ "A valid constant for a movhi instruction."
+ (and (match_code "const_int")
+ (ior (match_test "(ival | 0x7fff0000) == 0x7fff0000")
+ (match_test "(ival | 0x7fff0000) + 0x10000 == 0"))))
+
+(define_constraint "M"
+ "An unsigned 16-bit immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 65535")))
+
+(define_constraint "N"
+ "An unsigned 5-bit immediate in shift instructions."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 31")))
+
+;; NOTE(review): the accepted range is [-255, 255], not the full signed
+;; 9-bit range [-256, 255]; -256 is excluded here.
+(define_constraint "O"
+ "A signed 9-bit immediate for word multiply instructions."
+ (and (match_code "const_int")
+ (match_test "ival >= -255 && ival <= 255")))
+
+;; "P" matches nothing (the test is constant 0); the letter is reserved.
+(define_constraint "P"
+ "@internal"
+ (and (match_code "const_int")
+ (match_test "0")))
+
+;; Floating-point constraints.
+(define_constraint "G"
+ "A zero of some form."
+ (and (match_code "const_double")
+ (ior (match_test "GET_MODE_CLASS (mode) == MODE_FLOAT")
+ (match_test "GET_MODE_CLASS (mode) == MODE_INT"))
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; "H" matches nothing (the test is constant 0); the letter is reserved.
+(define_constraint "H"
+ "@internal"
+ (and (match_code "const_double")
+ (match_test "0")))
+
+;;; Extra constraints.
+(define_constraint "Q"
+ "A memory address that does not contain a symbol address."
+ (and (match_code "mem")
+ (match_test "ep_memory_operand (op, mode, FALSE)")))
+
+(define_constraint "R"
+ "@internal"
+ (match_test "special_symbolref_operand (op, VOIDmode)"))
+
+;; A symbol that is not placed in the zero-data area.
+(define_constraint "S"
+ "@internal"
+ (and (match_code "symbol_ref")
+ (match_test "!SYMBOL_REF_ZDA_P (op)")))
+
+;; Like "Q" but calls ep_memory_operand with its third argument TRUE.
+(define_constraint "T"
+ "@internal"
+ (match_test "ep_memory_operand (op, mode, TRUE)"))
+
+;; A zero-data-area symbol, either bare or in the form (const (plus
+;; (symbol_ref) ...)).
+(define_constraint "U"
+ "@internal"
+ (ior (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_ZDA_P (op)"))
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (op, 0)) == PLUS")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF")
+ (match_test "SYMBOL_REF_ZDA_P (XEXP (XEXP (op, 0), 0))"))))
+
+(define_constraint "W"
+ "@internal"
+ (match_test "disp23_operand (op, VOIDmode)"))
diff --git a/gcc/config/v850/lib1funcs.asm b/gcc/config/v850/lib1funcs.asm
new file mode 100644
index 000000000..04e9b1e0a
--- /dev/null
+++ b/gcc/config/v850/lib1funcs.asm
@@ -0,0 +1,2330 @@
+/* libgcc routines for NEC V850.
+ Copyright (C) 1996, 1997, 2002, 2005, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef L_mulsi3
+ .text
+ .globl ___mulsi3
+ .type ___mulsi3,@function
+/* 32x32 -> 32 multiply: r10 = r6 * r7. */
+___mulsi3:
+#ifdef __v850__
+/*
+ #define SHIFT 12
+ #define MASK ((1 << SHIFT) - 1)
+
+ #define STEP(i, j) \
+ ({ \
+ short a_part = (a >> (i)) & MASK; \
+ short b_part = (b >> (j)) & MASK; \
+ int res = (((int) a_part) * ((int) b_part)); \
+ res; \
+ })
+
+ int
+ __mulsi3 (unsigned a, unsigned b)
+ {
+ return STEP (0, 0) +
+ ((STEP (SHIFT, 0) + STEP (0, SHIFT)) << SHIFT) +
+ ((STEP (0, 2 * SHIFT) + STEP (SHIFT, SHIFT) + STEP (2 * SHIFT, 0))
+ << (2 * SHIFT));
+ }
+*/
+ /* NOTE(review): the C sketch above is illustrative only; the code
+    below actually splits each operand into 15-bit pieces (mask 32767,
+    shift 15) so the partial products fit the 16x16 mulh instruction. */
+ mov r6, r14
+ movea lo(32767), r0, r10
+ and r10, r14
+ mov r7, r15
+ and r10, r15
+ shr 15, r6
+ mov r6, r13
+ and r10, r13
+ shr 15, r7
+ mov r7, r12
+ and r10, r12
+ shr 15, r6
+ shr 15, r7
+ mov r14, r10
+ mulh r15, r10
+ mov r14, r11
+ mulh r12, r11
+ mov r13, r16
+ mulh r15, r16
+ mulh r14, r7
+ mulh r15, r6
+ add r16, r11
+ mulh r13, r12
+ shl 15, r11
+ add r11, r10
+ add r12, r7
+ add r6, r7
+ shl 30, r7
+ add r7, r10
+ jmp [r31]
+#endif /* __v850__ */
+#if defined(__v850e__) || defined(__v850ea__) || defined(__v850e2__) || defined(__v850e2v3__)
+ /* This routine is almost unnecessary because gcc
+    generates the MUL instruction for the RTX mulsi3.
+    But if someone wants to link his application with
+    previously compiled v850 objects then they will
+    need this function. */
+
+ /* It isn't good to use the instruction sequence
+    mul r7, r6, r0
+    mov r6, r10
+    because there is a RAW hazard between them: MUL
+    takes 2 cycles in the EX stage, so the MOV would
+    have to wait 1 cycle. */
+ mov r7, r10
+ mul r6, r10, r0
+ jmp [r31]
+#endif /* __v850e__ */
+ .size ___mulsi3,.-___mulsi3
+#endif /* L_mulsi3 */
+
+
+#ifdef L_udivsi3
+ .text
+ .global ___udivsi3
+ .type ___udivsi3,@function
+/* Unsigned division.  In: r6 = dividend, r7 = divisor.
+   Out: r10 = quotient.  Classic shift-and-subtract: scale the
+   divisor (r7) and the quotient bit (r12) up until the divisor
+   exceeds the dividend or reaches the top bit (mask in r13), then
+   step back down, subtracting and accumulating quotient bits. */
+___udivsi3:
+#ifdef __v850__
+ mov 1,r12
+ mov 0,r10
+ cmp r6,r7
+ bnl .L12
+ movhi hi(-2147483648),r0,r13
+ cmp r0,r7
+ blt .L12
+.L4:
+ shl 1,r7
+ shl 1,r12
+ cmp r6,r7
+ bnl .L12
+ cmp r0,r12
+ be .L8
+ mov r7,r19
+ and r13,r19
+ be .L4
+ br .L12
+.L9:
+ cmp r7,r6
+ bl .L10
+ sub r7,r6
+ or r12,r10
+.L10:
+ shr 1,r12
+ shr 1,r7
+.L12:
+ cmp r0,r12
+ bne .L9
+.L8:
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ mov r6, r10
+ divu r7, r10, r0
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___udivsi3,.-___udivsi3
+#endif
+
+#ifdef L_divsi3
+ .text
+ .globl ___divsi3
+ .type ___divsi3,@function
+/* Signed division.  In: r6 = dividend, r7 = divisor.
+   Out: r10 = quotient.  r22 tracks the result sign: it starts at 1
+   and is negated once per negative operand; both operands are made
+   non-negative, __udivsi3 does the work, and the quotient is negated
+   if r22 ended up negative.  r31 and r22 are preserved on the stack. */
+___divsi3:
+#ifdef __v850__
+ add -8,sp
+ st.w r31,4[sp]
+ st.w r22,0[sp]
+ mov 1,r22
+ tst r7,r7
+ bp .L3
+ subr r0,r7
+ subr r0,r22
+.L3:
+ tst r6,r6
+ bp .L4
+ subr r0,r6
+ subr r0,r22
+.L4:
+ jarl ___udivsi3,r31
+ cmp r0,r22
+ bp .L7
+ subr r0,r10
+.L7:
+ ld.w 0[sp],r22
+ ld.w 4[sp],r31
+ add 8,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ mov r6, r10
+ div r7, r10, r0
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___divsi3,.-___divsi3
+#endif
+
+#ifdef L_umodsi3
+ .text
+ .globl ___umodsi3
+ .type ___umodsi3,@function
+/* Unsigned remainder.  In: r6 = dividend, r7 = divisor.
+   Out: r10 = r6 - (r6 / r7) * r7, built from __udivsi3 and
+   __mulsi3; r31 and both arguments are preserved on the stack
+   across the calls. */
+___umodsi3:
+#ifdef __v850__
+ add -12,sp
+ st.w r31,8[sp]
+ st.w r7,4[sp]
+ st.w r6,0[sp]
+ jarl ___udivsi3,r31
+ ld.w 4[sp],r7
+ mov r10,r6
+ jarl ___mulsi3,r31
+ ld.w 0[sp],r6
+ subr r6,r10
+ ld.w 8[sp],r31
+ add 12,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3.  divu puts the remainder
+    in its third operand (r10). */
+ divu r7, r6, r10
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___umodsi3,.-___umodsi3
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+ .text
+ .globl ___modsi3
+ .type ___modsi3,@function
+/* Signed remainder.  Same scheme as __umodsi3 but via __divsi3. */
+___modsi3:
+#ifdef __v850__
+ add -12,sp
+ st.w r31,8[sp]
+ st.w r7,4[sp]
+ st.w r6,0[sp]
+ jarl ___divsi3,r31
+ ld.w 4[sp],r7
+ mov r10,r6
+ jarl ___mulsi3,r31
+ ld.w 0[sp],r6
+ subr r6,r10
+ ld.w 8[sp],r31
+ add 12,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3.  div puts the remainder
+    in its third operand (r10). */
+ div r7, r6, r10
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___modsi3,.-___modsi3
+#endif /* L_modsi3 */
+
+#ifdef L_save_2
+ .text
+ .align 2
+ .globl __save_r2_r29
+ .type __save_r2_r29,@function
+ /* Allocate space and save registers 2, 20 .. 29 on the stack. */
+ /* Called via: jalr __save_r2_r29,r10. */
+__save_r2_r29:
+#ifdef __EP__
+ /* r1 (the assembler temporary) preserves the caller's ep so the
+    short sst.w encoding can be used; frame is 44 bytes with
+    r29 at 0 .. r20 at 36 and r2 at 40. */
+ mov ep,r1
+ addi -44,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r2,40[ep]
+ mov r1,ep
+#else
+ addi -44,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r2,40[sp]
+#endif
+ jmp [r10]
+ .size __save_r2_r29,.-__save_r2_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r2_r29. */
+ .align 2
+ .globl __return_r2_r29
+ .type __return_r2_r29,@function
+__return_r2_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r2_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r2
+ addi 44,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ ld.w 40[sp],r2
+ addi 44,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r2_r29,.-__return_r2_r29
+#endif /* L_save_2 */
+
+#ifdef L_save_20
+ .text
+ .align 2
+ .globl __save_r20_r29
+ .type __save_r20_r29,@function
+ /* Allocate space and save registers 20 .. 29 on the stack. */
+ /* Called via: jalr __save_r20_r29,r10. */
+__save_r20_r29:
+#ifdef __EP__
+ /* 40-byte frame: r29 at 0 .. r20 at 36; r1 preserves ep. */
+ mov ep,r1
+ addi -40,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ mov r1,ep
+#else
+ addi -40,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+#endif
+ jmp [r10]
+ .size __save_r20_r29,.-__save_r20_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r20_r29. */
+ .align 2
+ .globl __return_r20_r29
+ .type __return_r20_r29,@function
+__return_r20_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r20_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ addi 40,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ addi 40,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r20_r29,.-__return_r20_r29
+#endif /* L_save_20 */
+
+#ifdef L_save_21
+ .text
+ .align 2
+ .globl __save_r21_r29
+ .type __save_r21_r29,@function
+ /* Allocate space and save registers 21 .. 29 on the stack. */
+ /* Called via: jalr __save_r21_r29,r10. */
+__save_r21_r29:
+#ifdef __EP__
+ /* 36-byte frame: r29 at 0 .. r21 at 32; r1 preserves ep. */
+ mov ep,r1
+ addi -36,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ mov r1,ep
+#else
+ addi -36,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+#endif
+ jmp [r10]
+ .size __save_r21_r29,.-__save_r21_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r21_r29. */
+ .align 2
+ .globl __return_r21_r29
+ .type __return_r21_r29,@function
+__return_r21_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r21_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ addi 36,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ addi 36,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r21_r29,.-__return_r21_r29
+#endif /* L_save_21 */
+
+#ifdef L_save_22
+ .text
+ .align 2
+ .globl __save_r22_r29
+ .type __save_r22_r29,@function
+ /* Allocate space and save registers 22 .. 29 on the stack. */
+ /* Called via: jalr __save_r22_r29,r10. */
+__save_r22_r29:
+#ifdef __EP__
+ /* 32-byte frame: r29 at 0 .. r22 at 28; r1 preserves ep. */
+ mov ep,r1
+ addi -32,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ mov r1,ep
+#else
+ addi -32,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+#endif
+ jmp [r10]
+ .size __save_r22_r29,.-__save_r22_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r22_r29. */
+ .align 2
+ .globl __return_r22_r29
+ .type __return_r22_r29,@function
+__return_r22_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r22_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ addi 32,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ addi 32,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r22_r29,.-__return_r22_r29
+#endif /* L_save_22 */
+
+#ifdef L_save_23
+ .text
+ .align 2
+ .globl __save_r23_r29
+ .type __save_r23_r29,@function
+ /* Allocate space and save registers 23 .. 29 on the stack. */
+ /* Called via: jalr __save_r23_r29,r10. */
+__save_r23_r29:
+#ifdef __EP__
+ /* 28-byte frame: r29 at 0 .. r23 at 24; r1 preserves ep. */
+ mov ep,r1
+ addi -28,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ mov r1,ep
+#else
+ addi -28,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+#endif
+ jmp [r10]
+ .size __save_r23_r29,.-__save_r23_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r23_r29. */
+ .align 2
+ .globl __return_r23_r29
+ .type __return_r23_r29,@function
+__return_r23_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r23_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ addi 28,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ addi 28,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r23_r29,.-__return_r23_r29
+#endif /* L_save_23 */
+
+#ifdef L_save_24
+ .text
+ .align 2
+ .globl __save_r24_r29
+ .type __save_r24_r29,@function
+ /* Allocate space and save registers 24 .. 29 on the stack. */
+ /* Called via: jalr __save_r24_r29,r10. */
+__save_r24_r29:
+#ifdef __EP__
+ /* 24-byte frame: r29 at 0 .. r24 at 20; r1 preserves ep. */
+ mov ep,r1
+ addi -24,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ mov r1,ep
+#else
+ addi -24,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+#endif
+ jmp [r10]
+ .size __save_r24_r29,.-__save_r24_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r24_r29. */
+ .align 2
+ .globl __return_r24_r29
+ .type __return_r24_r29,@function
+__return_r24_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r24_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ addi 24,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ addi 24,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r24_r29,.-__return_r24_r29
+#endif /* L_save_24 */
+
+#ifdef L_save_25
+ .text
+ .align 2
+ .globl __save_r25_r29
+ .type __save_r25_r29,@function
+ /* Allocate space and save registers 25 .. 29 on the stack. */
+ /* Called via: jalr __save_r25_r29,r10. */
+__save_r25_r29:
+#ifdef __EP__
+ /* 20-byte frame: r29 at 0 .. r25 at 16; r1 preserves ep. */
+ mov ep,r1
+ addi -20,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ mov r1,ep
+#else
+ addi -20,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+#endif
+ jmp [r10]
+ .size __save_r25_r29,.-__save_r25_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r25_r29. */
+ .align 2
+ .globl __return_r25_r29
+ .type __return_r25_r29,@function
+__return_r25_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r25_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ addi 20,sp,sp
+ mov r1,ep
+#else
+ /* Fixed: this non-EP path must address via sp; ep is never set up
+    here, so the previous [ep] operands read through a stale pointer. */
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ addi 20,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r25_r29,.-__return_r25_r29
+#endif /* L_save_25 */
+
+#ifdef L_save_26
+ .text
+ .align 2
+ .globl __save_r26_r29
+ .type __save_r26_r29,@function
+ /* Allocate space and save registers 26 .. 29 on the stack. */
+ /* Called via: jalr __save_r26_r29,r10. */
+__save_r26_r29:
+#ifdef __EP__
+ /* 16-byte frame: r29 at 0 .. r26 at 12; r1 preserves ep. */
+ mov ep,r1
+ add -16,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ mov r1,ep
+#else
+ add -16,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+#endif
+ jmp [r10]
+ .size __save_r26_r29,.-__save_r26_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r26_r29. */
+ .align 2
+ .globl __return_r26_r29
+ .type __return_r26_r29,@function
+__return_r26_r29:
+#ifdef __EP__
+ /* Offsets mirror __save_r26_r29. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ addi 16,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ addi 16,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r26_r29,.-__return_r26_r29
+#endif /* L_save_26 */
+
+#ifdef L_save_27
+ .text
+ .align 2
+ .globl __save_r27_r29
+ .type __save_r27_r29,@function
+ /* Allocate space and save registers 27 .. 29 on the stack. */
+ /* Called via: jalr __save_r27_r29,r10. */
+ /* The frame is too small for the __EP__ short-store trick to pay
+    off, so there is only one code path here. */
+__save_r27_r29:
+ add -12,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ jmp [r10]
+ .size __save_r27_r29,.-__save_r27_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r27_r29. */
+ .align 2
+ .globl __return_r27_r29
+ .type __return_r27_r29,@function
+__return_r27_r29:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ add 12,sp
+ jmp [r31]
+ .size __return_r27_r29,.-__return_r27_r29
+#endif /* L_save_27 */
+
+#ifdef L_save_28
+ .text
+ .align 2
+ .globl __save_r28_r29
+ .type __save_r28_r29,@function
+ /* Allocate space and save registers 28,29 on the stack. */
+ /* Called via: jalr __save_r28_r29,r10. */
+__save_r28_r29:
+ add -8,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ jmp [r10]
+ .size __save_r28_r29,.-__save_r28_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r28_r29. */
+ .align 2
+ .globl __return_r28_r29
+ .type __return_r28_r29,@function
+__return_r28_r29:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ add 8,sp
+ jmp [r31]
+ .size __return_r28_r29,.-__return_r28_r29
+#endif /* L_save_28 */
+
+#ifdef L_save_29
+ .text
+ .align 2
+ .globl __save_r29
+ .type __save_r29,@function
+ /* Allocate space and save register 29 on the stack. */
+ /* Called via: jalr __save_r29,r10. */
+__save_r29:
+ add -4,sp
+ st.w r29,0[sp]
+ jmp [r10]
+ .size __save_r29,.-__save_r29
+
+ /* Restore saved register 29, deallocate stack and return to the user. */
+ /* Called via: jr __return_r29. */
+ .align 2
+ .globl __return_r29
+ .type __return_r29,@function
+__return_r29:
+ ld.w 0[sp],r29
+ add 4,sp
+ jmp [r31]
+ .size __return_r29,.-__return_r29
+#endif /* L_save_29 */
+
+#ifdef L_save_2c
+ .text
+ .align 2
+ .globl __save_r2_r31
+ .type __save_r2_r31,@function
+ /* Allocate space and save registers 2, 20 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r2_r31,r10. */
+__save_r2_r31:
+#ifdef __EP__
+ /* 48-byte frame: r29 at 0 .. r20 at 36, r2 at 40, r31 at 44;
+    r1 preserves ep. */
+ mov ep,r1
+ addi -48,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r2,40[ep]
+ sst.w r31,44[ep]
+ mov r1,ep
+#else
+ addi -48,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r2,40[sp]
+ st.w r31,44[sp]
+#endif
+ jmp [r10]
+ .size __save_r2_r31,.-__save_r2_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r2_r31. */
+ .align 2
+ .globl __return_r2_r31
+ .type __return_r2_r31,@function
+__return_r2_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r2
+ sld.w 44[ep],r31
+ addi 48,sp,sp
+ mov r1,ep
+#else
+ /* Fixed: the restore offsets must mirror the save layout above
+    (r29 at 0[sp] .. r2 at 40[sp], r31 at 44[sp]); they were reversed,
+    which scrambled every restored register. */
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ ld.w 40[sp],r2
+ ld.w 44[sp],r31
+ addi 48,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r2_r31,.-__return_r2_r31
+#endif /* L_save_2c */
+
+#ifdef L_save_20c
+ .text
+ .align 2
+ .globl __save_r20_r31
+ .type __save_r20_r31,@function
+ /* Allocate space and save registers 20 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r20_r31,r10. */
+__save_r20_r31:
+#ifdef __EP__
+ /* 44-byte frame: r29 at 0 .. r20 at 36, r31 at 40; r1 preserves ep. */
+ mov ep,r1
+ addi -44,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r31,40[ep]
+ mov r1,ep
+#else
+ addi -44,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r31,40[sp]
+#endif
+ jmp [r10]
+ .size __save_r20_r31,.-__save_r20_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r20_r31. */
+ .align 2
+ .globl __return_r20_r31
+ .type __return_r20_r31,@function
+__return_r20_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r20_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r31
+ addi 44,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ ld.w 40[sp],r31
+ addi 44,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r20_r31,.-__return_r20_r31
+#endif /* L_save_20c */
+
+#ifdef L_save_21c
+ .text
+ .align 2
+ .globl __save_r21_r31
+ .type __save_r21_r31,@function
+ /* Allocate space and save registers 21 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r21_r31,r10. */
+ /* NOTE(review): unlike its siblings, this routine places the final
+    "jmp [r10]" inside each preprocessor branch rather than after
+    the #endif; the behavior is the same. */
+__save_r21_r31:
+#ifdef __EP__
+ /* 40-byte frame: r29 at 0 .. r21 at 32, r31 at 36; r1 preserves ep. */
+ mov ep,r1
+ addi -40,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r31,36[ep]
+ mov r1,ep
+ jmp [r10]
+#else
+ addi -40,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r31,36[sp]
+ jmp [r10]
+#endif
+ .size __save_r21_r31,.-__save_r21_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r21_r31. */
+ .align 2
+ .globl __return_r21_r31
+ .type __return_r21_r31,@function
+__return_r21_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r21_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r31
+ addi 40,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r31
+ addi 40,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r21_r31,.-__return_r21_r31
+#endif /* L_save_21c */
+
+#ifdef L_save_22c
+ .text
+ .align 2
+ .globl __save_r22_r31
+ .type __save_r22_r31,@function
+ /* Allocate space and save registers 22 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r22_r31,r10. */
+__save_r22_r31:
+#ifdef __EP__
+ /* 36-byte frame: r29 at 0 .. r22 at 28, r31 at 32; r1 preserves ep. */
+ mov ep,r1
+ addi -36,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r31,32[ep]
+ mov r1,ep
+#else
+ addi -36,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r31,32[sp]
+#endif
+ jmp [r10]
+ .size __save_r22_r31,.-__save_r22_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r22_r31. */
+ .align 2
+ .globl __return_r22_r31
+ .type __return_r22_r31,@function
+__return_r22_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r22_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r31
+ addi 36,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r31
+ addi 36,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r22_r31,.-__return_r22_r31
+#endif /* L_save_22c */
+
+#ifdef L_save_23c
+ .text
+ .align 2
+ .globl __save_r23_r31
+ .type __save_r23_r31,@function
+ /* Allocate space and save registers 23 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r23_r31,r10. */
+__save_r23_r31:
+#ifdef __EP__
+ /* 32-byte frame: r29 at 0 .. r23 at 24, r31 at 28; r1 preserves ep. */
+ mov ep,r1
+ addi -32,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r31,28[ep]
+ mov r1,ep
+#else
+ addi -32,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r31,28[sp]
+#endif
+ jmp [r10]
+ .size __save_r23_r31,.-__save_r23_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r23_r31. */
+ .align 2
+ .globl __return_r23_r31
+ .type __return_r23_r31,@function
+__return_r23_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r23_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r31
+ addi 32,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r31
+ addi 32,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r23_r31,.-__return_r23_r31
+#endif /* L_save_23c */
+
+#ifdef L_save_24c
+ .text
+ .align 2
+ .globl __save_r24_r31
+ .type __save_r24_r31,@function
+ /* Allocate space and save registers 24 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r24_r31,r10. */
+__save_r24_r31:
+#ifdef __EP__
+ /* 28-byte frame: r29 at 0 .. r24 at 20, r31 at 24; r1 preserves ep. */
+ mov ep,r1
+ addi -28,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r31,24[ep]
+ mov r1,ep
+#else
+ addi -28,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r31,24[sp]
+#endif
+ jmp [r10]
+ .size __save_r24_r31,.-__save_r24_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r24_r31. */
+ .align 2
+ .globl __return_r24_r31
+ .type __return_r24_r31,@function
+__return_r24_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r24_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r31
+ addi 28,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r31
+ addi 28,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r24_r31,.-__return_r24_r31
+#endif /* L_save_24c */
+
+#ifdef L_save_25c
+ .text
+ .align 2
+ .globl __save_r25_r31
+ .type __save_r25_r31,@function
+ /* Allocate space and save registers 25 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r25_r31,r10. */
+__save_r25_r31:
+#ifdef __EP__
+ /* 24-byte frame: r29 at 0 .. r25 at 16, r31 at 20; r1 preserves ep. */
+ mov ep,r1
+ addi -24,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r31,20[ep]
+ mov r1,ep
+#else
+ addi -24,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r31,20[sp]
+#endif
+ jmp [r10]
+ .size __save_r25_r31,.-__save_r25_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r25_r31. */
+ .align 2
+ .globl __return_r25_r31
+ .type __return_r25_r31,@function
+__return_r25_r31:
+#ifdef __EP__
+ /* Offsets mirror __save_r25_r31. */
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r31
+ addi 24,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r31
+ addi 24,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r25_r31,.-__return_r25_r31
+#endif /* L_save_25c */
+
+#ifdef L_save_26c
+	.text
+	.align	2
+	.globl	__save_r26_r31
+	.type	__save_r26_r31,@function
+	/* Out-of-line prologue: allocate 20 bytes and save callee-saved
+	   registers 26 .. 29 and the link register r31 on the stack.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	jalr __save_r26_r31,r10.  */
+__save_r26_r31:
+#ifdef __EP__
+	/* Short sst.w encoding via ep; caller's ep kept in r1.  */
+	mov	ep,r1
+	addi	-20,sp,sp
+	mov	sp,ep
+	sst.w	r29,0[ep]
+	sst.w	r28,4[ep]
+	sst.w	r27,8[ep]
+	sst.w	r26,12[ep]
+	sst.w	r31,16[ep]
+	mov	r1,ep
+#else
+	addi	-20,sp,sp
+	st.w	r29,0[sp]
+	st.w	r28,4[sp]
+	st.w	r27,8[sp]
+	st.w	r26,12[sp]
+	st.w	r31,16[sp]
+#endif
+	jmp	[r10]
+	.size	__save_r26_r31,.-__save_r26_r31
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	jr __return_r26_r31.  */
+	.align	2
+	.globl	__return_r26_r31
+	.type	__return_r26_r31,@function
+__return_r26_r31:
+#ifdef __EP__
+	mov	ep,r1
+	mov	sp,ep
+	sld.w	0[ep],r29
+	sld.w	4[ep],r28
+	sld.w	8[ep],r27
+	sld.w	12[ep],r26
+	sld.w	16[ep],r31
+	addi	20,sp,sp
+	mov	r1,ep
+#else
+	ld.w	0[sp],r29
+	ld.w	4[sp],r28
+	ld.w	8[sp],r27
+	ld.w	12[sp],r26
+	ld.w	16[sp],r31
+	addi	20,sp,sp
+#endif
+	jmp	[r31]
+	.size	__return_r26_r31,.-__return_r26_r31
+#endif /* L_save_26c */
+
+#ifdef L_save_27c
+	.text
+	.align	2
+	.globl	__save_r27_r31
+	.type	__save_r27_r31,@function
+	/* Out-of-line prologue: allocate 16 bytes and save callee-saved
+	   registers 27 .. 29 and the link register r31 on the stack.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	jalr __save_r27_r31,r10.  */
+__save_r27_r31:
+#ifdef __EP__
+	/* Short sst.w encoding via ep; caller's ep kept in r1.  */
+	mov	ep,r1
+	addi	-16,sp,sp
+	mov	sp,ep
+	sst.w	r29,0[ep]
+	sst.w	r28,4[ep]
+	sst.w	r27,8[ep]
+	sst.w	r31,12[ep]
+	mov	r1,ep
+#else
+	addi	-16,sp,sp
+	st.w	r29,0[sp]
+	st.w	r28,4[sp]
+	st.w	r27,8[sp]
+	st.w	r31,12[sp]
+#endif
+	jmp	[r10]
+	.size	__save_r27_r31,.-__save_r27_r31
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	jr __return_r27_r31.  */
+	.align	2
+	.globl	__return_r27_r31
+	.type	__return_r27_r31,@function
+__return_r27_r31:
+#ifdef __EP__
+	mov	ep,r1
+	mov	sp,ep
+	sld.w	0[ep],r29
+	sld.w	4[ep],r28
+	sld.w	8[ep],r27
+	sld.w	12[ep],r31
+	addi	16,sp,sp
+	mov	r1,ep
+#else
+	ld.w	0[sp],r29
+	ld.w	4[sp],r28
+	ld.w	8[sp],r27
+	ld.w	12[sp],r31
+	addi	16,sp,sp
+#endif
+	jmp	[r31]
+	.size	__return_r27_r31,.-__return_r27_r31
+#endif /* L_save_27c */
+
+#ifdef L_save_28c
+	.text
+	.align	2
+	.globl	__save_r28_r31
+	.type	__save_r28_r31,@function
+	/* Out-of-line prologue: allocate 12 bytes and save callee-saved
+	   registers 28 .. 29 and the link register r31 on the stack.
+	   Too few stores to benefit from the ep/sst.w path, so there is
+	   no __EP__ variant here.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	jalr __save_r28_r31,r10.  */
+__save_r28_r31:
+	addi	-12,sp,sp
+	st.w	r29,0[sp]
+	st.w	r28,4[sp]
+	st.w	r31,8[sp]
+	jmp	[r10]
+	.size	__save_r28_r31,.-__save_r28_r31
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	jr __return_r28_r31.  */
+	.align	2
+	.globl	__return_r28_r31
+	.type	__return_r28_r31,@function
+__return_r28_r31:
+	ld.w	0[sp],r29
+	ld.w	4[sp],r28
+	ld.w	8[sp],r31
+	addi	12,sp,sp
+	jmp	[r31]
+	.size	__return_r28_r31,.-__return_r28_r31
+#endif /* L_save_28c */
+
+#ifdef L_save_29c
+	.text
+	.align	2
+	.globl	__save_r29_r31
+	.type	__save_r29_r31,@function
+	/* Out-of-line prologue: allocate 8 bytes and save registers
+	   r29 & r31 on the stack.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	jalr __save_r29_r31,r10.  */
+__save_r29_r31:
+	addi	-8,sp,sp
+	st.w	r29,0[sp]
+	st.w	r31,4[sp]
+	jmp	[r10]
+	.size	__save_r29_r31,.-__save_r29_r31
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	jr __return_r29_r31.  */
+	.align	2
+	.globl	__return_r29_r31
+	.type	__return_r29_r31,@function
+__return_r29_r31:
+	ld.w	0[sp],r29
+	ld.w	4[sp],r31
+	addi	8,sp,sp
+	jmp	[r31]
+	.size	__return_r29_r31,.-__return_r29_r31
+#endif /* L_save_29c */
+
+#ifdef L_save_31c
+	.text
+	.align	2
+	.globl	__save_r31
+	.type	__save_r31,@function
+	/* Out-of-line prologue: allocate 4 bytes and save only the link
+	   register r31 on the stack.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	jalr __save_r31,r10.  */
+__save_r31:
+	addi	-4,sp,sp
+	st.w	r31,0[sp]
+	jmp	[r10]
+	.size	__save_r31,.-__save_r31
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	jr __return_r31.  */
+	.align	2
+	.globl	__return_r31
+	.type	__return_r31,@function
+__return_r31:
+	ld.w	0[sp],r31
+	addi	4,sp,sp
+	jmp	[r31]
+	.size	__return_r31,.-__return_r31
+#endif /* L_save_31c */
+
+#ifdef L_save_interrupt
+	.text
+	.align	2
+	.globl	__save_interrupt
+	.type	__save_interrupt,@function
+	/* Save registers ep, gp, r1 on stack and load ep/gp with the
+	   values the compiled code expects (__ep / __gp).  */
+	/* Note, 20 bytes of stack have already been allocated.  */
+	/* Called via:	jalr __save_interrupt,r10.  */
+__save_interrupt:
+	/* Already done by the compiler-generated interrupt stub:
+	   add -20,sp ; st.w r11,16[sp] ;  st.w r10,12[sp] ; */
+	st.w	ep,0[sp]
+	st.w	gp,4[sp]
+	st.w	r1,8[sp]
+	movhi	hi(__ep),r0,ep
+	movea	lo(__ep),ep,ep
+	movhi	hi(__gp),r0,gp
+	movea	lo(__gp),gp,gp
+	jmp	[r10]
+	.size	__save_interrupt,.-__save_interrupt
+
+	/* Restore saved registers, deallocate stack and return from the interrupt.  */
+	/* Called via:	jr __return_interrupt.  */
+	.align	2
+	.globl	__return_interrupt
+	.type	__return_interrupt,@function
+__return_interrupt:
+	ld.w	0[sp],ep
+	ld.w	4[sp],gp
+	ld.w	8[sp],r1
+	/* r10/r11 were stored by the interrupt stub, not by __save_interrupt.  */
+	ld.w	12[sp],r10
+	ld.w	16[sp],r11
+	addi	20,sp,sp
+	reti
+	.size	__return_interrupt,.-__return_interrupt
+#endif /* L_save_interrupt */
+
+#ifdef L_save_all_interrupt
+	.text
+	.align	2
+	.globl	__save_all_interrupt
+	.type	__save_all_interrupt,@function
+	/* Save all registers except for those saved in __save_interrupt.  */
+	/* 26 words are stored: r2, gp, r6-r9, r11-r29, r31 (104 bytes).  */
+	/* Allocate enough stack for all of the registers & 16 bytes of space.  */
+	/* Called via:	jalr __save_all_interrupt,r10.  */
+__save_all_interrupt:
+	addi	-104,sp,sp
+#ifdef __EP__
+	/* Use ep for the short sst.w encoding; caller's ep kept in r1.  */
+	mov	ep,r1
+	mov	sp,ep
+	sst.w	r31,100[ep]
+	sst.w	r2,96[ep]
+	sst.w	gp,92[ep]
+	sst.w	r6,88[ep]
+	sst.w	r7,84[ep]
+	sst.w	r8,80[ep]
+	sst.w	r9,76[ep]
+	sst.w	r11,72[ep]
+	sst.w	r12,68[ep]
+	sst.w	r13,64[ep]
+	sst.w	r14,60[ep]
+	sst.w	r15,56[ep]
+	sst.w	r16,52[ep]
+	sst.w	r17,48[ep]
+	sst.w	r18,44[ep]
+	sst.w	r19,40[ep]
+	sst.w	r20,36[ep]
+	sst.w	r21,32[ep]
+	sst.w	r22,28[ep]
+	sst.w	r23,24[ep]
+	sst.w	r24,20[ep]
+	sst.w	r25,16[ep]
+	sst.w	r26,12[ep]
+	sst.w	r27,8[ep]
+	sst.w	r28,4[ep]
+	sst.w	r29,0[ep]
+	mov	r1,ep
+#else
+	st.w	r31,100[sp]
+	st.w	r2,96[sp]
+	st.w	gp,92[sp]
+	st.w	r6,88[sp]
+	st.w	r7,84[sp]
+	st.w	r8,80[sp]
+	st.w	r9,76[sp]
+	st.w	r11,72[sp]
+	st.w	r12,68[sp]
+	st.w	r13,64[sp]
+	st.w	r14,60[sp]
+	st.w	r15,56[sp]
+	st.w	r16,52[sp]
+	st.w	r17,48[sp]
+	st.w	r18,44[sp]
+	st.w	r19,40[sp]
+	st.w	r20,36[sp]
+	st.w	r21,32[sp]
+	st.w	r22,28[sp]
+	st.w	r23,24[sp]
+	st.w	r24,20[sp]
+	st.w	r25,16[sp]
+	st.w	r26,12[sp]
+	st.w	r27,8[sp]
+	st.w	r28,4[sp]
+	st.w	r29,0[sp]
+#endif
+	jmp	[r10]
+	.size	__save_all_interrupt,.-__save_all_interrupt
+
+	.globl	__restore_all_interrupt
+	.type	__restore_all_interrupt,@function
+	/* Restore all registers saved in __save_all_interrupt and
+	   deallocate the stack space.  */
+	/* Called via:	jalr __restore_all_interrupt,r10.  */
+__restore_all_interrupt:
+#ifdef __EP__
+	mov	ep,r1
+	mov	sp,ep
+	sld.w	100[ep],r31
+	sld.w	96[ep],r2
+	sld.w	92[ep],gp
+	sld.w	88[ep],r6
+	sld.w	84[ep],r7
+	sld.w	80[ep],r8
+	sld.w	76[ep],r9
+	sld.w	72[ep],r11
+	sld.w	68[ep],r12
+	sld.w	64[ep],r13
+	sld.w	60[ep],r14
+	sld.w	56[ep],r15
+	sld.w	52[ep],r16
+	sld.w	48[ep],r17
+	sld.w	44[ep],r18
+	sld.w	40[ep],r19
+	sld.w	36[ep],r20
+	sld.w	32[ep],r21
+	sld.w	28[ep],r22
+	sld.w	24[ep],r23
+	sld.w	20[ep],r24
+	sld.w	16[ep],r25
+	sld.w	12[ep],r26
+	sld.w	8[ep],r27
+	sld.w	4[ep],r28
+	sld.w	0[ep],r29
+	mov	r1,ep
+#else
+	ld.w	100[sp],r31
+	ld.w	96[sp],r2
+	ld.w	92[sp],gp
+	ld.w	88[sp],r6
+	ld.w	84[sp],r7
+	ld.w	80[sp],r8
+	ld.w	76[sp],r9
+	ld.w	72[sp],r11
+	ld.w	68[sp],r12
+	ld.w	64[sp],r13
+	ld.w	60[sp],r14
+	ld.w	56[sp],r15
+	ld.w	52[sp],r16
+	ld.w	48[sp],r17
+	ld.w	44[sp],r18
+	ld.w	40[sp],r19
+	ld.w	36[sp],r20
+	ld.w	32[sp],r21
+	ld.w	28[sp],r22
+	ld.w	24[sp],r23
+	ld.w	20[sp],r24
+	ld.w	16[sp],r25
+	ld.w	12[sp],r26
+	ld.w	8[sp],r27
+	ld.w	4[sp],r28
+	ld.w	0[sp],r29
+#endif
+	addi	104,sp,sp
+	jmp	[r10]
+	.size	__restore_all_interrupt,.-__restore_all_interrupt
+#endif /* L_save_all_interrupt */
+
+#if defined(__v850e__) || defined(__v850e1__) || defined(__v850e2__) || defined(__v850e2v3__)
+#ifdef L_callt_save_r2_r29
+	/* Put these functions into the call table area.  */
+	.call_table_text
+
+	/* Allocate space and save registers 2, 20 .. 29 on the stack.
+	   r2 is pushed by hand; r20-r29 via the v850e PREPARE insn.  */
+	/* Called via:	callt ctoff(__callt_save_r2_r29).  */
+	.align	2
+.L_save_r2_r29:
+	add	-4, sp
+	st.w	r2, 0[sp]
+	prepare	{r20 - r29}, 0
+	ctret
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	callt ctoff(__callt_return_r2_r29).  */
+	.align	2
+.L_return_r2_r29:
+	/* DISPOSE pops r20-r29; r2 is popped by hand afterwards.  */
+	dispose	0, {r20-r29}
+	ld.w	0[sp], r2
+	add	4, sp
+	jmp	[r31]
+
+	/* Place the offsets of the start of these routines into the call table.  */
+	.call_table_data
+
+	.global	__callt_save_r2_r29
+	.type	__callt_save_r2_r29,@function
+__callt_save_r2_r29:	.short ctoff(.L_save_r2_r29)
+
+	.global	__callt_return_r2_r29
+	.type	__callt_return_r2_r29,@function
+__callt_return_r2_r29:	.short ctoff(.L_return_r2_r29)
+
+#endif /* L_callt_save_r2_r29.  */
+
+#ifdef L_callt_save_r2_r31
+	/* Put these functions into the call table area.  */
+	.call_table_text
+
+	/* Allocate space and save registers 2 and 20 .. 29, 31 on the stack.
+	   r2 is pushed by hand; r20-r29, r31 via the v850e PREPARE insn.  */
+	/* Also allocate space for the argument save area.  */
+	/* Called via:	callt ctoff(__callt_save_r2_r31).  */
+	.align	2
+.L_save_r2_r31:
+	add	-4, sp
+	st.w	r2, 0[sp]
+	prepare	{r20 - r29, r31}, 0
+	ctret
+
+	/* Restore saved registers, deallocate stack and return to the user.  */
+	/* Called via:	callt ctoff(__callt_return_r2_r31).  */
+	.align	2
+.L_return_r2_r31:
+	/* DISPOSE pops r20-r29 and r31; r2 is popped by hand afterwards.  */
+	dispose	0, {r20 - r29, r31}
+	ld.w	0[sp], r2
+	addi	4, sp, sp
+	jmp	[r31]
+
+	/* Place the offsets of the start of these routines into the call table.  */
+	.call_table_data
+
+	.global	__callt_save_r2_r31
+	.type	__callt_save_r2_r31,@function
+__callt_save_r2_r31:	.short ctoff(.L_save_r2_r31)
+
+	.global	__callt_return_r2_r31
+	.type	__callt_return_r2_r31,@function
+__callt_return_r2_r31:	.short ctoff(.L_return_r2_r31)
+
+#endif /* L_callt_save_r2_r31 */
+
+#ifdef L_callt_save_interrupt
+	/* Put these functions into the call table area.  */
+	.call_table_text
+
+	/* Save registers ep, gp, r1 on stack and load ep/gp with the
+	   expected values (__ep / __gp).  */
+	/* Called via:	callt ctoff(__callt_save_interrupt).  */
+	.align	2
+.L_save_interrupt:
+	/* SP has already been moved before callt ctoff(_save_interrupt).  */
+	/* R1,R10,R11,ctpc,ctpsw have already been saved before callt
+	   ctoff(_save_interrupt), as if by:  */
+	/* addi -28, sp, sp  */
+	/* st.w r1, 24[sp]  */
+	/* st.w r10, 12[sp]  */
+	/* st.w r11, 16[sp]  */
+	/* stsr ctpc, r10  */
+	/* st.w r10, 20[sp]  */
+	/* stsr ctpsw, r10  */
+	/* st.w r10, 24[sp]  */
+	st.w	ep, 0[sp]
+	st.w	gp, 4[sp]
+	st.w	r1, 8[sp]
+	mov	hilo(__ep),ep
+	mov	hilo(__gp),gp
+	ctret
+
+	.call_table_text
+	/* Restore saved registers, deallocate stack and return from the interrupt.  */
+	/* Called via:	callt ctoff(__callt_restore_interrupt).  */
+	.align	2
+	/* NOTE(review): the symbol declared global here is __return_interrupt,
+	   but the label defined below is the local .L_return_interrupt that is
+	   only reachable through the call table entry — confirm the .globl/.type
+	   pair is intentional.  */
+	.globl	__return_interrupt
+	.type	__return_interrupt,@function
+.L_return_interrupt:
+	ld.w	24[sp], r1
+	ldsr	r1, ctpsw
+	ld.w	20[sp], r1
+	ldsr	r1, ctpc
+	ld.w	16[sp], r11
+	ld.w	12[sp], r10
+	ld.w	8[sp], r1
+	ld.w	4[sp], gp
+	ld.w	0[sp], ep
+	addi	28, sp, sp
+	reti
+
+	/* Place the offsets of the start of these routines into the call table.  */
+	.call_table_data
+
+	.global	__callt_save_interrupt
+	.type	__callt_save_interrupt,@function
+__callt_save_interrupt:	.short ctoff(.L_save_interrupt)
+
+	.global	__callt_return_interrupt
+	.type	__callt_return_interrupt,@function
+__callt_return_interrupt:	.short ctoff(.L_return_interrupt)
+
+#endif /* L_callt_save_interrupt */
+
+#ifdef L_callt_save_all_interrupt
+	/* Put these functions into the call table area.  */
+	.call_table_text
+
+	/* Save all registers except for those saved in __save_interrupt.  */
+	/* r2, r5-r9, r11-r19 are stored by hand (60 bytes); r20-r29 and
+	   r31 are pushed by the PREPARE insn at the end.  */
+	/* Allocate enough stack for all of the registers & 16 bytes of space.  */
+	/* Called via:	callt ctoff(__callt_save_all_interrupt).  */
+	.align	2
+.L_save_all_interrupt:
+	addi	-60, sp, sp
+#ifdef __EP__
+	/* Use ep for the short sst.w encoding; caller's ep kept in r1.  */
+	mov	ep, r1
+	mov	sp, ep
+	sst.w	r2, 56[ep]
+	sst.w	r5, 52[ep]
+	sst.w	r6, 48[ep]
+	sst.w	r7, 44[ep]
+	sst.w	r8, 40[ep]
+	sst.w	r9, 36[ep]
+	sst.w	r11, 32[ep]
+	sst.w	r12, 28[ep]
+	sst.w	r13, 24[ep]
+	sst.w	r14, 20[ep]
+	sst.w	r15, 16[ep]
+	sst.w	r16, 12[ep]
+	sst.w	r17, 8[ep]
+	sst.w	r18, 4[ep]
+	sst.w	r19, 0[ep]
+	mov	r1, ep
+#else
+	st.w	r2, 56[sp]
+	st.w	r5, 52[sp]
+	st.w	r6, 48[sp]
+	st.w	r7, 44[sp]
+	st.w	r8, 40[sp]
+	st.w	r9, 36[sp]
+	st.w	r11, 32[sp]
+	st.w	r12, 28[sp]
+	st.w	r13, 24[sp]
+	st.w	r14, 20[sp]
+	st.w	r15, 16[sp]
+	st.w	r16, 12[sp]
+	st.w	r17, 8[sp]
+	st.w	r18, 4[sp]
+	st.w	r19, 0[sp]
+#endif
+	prepare	{r20 - r29, r31}, 0
+	ctret
+
+	/* Restore all registers saved in __save_all_interrupt
+	   deallocate the stack space.  */
+	/* Called via:	callt ctoff(__callt_restore_all_interrupt).  */
+	.align	2
+.L_restore_all_interrupt:
+	/* DISPOSE pops r20-r29, r31; the rest are reloaded by hand.  */
+	dispose	0, {r20 - r29, r31}
+#ifdef __EP__
+	mov	ep, r1
+	mov	sp, ep
+	sld.w	0 [ep], r19
+	sld.w	4 [ep], r18
+	sld.w	8 [ep], r17
+	sld.w	12[ep], r16
+	sld.w	16[ep], r15
+	sld.w	20[ep], r14
+	sld.w	24[ep], r13
+	sld.w	28[ep], r12
+	sld.w	32[ep], r11
+	sld.w	36[ep], r9
+	sld.w	40[ep], r8
+	sld.w	44[ep], r7
+	sld.w	48[ep], r6
+	sld.w	52[ep], r5
+	sld.w	56[ep], r2
+	mov	r1, ep
+#else
+	ld.w	0 [sp], r19
+	ld.w	4 [sp], r18
+	ld.w	8 [sp], r17
+	ld.w	12[sp], r16
+	ld.w	16[sp], r15
+	ld.w	20[sp], r14
+	ld.w	24[sp], r13
+	ld.w	28[sp], r12
+	ld.w	32[sp], r11
+	ld.w	36[sp], r9
+	ld.w	40[sp], r8
+	ld.w	44[sp], r7
+	ld.w	48[sp], r6
+	ld.w	52[sp], r5
+	ld.w	56[sp], r2
+#endif
+	addi	60, sp, sp
+	ctret
+
+	/* Place the offsets of the start of these routines into the call table.  */
+	.call_table_data
+
+	.global	__callt_save_all_interrupt
+	.type	__callt_save_all_interrupt,@function
+__callt_save_all_interrupt:	.short ctoff(.L_save_all_interrupt)
+
+	.global	__callt_restore_all_interrupt
+	.type	__callt_restore_all_interrupt,@function
+__callt_restore_all_interrupt:	.short ctoff(.L_restore_all_interrupt)
+
+#endif /* L_callt_save_all_interrupt */
+
+
+/* Expand to a callt-reachable save routine (.L_save_START_r29, using the
+   v850e PREPARE insn to push START .. r29) and the matching return routine
+   (.L_return_START_r29, a single DISPOSE that also jumps through r31),
+   plus their two call-table entries.  Instantiated below for r20 .. r29.  */
+#define MAKE_CALLT_FUNCS( START )				\
+	.call_table_text					;\
+	.align	2						;\
+	/* Allocate space and save registers START .. r29 on the stack.  */ ;\
+	/* Called via:	callt ctoff(__callt_save_START_r29).  */ ;\
+.L_save_##START##_r29:						;\
+	prepare	{ START - r29 }, 0				;\
+	ctret							;\
+								;\
+	/* Restore saved registers, deallocate stack and return.  */ ;\
+	/* Called via:	callt ctoff(__return_START_r29).  */	;\
+	.align	2						;\
+.L_return_##START##_r29:					;\
+	dispose	0, { START - r29 }, r31				;\
+								;\
+	/* Place the offsets of the start of these funcs into the call table.  */;\
+	.call_table_data					;\
+								;\
+	.global	__callt_save_##START##_r29			;\
+	.type	__callt_save_##START##_r29,@function		;\
+__callt_save_##START##_r29:	.short ctoff(.L_save_##START##_r29 )	;\
+								;\
+	.global	__callt_return_##START##_r29			;\
+	.type	__callt_return_##START##_r29,@function		;\
+__callt_return_##START##_r29:	.short ctoff(.L_return_##START##_r29 )
+
+
+/* Like MAKE_CALLT_FUNCS, but the register set also includes the link
+   register r31 (the "c" variants); again the return routine is a single
+   DISPOSE that pops the set and jumps through the restored r31.  */
+#define MAKE_CALLT_CFUNCS( START )				\
+	.call_table_text					;\
+	.align	2						;\
+	/* Allocate space and save registers START .. r31 on the stack.  */ ;\
+	/* Called via:	callt ctoff(__callt_save_START_r31c).  */ ;\
+.L_save_##START##_r31c:						;\
+	prepare	{ START - r29, r31}, 0				;\
+	ctret							;\
+								;\
+	/* Restore saved registers, deallocate stack and return.  */ ;\
+	/* Called via:	callt ctoff(__return_START_r31c).  */	;\
+	.align	2						;\
+.L_return_##START##_r31c:					;\
+	dispose	0, { START - r29, r31}, r31			;\
+								;\
+	/* Place the offsets of the start of these funcs into the call table.  */;\
+	.call_table_data					;\
+								;\
+	.global	__callt_save_##START##_r31c			;\
+	.type	__callt_save_##START##_r31c,@function		;\
+__callt_save_##START##_r31c:	.short ctoff(.L_save_##START##_r31c )	;\
+								;\
+	.global	__callt_return_##START##_r31c			;\
+	.type	__callt_return_##START##_r31c,@function		;\
+__callt_return_##START##_r31c:	.short ctoff(.L_return_##START##_r31c )
+
+
+/* Instantiate one save/return pair per libgcc object file; the build
+   (see t-v850 LIB1ASMFUNCS) compiles this source once per L_callt_save_*
+   macro so each routine lands in its own object.  */
+#ifdef	L_callt_save_20
+	MAKE_CALLT_FUNCS (r20)
+#endif
+#ifdef	L_callt_save_21
+	MAKE_CALLT_FUNCS (r21)
+#endif
+#ifdef	L_callt_save_22
+	MAKE_CALLT_FUNCS (r22)
+#endif
+#ifdef	L_callt_save_23
+	MAKE_CALLT_FUNCS (r23)
+#endif
+#ifdef	L_callt_save_24
+	MAKE_CALLT_FUNCS (r24)
+#endif
+#ifdef	L_callt_save_25
+	MAKE_CALLT_FUNCS (r25)
+#endif
+#ifdef	L_callt_save_26
+	MAKE_CALLT_FUNCS (r26)
+#endif
+#ifdef	L_callt_save_27
+	MAKE_CALLT_FUNCS (r27)
+#endif
+#ifdef	L_callt_save_28
+	MAKE_CALLT_FUNCS (r28)
+#endif
+#ifdef	L_callt_save_29
+	MAKE_CALLT_FUNCS (r29)
+#endif
+
+#ifdef	L_callt_save_20c
+	MAKE_CALLT_CFUNCS (r20)
+#endif
+#ifdef	L_callt_save_21c
+	MAKE_CALLT_CFUNCS (r21)
+#endif
+#ifdef	L_callt_save_22c
+	MAKE_CALLT_CFUNCS (r22)
+#endif
+#ifdef	L_callt_save_23c
+	MAKE_CALLT_CFUNCS (r23)
+#endif
+#ifdef	L_callt_save_24c
+	MAKE_CALLT_CFUNCS (r24)
+#endif
+#ifdef	L_callt_save_25c
+	MAKE_CALLT_CFUNCS (r25)
+#endif
+#ifdef	L_callt_save_26c
+	MAKE_CALLT_CFUNCS (r26)
+#endif
+#ifdef	L_callt_save_27c
+	MAKE_CALLT_CFUNCS (r27)
+#endif
+#ifdef	L_callt_save_28c
+	MAKE_CALLT_CFUNCS (r28)
+#endif
+#ifdef	L_callt_save_29c
+	MAKE_CALLT_CFUNCS (r29)
+#endif
+
+
+#ifdef L_callt_save_31c
+	.call_table_text
+	.align	2
+	/* Allocate space and save only the link register r31 on the stack
+	   (PREPARE/DISPOSE pair; not covered by the macros above).  */
+	/* Called via:	callt ctoff(__callt_save_r31c).  */
+.L_callt_save_r31c:
+	prepare	{r31}, 0
+	ctret
+
+	/* Restore saved registers, deallocate stack and return.  */
+	/* Called via:	callt ctoff(__return_r31c).  */
+	.align	2
+.L_callt_return_r31c:
+	dispose	0, {r31}, r31
+
+	/* Place the offsets of the start of these funcs into the call table.  */
+	.call_table_data
+
+	.global	__callt_save_r31c
+	.type	__callt_save_r31c,@function
+__callt_save_r31c:	.short ctoff(.L_callt_save_r31c)
+
+	.global	__callt_return_r31c
+	.type	__callt_return_r31c,@function
+__callt_return_r31c:	.short ctoff(.L_callt_return_r31c)
+#endif
+
+#endif /* __v850e__ */
+
+/* libgcc2 routines for NEC V850. */
+/* Double Integer Arithmetical Operation. */
+
+#ifdef	L_negdi2
+	.text
+	.global	___negdi2
+	.type	___negdi2, @function
+	/* 64-bit negate: r11:r10 = -(r7:r6).  Two's complement the low
+	   word, then propagate the carry into the negated high word.  */
+___negdi2:
+	not	r6, r10
+	add	1, r10
+	/* CY is set iff the low word wrapped (original low word was 0).  */
+	setf	l, r6
+	not	r7, r11
+	add	r6, r11
+	jmp	[lp]
+
+	.size ___negdi2,.-___negdi2
+#endif
+
+#ifdef	L_cmpdi2
+	.text
+	.global	___cmpdi2
+	.type	___cmpdi2,@function
+	/* Signed 64-bit compare of r7:r6 with r9:r8; per the libgcc
+	   __cmpdi2 convention r10 = 0 (less), 1 (equal) or 2 (greater).  */
+___cmpdi2:
+	# Signed comparison between the high words.
+	cmp	r9, r7
+	be	.L_cmpdi_cmp_low
+	# r10 = (r7 >= r9) + (r7 > r9), i.e. 0 or 2 here.
+	setf	ge, r10
+	setf	gt, r6
+	add	r6, r10
+	jmp	[lp]
+.L_cmpdi_cmp_low:
+	# High words equal: unsigned comparison between the low words.
+	cmp	r8, r6
+	setf	nl, r10
+	setf	h, r6
+	add	r6, r10
+	jmp	[lp]
+	.size ___cmpdi2, . -  ___cmpdi2
+#endif
+
+#ifdef	L_ucmpdi2
+	.text
+	.global	___ucmpdi2
+	.type	___ucmpdi2,@function
+	/* Unsigned 64-bit compare of r7:r6 with r9:r8; per the libgcc
+	   __ucmpdi2 convention r10 = 0 (less), 1 (equal) or 2 (greater).
+	   The flags tested below come from whichever cmp ran last.  */
+___ucmpdi2:
+	cmp	r9, r7	# Check if each high word are same.
+	bne	.L_ucmpdi_check_psw
+	cmp	r8, r6	# Compare the word.
+.L_ucmpdi_check_psw:
+	setf	nl, r10	#
+	setf	h, r6	#
+	add	r6, r10	# Add the result of comparison NL and comparison H.
+	jmp	[lp]
+	.size ___ucmpdi2, . -  ___ucmpdi2
+#endif
+
+#ifdef	L_muldi3
+	.text
+	.global	___muldi3
+	.type	___muldi3,@function
+	/* 64 x 64 -> 64 multiply: r11:r10 = r7:r6 * r9:r8.  */
+___muldi3:
+#ifdef __v850__
+	/* Plain v850 has only the 16x16 mulh insn, so each operand is
+	   presumably split into 15-bit limbs (masked with 32767) and the
+	   partial products are recombined below — TODO verify limb layout.  */
+	jarl	__save_r26_r31, r10
+	addi	16,  sp, sp
+	mov	r6,  r28
+	shr	15,  r28
+	movea	lo(32767), r0, r14
+	and	r14, r28
+	mov	r8,  r10
+	shr	15,  r10
+	and	r14, r10
+	mov	r6,  r19
+	shr	30,  r19
+	mov	r7,  r12
+	shl	2,   r12
+	or	r12, r19
+	and	r14, r19
+	mov	r8,  r13
+	shr	30,  r13
+	mov	r9,  r12
+	shl	2,   r12
+	or	r12, r13
+	and	r14, r13
+	mov	r7,  r11
+	shr	13,  r11
+	and	r14, r11
+	mov	r9,  r31
+	shr	13,  r31
+	and	r14, r31
+	mov	r7,  r29
+	shr	28,  r29
+	and	r14, r29
+	mov	r9,  r12
+	shr	28,  r12
+	and	r14, r12
+	and	r14, r6
+	and	r14, r8
+	mov	r6,  r14
+	mulh	r8,  r14
+	mov	r6,  r16
+	mulh	r10, r16
+	mov	r6,  r18
+	mulh	r13, r18
+	mov	r6,  r15
+	mulh	r31, r15
+	mulh	r12, r6
+	mov	r28, r17
+	mulh	r10, r17
+	/* Re-extend the stack dropped by the addi 16,sp,sp above before
+	   the out-of-line epilogue pops the saved registers.  */
+	add	-16, sp
+	mov	r28, r12
+	mulh	r8,  r12
+	add	r17, r18
+	mov	r28, r17
+	mulh	r31, r17
+	add	r12, r16
+	mov	r28, r12
+	mulh	r13, r12
+	add	r17, r6
+	mov	r19, r17
+	add	r12, r15
+	mov	r19, r12
+	mulh	r8,  r12
+	mulh	r10, r17
+	add	r12, r18
+	mov	r19, r12
+	mulh	r13, r12
+	add	r17, r15
+	mov	r11, r13
+	mulh	r8,  r13
+	add	r12, r6
+	mov	r11, r12
+	mulh	r10, r12
+	add	r13, r15
+	mulh	r29, r8
+	add	r12, r6
+	mov	r16, r13
+	shl	15,  r13
+	add	r14, r13
+	mov	r18, r12
+	shl	30,  r12
+	mov	r13, r26
+	add	r12, r26
+	shr	15,  r14
+	movhi	hi(131071), r0,  r12
+	movea	lo(131071), r12, r13
+	and	r13, r14
+	mov	r16, r12
+	and	r13, r12
+	add	r12, r14
+	mov	r18, r12
+	shl	15,  r12
+	and	r13, r12
+	add	r12, r14
+	shr	17,  r14
+	shr	17,  r16
+	add	r14, r16
+	shl	13,  r15
+	shr	2,   r18
+	add	r18, r15
+	add	r15, r16
+	mov	r16, r27
+	add	r8,  r6
+	shl	28,  r6
+	add	r6,  r27
+	/* Result low/high assembled in r26/r27; move into the return
+	   registers and pop via the shared epilogue.  */
+	mov	r26, r10
+	mov	r27, r11
+	jr	__return_r26_r31
+#else /* defined(__v850e__) */
+	/* (Ahi << 32 + Alo) * (Bhi << 32 + Blo) */
+	/*	r7	r6	 r9	r8   */
+	mov	r8, r10
+	mulu	r7, r8,  r0		/* Ahi * Blo */
+	mulu	r6, r9,  r0		/* Alo * Bhi */
+	mulu	r6, r10, r11		/* Alo * Blo */
+	add	r8, r11
+	add	r9, r11
+	/* Result in r11:r10.  */
+	jmp	[r31]
+#endif /* defined(__v850e__) */
+	.size ___muldi3, . - ___muldi3
+#endif
+
diff --git a/gcc/config/v850/predicates.md b/gcc/config/v850/predicates.md
new file mode 100644
index 000000000..129f00d59
--- /dev/null
+++ b/gcc/config/v850/predicates.md
@@ -0,0 +1,501 @@
+;; Predicate definitions for NEC V850.
+;; Copyright (C) 2005, 2007, 2010 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Return true if OP is either a register (or subreg) or the constant
+;; zero: integer 0, or a CONST_DOUBLE zero accepted by constraint G.
+
+(define_predicate "reg_or_0_operand"
+  (match_code "reg,subreg,const_int,const_double")
+{
+  if (GET_CODE (op) == CONST_INT)
+    return INTVAL (op) == 0;
+
+  else if (GET_CODE (op) == CONST_DOUBLE)
+    return satisfies_constraint_G (op);
+
+  else
+    return register_operand (op, mode);
+})
+
+;; Return true if OP is either a register or a signed five bit
+;; integer (constraint J).
+
+(define_predicate "reg_or_int5_operand"
+  (match_code "reg,subreg,const_int")
+{
+  if (GET_CODE (op) == CONST_INT)
+    return CONST_OK_FOR_J (INTVAL (op));
+
+  else
+    return register_operand (op, mode);
+})
+
+;; Return true if OP is either a register or a signed nine bit
+;; integer (constraint O).
+
+(define_predicate "reg_or_int9_operand"
+  (match_code "reg,subreg,const_int")
+{
+  if (GET_CODE (op) == CONST_INT)
+    return CONST_OK_FOR_O (INTVAL (op));
+
+  return register_operand (op, mode);
+})
+
+;; Return true if OP is either a register or any CONST_INT
+;; (no range restriction).
+
+(define_predicate "reg_or_const_operand"
+  (match_code "reg,const_int")
+{
+  if (GET_CODE (op) == CONST_INT)
+    return TRUE;
+
+  return register_operand (op, mode);
+})
+
+;; Return true if OP is an even-numbered hard register (r2 .. r30),
+;; or any pseudo register (which can still be allocated to one).
+
+(define_predicate "even_reg_operand"
+  (match_code "reg")
+{
+  return (GET_CODE (op) == REG
+	  && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+	      || ((REGNO (op) > 0) && (REGNO (op) < 32)
+		  && ((REGNO (op) & 1)==0))));
+})
+
+;; Return true if OP is a valid call operand: a register, or also a
+;; SYMBOL_REF unless -mlong-calls forces calls through a register.
+
+(define_predicate "call_address_operand"
+  (match_code "reg,symbol_ref")
+{
+  /* Only registers are valid call operands if TARGET_LONG_CALLS.  */
+  if (TARGET_LONG_CALLS)
+    return GET_CODE (op) == REG;
+  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG);
+})
+
+;; Return true if OP is a valid source operand for an SImode move.
+;; Constants that fit constraints J/K/L can be loaded directly; other
+;; constants (and non-HIGH symbolic operands) must go through the
+;; HIGH & LO_SUM path unless they refer to a small-data-area symbol.
+
+(define_predicate "movsi_source_operand"
+  (match_code "label_ref,symbol_ref,const_int,const_double,const,high,mem,reg,subreg")
+{
+  /* Some constants, as well as symbolic operands
+     must be done with HIGH & LO_SUM patterns.  */
+  if (CONSTANT_P (op)
+      && GET_CODE (op) != HIGH
+      && !(GET_CODE (op) == CONST_INT
+	   && (CONST_OK_FOR_J (INTVAL (op))
+	       || CONST_OK_FOR_K (INTVAL (op))
+	       || CONST_OK_FOR_L (INTVAL (op)))))
+    return special_symbolref_operand (op, mode);
+  else
+    return general_operand (op, mode);
+})
+
+;; Return true if OP is a valid operand for the 23 bit displacement
+;; form: too large for a signed 16-bit displacement (>= 0x8000) yet
+;; within the positive range of a signed 23-bit one (< 0x400000).
+
+(define_predicate "disp23_operand"
+  (match_code "const_int")
+{
+  if (GET_CODE (op) == CONST_INT
+      && ((unsigned)(INTVAL (op)) >= 0x8000)
+      && ((unsigned)(INTVAL (op)) < 0x400000))
+    return 1;
+  else
+    return 0;
+})
+
+;; Return true if OP is a SYMBOL_REF placed in one of the small data
+;; areas (ZDA/TDA/SDA), so it is addressable with a 16-bit offset.
+;; NOTE(review): match_code is "symbol_ref", so the CONST+PLUS case
+;; below is only reachable when the predicate is called directly from
+;; C code, not from pattern matching — confirm that is intended.
+
+(define_predicate "special_symbolref_operand"
+  (match_code "symbol_ref")
+{
+  if (GET_CODE (op) == CONST
+      && GET_CODE (XEXP (op, 0)) == PLUS
+      && satisfies_constraint_K (XEXP (XEXP (op, 0), 1)))
+    op = XEXP (XEXP (op, 0), 0);
+
+  if (GET_CODE (op) == SYMBOL_REF)
+    return (SYMBOL_REF_FLAGS (op)
+	    & (SYMBOL_FLAG_ZDA | SYMBOL_FLAG_TDA | SYMBOL_FLAG_SDA)) != 0;
+
+  return FALSE;
+})
+
+;; Return true if OP is a valid operand for bit set/test operations:
+;; a CONST_INT with exactly one bit set (an exact power of two).
+
+(define_predicate "power_of_two_operand"
+  (match_code "const_int")
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  if (exact_log2 (INTVAL (op)) == -1)
+    return 0;
+  return 1;
+})
+
+;; Return nonzero if the given PARALLEL is suitable for collapsing into
+;; a jump to an out-of-line __save_* prologue routine: a stack adjust,
+;; a run of register pushes into that newly allocated space, and a
+;; trailing CLOBBER of r10 (plus r11 when -mlong-calls).
+
+(define_predicate "pattern_is_ok_for_prologue"
+  (match_code "parallel")
+{
+  int count = XVECLEN (op, 0);
+  int i;
+  rtx vector_element;
+
+  /* If there are no registers to save then the function prologue
+     is not suitable.  */
+  if (count <= (TARGET_LONG_CALLS ? 3 : 2))
+    return 0;
+
+  /* The pattern matching has already established that we are adjusting the
+     stack and pushing at least one register.  We must now check that the
+     remaining entries in the vector to make sure that they are also register
+     pushes, except for the last entry which should be a CLOBBER of r10.
+
+     The test below performs the C equivalent of this machine description
+     pattern match:
+
+     (set (mem:SI (plus:SI (reg:SI 3)
+      (match_operand:SI 2 "immediate_operand" "i")))
+      (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))
+
+     */
+
+  for (i = 2; i < count - (TARGET_LONG_CALLS ? 2: 1); i++)
+    {
+      rtx dest;
+      rtx src;
+      rtx plus;
+
+      vector_element = XVECEXP (op, 0, i);
+
+      if (GET_CODE (vector_element) != SET)
+	return 0;
+
+      dest = SET_DEST (vector_element);
+      src = SET_SRC (vector_element);
+
+      if (GET_CODE (dest) != MEM
+	  || GET_MODE (dest) != SImode
+	  || GET_CODE (src) != REG
+	  || GET_MODE (src) != SImode
+	  || ! register_is_ok_for_epilogue (src, SImode))
+	return 0;
+
+      plus = XEXP (dest, 0);
+
+      if ( GET_CODE (plus) != PLUS
+	  || GET_CODE (XEXP (plus, 0)) != REG
+	  || GET_MODE (XEXP (plus, 0)) != SImode
+	  || REGNO    (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+	  || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+	return 0;
+
+      /* If the register is being pushed somewhere other than the stack
+	 space just acquired by the first operand then abandon this quest.
+	 Note: the test is <= because both values are negative.	 */
+      if (INTVAL (XEXP (plus, 1))
+	  <= INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)))
+	{
+	  return 0;
+	}
+    }
+
+  /* Make sure that the last entries in the vector are clobbers.  */
+  vector_element = XVECEXP (op, 0, i++);
+
+  if (GET_CODE (vector_element) != CLOBBER
+      || GET_CODE (XEXP (vector_element, 0)) != REG
+      || REGNO (XEXP (vector_element, 0)) != 10)
+    return 0;
+
+  if (TARGET_LONG_CALLS)
+    {
+      vector_element = XVECEXP (op, 0, i++);
+
+      if (GET_CODE (vector_element) != CLOBBER
+	  || GET_CODE (XEXP (vector_element, 0)) != REG
+	  || REGNO (XEXP (vector_element, 0)) != 11)
+	return 0;
+    }
+
+  return i == count;
+})
+
+;; Return nonzero if the given PARALLEL is suitable for collapsing into
+;; a jump to an out-of-line __return_* epilogue routine: every element
+;; from index 2 on must be a pop of a save/restore-eligible register
+;; from a stack-pointer-relative slot.
+
+(define_predicate "pattern_is_ok_for_epilogue"
+  (match_code "parallel")
+{
+  int count = XVECLEN (op, 0);
+  int i;
+
+  /* If there are no registers to restore then the function epilogue
+     is not suitable.  */
+  if (count <= 2)
+    return 0;
+
+  /* The pattern matching has already established that we are performing a
+     function epilogue and that we are popping at least one register.  We must
+     now check the remaining entries in the vector to make sure that they are
+     also register pops.  There is no good reason why there should ever be
+     anything else in this vector, but being paranoid always helps...
+
+     The test below performs the C equivalent of this machine description
+     pattern match:
+
+     (set (match_operand:SI n "register_is_ok_for_epilogue" "r")
+     (mem:SI (plus:SI (reg:SI 3) (match_operand:SI n "immediate_operand" "i"))))
+     */
+
+  for (i = 2; i < count; i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+      rtx dest;
+      rtx src;
+      rtx plus;
+
+      if (GET_CODE (vector_element) != SET)
+	return 0;
+
+      dest = SET_DEST (vector_element);
+      src = SET_SRC (vector_element);
+
+      if (GET_CODE (dest) != REG
+	  || GET_MODE (dest) != SImode
+	  || ! register_is_ok_for_epilogue (dest, SImode)
+	  || GET_CODE (src) != MEM
+	  || GET_MODE (src) != SImode)
+	return 0;
+
+      plus = XEXP (src, 0);
+
+      if (GET_CODE (plus) != PLUS
+	  || GET_CODE (XEXP (plus, 0)) != REG
+	  || GET_MODE (XEXP (plus, 0)) != SImode
+	  || REGNO    (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+	  || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+	return 0;
+    }
+
+  return 1;
+})
+
+;; Return true if the given RTX is a register which can be restored by
+;; a function epilogue (the out-of-line save/restore routines handle
+;; r20 .. r31 only).
+
+(define_predicate "register_is_ok_for_epilogue"
+  (match_code "reg")
+{
+  /* The save/restore routines can only cope with registers 20 - 31.  */
+  return ((GET_CODE (op) == REG)
+          && (((REGNO (op) >= 20) && REGNO (op) <= 31)));
+})
+
+;; Return nonzero if the given PARALLEL is suitable for collapsing into
+;; a v850e DISPOSE instruction.  Elements from index 3 on must be pops
+;; of eligible registers from stack-pointer-relative slots (the leading
+;; elements — checked by the pattern itself — differ from the epilogue
+;; case, hence the loop starting at 3 rather than 2).
+
+(define_predicate "pattern_is_ok_for_dispose"
+  (match_code "parallel")
+{
+  int count = XVECLEN (op, 0);
+  int i;
+
+  /* If there are no registers to restore then
+     the dispose instruction is not suitable.  */
+  if (count <= 2)
+    return 0;
+
+  /* The pattern matching has already established that we are performing a
+     function epilogue and that we are popping at least one register.  We must
+     now check the remaining entries in the vector to make sure that they are
+     also register pops.  There is no good reason why there should ever be
+     anything else in this vector, but being paranoid always helps...
+
+     The test below performs the C equivalent of this machine description
+     pattern match:
+
+     (set (match_operand:SI n "register_is_ok_for_epilogue" "r")
+     (mem:SI (plus:SI (reg:SI 3)
+     (match_operand:SI n "immediate_operand" "i"))))
+     */
+
+  for (i = 3; i < count; i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+      rtx dest;
+      rtx src;
+      rtx plus;
+
+      if (GET_CODE (vector_element) != SET)
+	return 0;
+
+      dest = SET_DEST (vector_element);
+      src  = SET_SRC (vector_element);
+
+      if (   GET_CODE (dest) != REG
+	  || GET_MODE (dest) != SImode
+	  || ! register_is_ok_for_epilogue (dest, SImode)
+	  || GET_CODE (src) != MEM
+	  || GET_MODE (src) != SImode)
+	return 0;
+
+      plus = XEXP (src, 0);
+
+      if (   GET_CODE (plus) != PLUS
+	  || GET_CODE (XEXP (plus, 0)) != REG
+	  || GET_MODE (XEXP (plus, 0)) != SImode
+	  || REGNO    (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+	  || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+	return 0;
+    }
+
+  return 1;
+})
+
+;; Return nonzero if the given PARALLEL is suitable for collapsing into
+;; a v850e PREPARE instruction: from index 1 on, each element is either
+;; a CLOBBER or a push of an eligible register into the stack space
+;; acquired by element 0.  (Unlike the prologue predicate the offset
+;; test uses <, allowing a push at exactly the new stack bottom.)
+
+(define_predicate "pattern_is_ok_for_prepare"
+  (match_code "parallel")
+{
+  int count = XVECLEN (op, 0);
+  int i;
+
+  /* If there are no registers to restore then the prepare instruction
+     is not suitable.  */
+  if (count <= 1)
+    return 0;
+
+  /* The pattern matching has already established that we are adjusting the
+     stack and pushing at least one register.  We must now check that the
+     remaining entries in the vector to make sure that they are also register
+     pushes.
+
+     The test below performs the C equivalent of this machine description
+     pattern match:
+
+     (set (mem:SI (plus:SI (reg:SI 3)
+     (match_operand:SI 2 "immediate_operand" "i")))
+     (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))
+
+     */
+
+  for (i = 1; i < count; i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+      rtx dest;
+      rtx src;
+      rtx plus;
+
+      if (GET_CODE (vector_element) == CLOBBER)
+	continue;
+
+      if (GET_CODE (vector_element) != SET)
+	return 0;
+
+      dest = SET_DEST (vector_element);
+      src  = SET_SRC (vector_element);
+
+      if (   GET_CODE (dest) != MEM
+	  || GET_MODE (dest) != SImode
+	  || GET_CODE (src) != REG
+	  || GET_MODE (src) != SImode
+	  || ! register_is_ok_for_epilogue (src, SImode)
+	     )
+	return 0;
+
+      plus = XEXP (dest, 0);
+
+      if (   GET_CODE (plus) != PLUS
+	  || GET_CODE (XEXP (plus, 0)) != REG
+	  || GET_MODE (XEXP (plus, 0)) != SImode
+	  || REGNO    (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+	  || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+	return 0;
+
+      /* If the register is being pushed somewhere other than the stack
+	 space just acquired by the first operand then abandon this quest.
+	 Note: the test is <= because both values are negative.	 */
+      if (INTVAL (XEXP (plus, 1))
+	  < INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)))
+	return 0;
+    }
+
+  return 1;
+})
+
+;; Return true if OP is a valid operand for bit clear operations: a
+;; CONST_INT whose value, within the width of MODE, has exactly one
+;; zero bit (i.e. its complement is a power of two).
+
+(define_predicate "not_power_of_two_operand"
+  (match_code "const_int")
+{
+  unsigned int mask;
+
+  if (mode == QImode)
+    mask = 0xff;
+  else if (mode == HImode)
+    mask = 0xffff;
+  else if (mode == SImode)
+    mask = 0xffffffff;
+  else
+    return 0;
+
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  if (exact_log2 (~INTVAL (op) & mask) == -1)
+    return 0;
+  return 1;
+})
+
+;; Return true if OP is the floating point constant 1.0 in MODE
+;; (SFmode or DFmode).
+
+(define_predicate "const_float_1_operand"
+  (match_code "const_double")
+{
+  /* match_code must be const_double: the body requires a CONST_DOUBLE,
+     so the former "const_int" match_code made this predicate reject
+     every operand.  */
+  if (GET_CODE (op) != CONST_DOUBLE
+      || mode != GET_MODE (op)
+      || (mode != DFmode && mode != SFmode))
+    return 0;
+
+  return op == CONST1_RTX(mode);
+})
+
+;; Return true if OP is the floating point constant 0.0 in MODE
+;; (SFmode or DFmode).
+
+(define_predicate "const_float_0_operand"
+  (match_code "const_double")
+{
+  /* match_code must be const_double: the body requires a CONST_DOUBLE,
+     so the former "const_int" match_code made this predicate reject
+     every operand.  */
+  if (GET_CODE (op) != CONST_DOUBLE
+      || mode != GET_MODE (op)
+      || (mode != DFmode && mode != SFmode))
+    return 0;
+
+  return op == CONST0_RTX(mode);
+})
+
+
diff --git a/gcc/config/v850/t-v850 b/gcc/config/v850/t-v850
new file mode 100644
index 000000000..076b00d60
--- /dev/null
+++ b/gcc/config/v850/t-v850
@@ -0,0 +1,114 @@
+# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2008, 2009, 2010
+# Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
# Assembler support routines; each name listed in LIB1ASMFUNCS becomes
# a separately linkable member of libgcc, extracted from lib1funcs.asm.
LIB1ASMSRC = v850/lib1funcs.asm
LIB1ASMFUNCS = _mulsi3 \
	    _divsi3 \
	    _udivsi3 \
	    _modsi3 \
	    _umodsi3 \
	    _save_2 \
	    _save_20 \
	    _save_21 \
	    _save_22 \
	    _save_23 \
	    _save_24 \
	    _save_25 \
	    _save_26 \
	    _save_27 \
	    _save_28 \
	    _save_29 \
	    _save_2c \
	    _save_20c \
	    _save_21c \
	    _save_22c \
	    _save_23c \
	    _save_24c \
	    _save_25c \
	    _save_26c \
	    _save_27c \
	    _save_28c \
	    _save_29c \
	    _save_31c \
	    _save_interrupt \
	    _save_all_interrupt \
	    _callt_save_20 \
	    _callt_save_21 \
	    _callt_save_22 \
	    _callt_save_23 \
	    _callt_save_24 \
	    _callt_save_25 \
	    _callt_save_26 \
	    _callt_save_27 \
	    _callt_save_28 \
	    _callt_save_29 \
	    _callt_save_20c \
	    _callt_save_21c \
	    _callt_save_22c \
	    _callt_save_23c \
	    _callt_save_24c \
	    _callt_save_25c \
	    _callt_save_26c \
	    _callt_save_27c \
	    _callt_save_28c \
	    _callt_save_29c \
	    _callt_save_31c \
	    _callt_save_interrupt \
	    _callt_save_all_interrupt \
	    _callt_save_r2_r29 \
	    _callt_save_r2_r31 \
	    _negdi2 \
	    _cmpdi2 \
	    _ucmpdi2 \
	    _muldi3

# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c

# Synthesise dp-bit.c (double) and fp-bit.c (single) from the generic
# config/fp-bit.c, prepending the little-endian bit-order define.
dp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#ifdef __LITTLE_ENDIAN__' > dp-bit.c
	echo '#define FLOAT_BIT_ORDER_MISMATCH' >>dp-bit.c
	echo '#endif' >> dp-bit.c
	cat $(srcdir)/config/fp-bit.c >> dp-bit.c

fp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#define FLOAT' > fp-bit.c
	echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
	echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
	echo '#endif' >> fp-bit.c
	cat $(srcdir)/config/fp-bit.c >> fp-bit.c

# Create target-specific versions of the libraries
MULTILIB_OPTIONS = mv850/mv850e/mv850e2/mv850e2v3
MULTILIB_DIRNAMES = v850 v850e v850e2 v850e2v3
INSTALL_LIBGCC = install-multilib
# -mv850e1 code is link-compatible with -mv850e, so reuse its multilib.
MULTILIB_MATCHES = mv850e=mv850e1

TCFLAGS = -mno-app-regs -msmall-sld -Wa,-mwarn-signed-overflow -Wa,-mwarn-unsigned-overflow

v850-c.o: $(srcdir)/config/v850/v850-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
  $(TM_H) $(CPPLIB_H) $(TREE_H) $(C_PRAGMA_H) $(GGC_H) $(TM_P_H)
	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
		$(srcdir)/config/v850/v850-c.c

# Local Variables:
# mode: Makefile
# End:
diff --git a/gcc/config/v850/t-v850e b/gcc/config/v850/t-v850e
new file mode 100644
index 000000000..1eb768520
--- /dev/null
+++ b/gcc/config/v850/t-v850e
@@ -0,0 +1,112 @@
+# Copyright (C) 2003, 2008, 2009, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
# Assembler support routines; each name listed in LIB1ASMFUNCS becomes
# a separately linkable member of libgcc, extracted from lib1funcs.asm.
LIB1ASMSRC = v850/lib1funcs.asm
LIB1ASMFUNCS = _mulsi3 \
	    _divsi3 \
	    _udivsi3 \
	    _modsi3 \
	    _umodsi3 \
	    _save_2 \
	    _save_20 \
	    _save_21 \
	    _save_22 \
	    _save_23 \
	    _save_24 \
	    _save_25 \
	    _save_26 \
	    _save_27 \
	    _save_28 \
	    _save_29 \
	    _save_2c \
	    _save_20c \
	    _save_21c \
	    _save_22c \
	    _save_23c \
	    _save_24c \
	    _save_25c \
	    _save_26c \
	    _save_27c \
	    _save_28c \
	    _save_29c \
	    _save_31c \
	    _save_interrupt \
	    _save_all_interrupt \
	    _callt_save_20 \
	    _callt_save_21 \
	    _callt_save_22 \
	    _callt_save_23 \
	    _callt_save_24 \
	    _callt_save_25 \
	    _callt_save_26 \
	    _callt_save_27 \
	    _callt_save_28 \
	    _callt_save_29 \
	    _callt_save_20c \
	    _callt_save_21c \
	    _callt_save_22c \
	    _callt_save_23c \
	    _callt_save_24c \
	    _callt_save_25c \
	    _callt_save_26c \
	    _callt_save_27c \
	    _callt_save_28c \
	    _callt_save_29c \
	    _callt_save_31c \
	    _callt_save_interrupt \
	    _callt_save_all_interrupt \
	    _callt_save_r2_r29 \
	    _callt_save_r2_r31 \
	    _negdi2 \
	    _cmpdi2 \
	    _ucmpdi2 \
	    _muldi3

# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c

# Synthesise dp-bit.c (double) and fp-bit.c (single) from the generic
# config/fp-bit.c, prepending the little-endian bit-order define.
dp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#ifdef __LITTLE_ENDIAN__' > dp-bit.c
	echo '#define FLOAT_BIT_ORDER_MISMATCH' >>dp-bit.c
	echo '#endif' >> dp-bit.c
	cat $(srcdir)/config/fp-bit.c >> dp-bit.c

fp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#define FLOAT' > fp-bit.c
	echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
	echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
	echo '#endif' >> fp-bit.c
	cat $(srcdir)/config/fp-bit.c >> fp-bit.c

# Create target-specific versions of the libraries
# (this variant only builds the plain v850 multilib).
MULTILIB_OPTIONS = mv850
MULTILIB_DIRNAMES = v850
INSTALL_LIBGCC = install-multilib

TCFLAGS = -mno-app-regs -msmall-sld -Wa,-mwarn-signed-overflow -Wa,-mwarn-unsigned-overflow

v850-c.o: $(srcdir)/config/v850/v850-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
  $(TM_H) $(CPPLIB_H) $(TREE_H) $(C_PRAGMA_H) $(GGC_H) $(TM_P_H)
	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
		$(srcdir)/config/v850/v850-c.c

# Local Variables:
# mode: Makefile
# End:
diff --git a/gcc/config/v850/v850-c.c b/gcc/config/v850/v850-c.c
new file mode 100644
index 000000000..db881cf4d
--- /dev/null
+++ b/gcc/config/v850/v850-c.c
@@ -0,0 +1,273 @@
+/* v850 specific, C compiler specific functions.
+ Copyright (C) 2000, 2001, 2002, 2003, 2005, 2007, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Jeff Law (law@cygnus.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "c-family/c-pragma.h"
+#include "diagnostic-core.h"
+#include "ggc.h"
+#include "tm_p.h"
+
+#ifndef streq
+#define streq(a,b) (strcmp (a, b) == 0)
+#endif
+
+static int pop_data_area (v850_data_area);
+static int push_data_area (v850_data_area);
+static void mark_current_function_as_interrupt (void);
+
+/* Push a data area onto the stack. */
+
+static int
+push_data_area (v850_data_area data_area)
+{
+ data_area_stack_element * elem;
+
+ elem = (data_area_stack_element *) xmalloc (sizeof (* elem));
+
+ if (elem == NULL)
+ return 0;
+
+ elem->prev = data_area_stack;
+ elem->data_area = data_area;
+
+ data_area_stack = elem;
+
+ return 1;
+}
+
+/* Remove a data area from the stack. */
+
+static int
+pop_data_area (v850_data_area data_area)
+{
+ if (data_area_stack == NULL)
+ warning (OPT_Wpragmas, "#pragma GHS endXXXX found without "
+ "previous startXXX");
+ else if (data_area != data_area_stack->data_area)
+ warning (OPT_Wpragmas, "#pragma GHS endXXX does not match "
+ "previous startXXX");
+ else
+ {
+ data_area_stack_element * elem;
+
+ elem = data_area_stack;
+ data_area_stack = data_area_stack->prev;
+
+ free (elem);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Set the machine specific 'interrupt' attribute on the current function. */
+
+static void
+mark_current_function_as_interrupt (void)
+{
+ tree name;
+
+ if (current_function_decl == NULL_TREE)
+ {
+ warning (0, "cannot set interrupt attribute: no current function");
+ return;
+ }
+
+ name = get_identifier ("interrupt");
+
+ if (name == NULL_TREE || TREE_CODE (name) != IDENTIFIER_NODE)
+ {
+ warning (0, "cannot set interrupt attribute: no such identifier");
+ return;
+ }
+
+ decl_attributes (&current_function_decl,
+ tree_cons (name, NULL_TREE, NULL_TREE), 0);
+}
+
+
/* Support for GHS pragmata.  */

/* Handle "#pragma ghs section [name = alias [, name = alias [, ...]]]".
   Each NAME selects a GHS section kind and ALIAS becomes the section
   name used for it; the alias "default" restores the built-in name.
   A bare "#pragma ghs section" resets every kind to its default.  */

void
ghs_pragma_section (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
  int repeat = 0;

  /* #pragma ghs section [name = alias [, name = alias [, ...]]] */
  do
    {
      tree x;
      enum cpp_ttype type;
      tree sect_ident;
      const char *sect, *alias;
      enum GHS_section_kind kind;

      type = pragma_lex (&x);

      /* EOF before the first name means a bare pragma: reset all.  */
      if (type == CPP_EOF && !repeat)
	goto reset;
      else if (type == CPP_NAME)
	{
	  sect_ident = x;
	  sect = IDENTIFIER_POINTER (sect_ident);
	}
      else
	goto bad;
      repeat = 0;

      /* Expect "= alias" after the section name.  */
      if (pragma_lex (&x) != CPP_EQ)
	goto bad;
      if (pragma_lex (&x) != CPP_NAME)
	goto bad;

      alias = IDENTIFIER_POINTER (x);

      /* A comma continues the list; anything else but EOF is junk.  */
      type = pragma_lex (&x);
      if (type == CPP_COMMA)
	repeat = 1;
      else if (type != CPP_EOF)
	warning (OPT_Wpragmas, "junk at end of #pragma ghs section");

      if (streq (sect, "data"))    kind = GHS_SECTION_KIND_DATA;
      else if (streq (sect, "text"))    kind = GHS_SECTION_KIND_TEXT;
      else if (streq (sect, "rodata"))  kind = GHS_SECTION_KIND_RODATA;
      else if (streq (sect, "const"))   kind = GHS_SECTION_KIND_RODATA;
      else if (streq (sect, "rosdata")) kind = GHS_SECTION_KIND_ROSDATA;
      else if (streq (sect, "rozdata")) kind = GHS_SECTION_KIND_ROZDATA;
      else if (streq (sect, "sdata"))   kind = GHS_SECTION_KIND_SDATA;
      else if (streq (sect, "tdata"))   kind = GHS_SECTION_KIND_TDATA;
      else if (streq (sect, "zdata"))   kind = GHS_SECTION_KIND_ZDATA;
      /* According to GHS beta documentation, the following should not be
	 allowed!  */
      else if (streq (sect, "bss"))     kind = GHS_SECTION_KIND_BSS;
      else if (streq (sect, "zbss"))    kind = GHS_SECTION_KIND_ZDATA;
      else
	{
	  warning (0, "unrecognized section name %qE", sect_ident);
	  return;
	}

      /* "default" clears the override; otherwise record the alias.  */
      if (streq (alias, "default"))
	GHS_current_section_names [kind] = NULL;
      else
	GHS_current_section_names [kind] =
	  build_string (strlen (alias) + 1, alias);
    }
  while (repeat);

  return;

 bad:
  warning (OPT_Wpragmas, "malformed #pragma ghs section");
  return;

 reset:
  /* #pragma ghs section \n: Reset all section names back to their defaults.  */
  {
    int i;

    for (i = COUNT_OF_GHS_SECTION_KINDS; i--;)
      GHS_current_section_names [i] = NULL;
  }
}
+
+void
+ghs_pragma_interrupt (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs interrupt");
+
+ mark_current_function_as_interrupt ();
+}
+
+void
+ghs_pragma_starttda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs starttda");
+
+ push_data_area (DATA_AREA_TDA);
+}
+
+void
+ghs_pragma_startsda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs startsda");
+
+ push_data_area (DATA_AREA_SDA);
+}
+
+void
+ghs_pragma_startzda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs startzda");
+
+ push_data_area (DATA_AREA_ZDA);
+}
+
+void
+ghs_pragma_endtda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs endtda");
+
+ pop_data_area (DATA_AREA_TDA);
+}
+
+void
+ghs_pragma_endsda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs endsda");
+
+ pop_data_area (DATA_AREA_SDA);
+}
+
+void
+ghs_pragma_endzda (cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ tree x;
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma ghs endzda");
+
+ pop_data_area (DATA_AREA_ZDA);
+}
diff --git a/gcc/config/v850/v850-modes.def b/gcc/config/v850/v850-modes.def
new file mode 100644
index 000000000..d90ce5456
--- /dev/null
+++ b/gcc/config/v850/v850-modes.def
@@ -0,0 +1,29 @@
+/* Definitions of target machine for GNU compiler. NEC V850 series
+ Copyright (C) 2005
+ Free Software Foundation, Inc.
+ Contributed by NEC EL
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+CC_MODE (CC_FPU_LT);
+CC_MODE (CC_FPU_LE);
+CC_MODE (CC_FPU_GT);
+CC_MODE (CC_FPU_GE);
+CC_MODE (CC_FPU_EQ);
+CC_MODE (CC_FPU_NE);
+
diff --git a/gcc/config/v850/v850-protos.h b/gcc/config/v850/v850-protos.h
new file mode 100644
index 000000000..8ee6c20d1
--- /dev/null
+++ b/gcc/config/v850/v850-protos.h
@@ -0,0 +1,73 @@
+/* Prototypes for v850.c functions used in the md file & elsewhere.
+ Copyright (C) 1999, 2000, 2002, 2004, 2005, 2007, 2010
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Function prototypes that cannot exist in v850.h due to dependency
+ complications. */
+#ifndef GCC_V850_PROTOS_H
+#define GCC_V850_PROTOS_H
+
+#define Mmode enum machine_mode
+
+extern void expand_prologue (void);
+extern void expand_epilogue (void);
+extern int v850_handle_pragma (int (*)(void), void (*)(int), char *);
+extern int compute_register_save_size (long *);
+extern int compute_frame_size (int, long *);
+extern void v850_init_expanders (void);
+
+#ifdef RTX_CODE
+extern int v850_output_addr_const_extra (FILE *, rtx);
+extern rtx v850_return_addr (int);
+extern const char *output_move_single (rtx *);
+extern void notice_update_cc (rtx, rtx);
+extern char * construct_save_jarl (rtx);
+extern char * construct_restore_jr (rtx);
+#ifdef HAVE_MACHINE_MODES
+extern char * construct_dispose_instruction (rtx);
+extern char * construct_prepare_instruction (rtx);
+extern int ep_memory_operand (rtx, Mmode, int);
+extern int v850_float_z_comparison_operator (rtx, Mmode);
+extern int v850_float_nz_comparison_operator (rtx, Mmode);
+extern rtx v850_gen_compare (enum rtx_code, Mmode, rtx, rtx);
+extern Mmode v850_gen_float_compare (enum rtx_code, Mmode, rtx, rtx);
+extern Mmode v850_select_cc_mode (RTX_CODE, rtx, rtx);
+#endif
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+extern int v850_interrupt_function_p (tree);
+extern void v850_output_aligned_bss (FILE *, tree, const char *, unsigned HOST_WIDE_INT, int);
+extern void v850_output_common (FILE *, tree, const char *, int, int);
+extern void v850_output_local (FILE *, tree, const char *, int, int);
+extern v850_data_area v850_get_data_area (tree);
+#endif
+
+extern void ghs_pragma_section (struct cpp_reader *);
+extern void ghs_pragma_interrupt (struct cpp_reader *);
+extern void ghs_pragma_starttda (struct cpp_reader *);
+extern void ghs_pragma_startsda (struct cpp_reader *);
+extern void ghs_pragma_startzda (struct cpp_reader *);
+extern void ghs_pragma_endtda (struct cpp_reader *);
+extern void ghs_pragma_endsda (struct cpp_reader *);
+extern void ghs_pragma_endzda (struct cpp_reader *);
+
+#undef Mmode
+
+#endif /* ! GCC_V850_PROTOS_H */
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
new file mode 100644
index 000000000..d75f88c55
--- /dev/null
+++ b/gcc/config/v850/v850.c
@@ -0,0 +1,3226 @@
+/* Subroutines for insn-output.c for NEC V850 series
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
+ 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Contributed by Jeff Law (law@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "expr.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "ggc.h"
+#include "integrate.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "df.h"
+
+#ifndef streq
+#define streq(a,b) (strcmp (a, b) == 0)
+#endif
+
+static void v850_print_operand_address (FILE *, rtx);
+
/* Information about the various small memory areas.  */
struct small_memory_info small_memory[ (int)SMALL_MEMORY_max ] =
{
  /* Name	Max	Physical max.  */
  { "tda",	0,		256 },
  { "sda",	0,		65536 },
  { "zda",	0,		32768 },
};

/* Names of the various data areas used on the v850.  */
tree GHS_default_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
tree GHS_current_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];

/* Track the current data area set by the data area pragma (which
   can be nested).  Tested by check_default_data_area.  */
data_area_stack_element * data_area_stack = NULL;

/* True if we don't need to check any more if the current
   function is an interrupt handler.  */
static int v850_interrupt_cache_p = FALSE;

/* NOTE(review): presumably the operands most recently captured for a
   pending comparison -- confirm against their uses in v850.md.  */
rtx v850_compare_op0, v850_compare_op1;

/* Whether current function is an interrupt handler.  */
static int v850_interrupt_p = FALSE;

/* Cached section objects for the v850-specific data sections.  */
static GTY(()) section * rosdata_section;
static GTY(()) section * rozdata_section;
static GTY(()) section * tdata_section;
static GTY(()) section * zdata_section;
static GTY(()) section * zbss_section;
+
+/* Set the maximum size of small memory area TYPE to the value given
+ by VALUE. Return true if VALUE was syntactically correct. VALUE
+ starts with the argument separator: either "-" or "=". */
+
+static bool
+v850_handle_memory_option (enum small_memory_type type, const char *value)
+{
+ int i, size;
+
+ if (*value != '-' && *value != '=')
+ return false;
+
+ value++;
+ for (i = 0; value[i]; i++)
+ if (!ISDIGIT (value[i]))
+ return false;
+
+ size = atoi (value);
+ if (size > small_memory[type].physical_max)
+ error ("value passed to %<-m%s%> is too large", small_memory[type].name);
+ else
+ small_memory[type].max = size;
+ return true;
+}
+
/* Implement TARGET_HANDLE_OPTION.  CODE is the OPT_* enumerator for
   the option, ARG its argument (if any).  Returns false only when a
   small-memory size argument is malformed.  */

static bool
v850_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mspace:
      /* -mspace turns on both -mep and -mprolog-function.  */
      target_flags |= MASK_EP | MASK_PROLOG_FUNCTION;
      return true;

    case OPT_mv850:
      /* Clear all CPU selection bits except MASK_V850.  */
      target_flags &= ~(MASK_CPU ^ MASK_V850);
      return true;

    case OPT_mv850e:
    case OPT_mv850e1:
      /* -mv850e1 selects the same code generation as -mv850e.  */
      target_flags &= ~(MASK_CPU ^ MASK_V850E);
      return true;

    case OPT_mtda:
      return v850_handle_memory_option (SMALL_MEMORY_TDA, arg);

    case OPT_msda:
      return v850_handle_memory_option (SMALL_MEMORY_SDA, arg);

    case OPT_mzda:
      return v850_handle_memory_option (SMALL_MEMORY_ZDA, arg);

    default:
      return true;
    }
}
+
/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */

static const struct default_options v850_option_optimization_table[] =
  {
    /* Omit the frame pointer at -O1 and above.  */
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    /* Note - we no longer enable MASK_EP when optimizing.  This is
       because of a hardware bug which stops the SLD and SST instructions
       from correctly detecting some hazards.  If the user is sure that
       their hardware is fixed or that their program will not encounter
       the conditions that trigger the bug then they can enable -mep by
       hand.  */
    { OPT_LEVELS_1_PLUS, OPT_mprolog_function, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };
+
+/* Handle the TARGET_PASS_BY_REFERENCE target hook.
+ Specify whether to pass the argument by reference. */
+
+static bool
+v850_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ unsigned HOST_WIDE_INT size;
+
+ if (type)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ return size > 8;
+}
+
+/* Implementing the Varargs Macros. */
+
+static bool
+v850_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
+{
+ return !TARGET_GHS ? true : false;
+}
+
+/* Return an RTX to represent where an argument with mode MODE
+ and type TYPE will be passed to a function. If the result
+ is NULL_RTX, the argument will be pushed. */
+
+static rtx
+v850_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ rtx result = NULL_RTX;
+ int size, align;
+
+ if (!named)
+ return NULL_RTX;
+
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ size = (size + UNITS_PER_WORD -1) & ~(UNITS_PER_WORD -1);
+
+ if (size < 1)
+ {
+ /* Once we have stopped using argument registers, do not start up again. */
+ cum->nbytes = 4 * UNITS_PER_WORD;
+ return NULL_RTX;
+ }
+
+ if (size <= UNITS_PER_WORD && type)
+ align = TYPE_ALIGN (type) / BITS_PER_UNIT;
+ else
+ align = size;
+
+ cum->nbytes = (cum->nbytes + align - 1) &~(align - 1);
+
+ if (cum->nbytes > 4 * UNITS_PER_WORD)
+ return NULL_RTX;
+
+ if (type == NULL_TREE
+ && cum->nbytes + size > 4 * UNITS_PER_WORD)
+ return NULL_RTX;
+
+ switch (cum->nbytes / UNITS_PER_WORD)
+ {
+ case 0:
+ result = gen_rtx_REG (mode, 6);
+ break;
+ case 1:
+ result = gen_rtx_REG (mode, 7);
+ break;
+ case 2:
+ result = gen_rtx_REG (mode, 8);
+ break;
+ case 3:
+ result = gen_rtx_REG (mode, 9);
+ break;
+ default:
+ result = NULL_RTX;
+ }
+
+ return result;
+}
+
+/* Return the number of bytes which must be put into registers
+ for values which are part in registers and part in memory. */
+static int
+v850_arg_partial_bytes (CUMULATIVE_ARGS * cum, enum machine_mode mode,
+ tree type, bool named)
+{
+ int size, align;
+
+ if (TARGET_GHS && !named)
+ return 0;
+
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ if (size < 1)
+ size = 1;
+
+ if (type)
+ align = TYPE_ALIGN (type) / BITS_PER_UNIT;
+ else
+ align = size;
+
+ cum->nbytes = (cum->nbytes + align - 1) & ~ (align - 1);
+
+ if (cum->nbytes > 4 * UNITS_PER_WORD)
+ return 0;
+
+ if (cum->nbytes + size <= 4 * UNITS_PER_WORD)
+ return 0;
+
+ if (type == NULL_TREE
+ && cum->nbytes + size > 4 * UNITS_PER_WORD)
+ return 0;
+
+ return 4 * UNITS_PER_WORD - cum->nbytes;
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+static void
+v850_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ cum->nbytes += (((type && int_size_in_bytes (type) > 8
+ ? GET_MODE_SIZE (Pmode)
+ : (mode != BLKmode
+ ? GET_MODE_SIZE (mode)
+ : int_size_in_bytes (type))) + UNITS_PER_WORD - 1)
+ & -UNITS_PER_WORD);
+}
+
/* Return the high and low words of a CONST_DOUBLE X in *P_HIGH and
   *P_LOW.  Aborts via fatal_insn on anything that is not a float or
   integer CONST_DOUBLE.  */

static void
const_double_split (rtx x, HOST_WIDE_INT * p_high, HOST_WIDE_INT * p_low)
{
  if (GET_CODE (x) == CONST_DOUBLE)
    {
      long t[2];
      REAL_VALUE_TYPE rv;

      switch (GET_MODE (x))
	{
	case DFmode:
	  REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	  REAL_VALUE_TO_TARGET_DOUBLE (rv, t);
	  *p_high = t[1];	/* since v850 is little endian */
	  *p_low = t[0];	/* high is second word */
	  return;

	case SFmode:
	  /* A single float fits entirely in the "high" word; the low
	     word is unused.  */
	  REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	  REAL_VALUE_TO_TARGET_SINGLE (rv, *p_high);
	  *p_low = 0;
	  return;

	case VOIDmode:
	case DImode:
	  /* Integer CONST_DOUBLEs carry their words directly.  */
	  *p_high = CONST_DOUBLE_HIGH (x);
	  *p_low = CONST_DOUBLE_LOW (x);
	  return;

	default:
	  break;
	}
    }

  fatal_insn ("const_double_split got a bad insn:", x);
}
+
+
+/* Return the cost of the rtx R with code CODE. */
+
+static int
+const_costs_int (HOST_WIDE_INT value, int zero_cost)
+{
+ if (CONST_OK_FOR_I (value))
+ return zero_cost;
+ else if (CONST_OK_FOR_J (value))
+ return 1;
+ else if (CONST_OK_FOR_K (value))
+ return 2;
+ else
+ return 4;
+}
+
+static int
+const_costs (rtx r, enum rtx_code c)
+{
+ HOST_WIDE_INT high, low;
+
+ switch (c)
+ {
+ case CONST_INT:
+ return const_costs_int (INTVAL (r), 0);
+
+ case CONST_DOUBLE:
+ const_double_split (r, &high, &low);
+ if (GET_MODE (r) == SFmode)
+ return const_costs_int (high, 1);
+ else
+ return const_costs_int (high, 1) + const_costs_int (low, 1);
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ return 2;
+
+ case HIGH:
+ return 1;
+
+ default:
+ return 4;
+ }
+}
+
+static bool
+v850_rtx_costs (rtx x,
+ int codearg,
+ int outer_code ATTRIBUTE_UNUSED,
+ int * total, bool speed)
+{
+ enum rtx_code code = (enum rtx_code) codearg;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ *total = COSTS_N_INSNS (const_costs (x, code));
+ return true;
+
+ case MOD:
+ case DIV:
+ case UMOD:
+ case UDIV:
+ if (TARGET_V850E && !speed)
+ *total = 6;
+ else
+ *total = 60;
+ return true;
+
+ case MULT:
+ if (TARGET_V850E
+ && ( GET_MODE (x) == SImode
+ || GET_MODE (x) == HImode
+ || GET_MODE (x) == QImode))
+ {
+ if (GET_CODE (XEXP (x, 1)) == REG)
+ *total = 4;
+ else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ if (CONST_OK_FOR_O (INTVAL (XEXP (x, 1))))
+ *total = 6;
+ else if (CONST_OK_FOR_K (INTVAL (XEXP (x, 1))))
+ *total = 10;
+ }
+ }
+ else
+ *total = 20;
+ return true;
+
+ case ZERO_EXTRACT:
+ if (outer_code == COMPARE)
+ *total = 0;
+ return false;
+
+ default:
+ return false;
+ }
+}
+
/* Print operand X using operand code CODE to assembly language output file
   FILE.  Codes: 'b'/'c' condition, 'B'/'C' reversed condition, 'F'/'G'
   high/low word of a double constant, 'L' low 16 bits, 'M' log2,
   'O'/'P'/'Q' small-data symbol pieces, 'R' second word of a double,
   'S'/'T' short-load prefix, 'W' mode suffix, '.' r0, 'z' reg-or-zero.  */

static void
v850_print_operand (FILE * file, rtx x, int code)
{
  HOST_WIDE_INT high, low;

  switch (code)
    {
    case 'c':
      /* We use 'c' operands with symbols for .vtinherit */
      if (GET_CODE (x) == SYMBOL_REF)
        {
          output_addr_const(file, x);
          break;
        }
      /* fall through */
    case 'b':
    case 'B':
    case 'C':
      /* Print a condition mnemonic; 'B' and 'C' use the reversed
	 condition, 'c' and 'C' use the z/nz spellings.  */
      switch ((code == 'B' || code == 'C')
	      ? reverse_condition (GET_CODE (x)) : GET_CODE (x))
	{
	  case NE:
	    if (code == 'c' || code == 'C')
	      fprintf (file, "nz");
	    else
	      fprintf (file, "ne");
	    break;
	  case EQ:
	    if (code == 'c' || code == 'C')
	      fprintf (file, "z");
	    else
	      fprintf (file, "e");
	    break;
	  case GE:
	    fprintf (file, "ge");
	    break;
	  case GT:
	    fprintf (file, "gt");
	    break;
	  case LE:
	    fprintf (file, "le");
	    break;
	  case LT:
	    fprintf (file, "lt");
	    break;
	  case GEU:
	    fprintf (file, "nl");
	    break;
	  case GTU:
	    fprintf (file, "h");
	    break;
	  case LEU:
	    fprintf (file, "nh");
	    break;
	  case LTU:
	    fprintf (file, "l");
	    break;
	  default:
	    gcc_unreachable ();
	}
      break;
    case 'F':			/* high word of CONST_DOUBLE */
      switch (GET_CODE (x))
	{
	case CONST_INT:
	  /* The high word of a sign-extended CONST_INT is its sign.  */
	  fprintf (file, "%d", (INTVAL (x) >= 0) ? 0 : -1);
	  break;

	case CONST_DOUBLE:
	  const_double_split (x, &high, &low);
	  fprintf (file, "%ld", (long) high);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case 'G':			/* low word of CONST_DOUBLE */
      switch (GET_CODE (x))
	{
	case CONST_INT:
	  fprintf (file, "%ld", (long) INTVAL (x));
	  break;

	case CONST_DOUBLE:
	  const_double_split (x, &high, &low);
	  fprintf (file, "%ld", (long) low);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case 'L':
      /* Low 16 bits of the constant.  NOTE(review): the trailing "\n"
	 in an operand print looks suspicious but is preserved --
	 confirm against the uses of %L in v850.md.  */
      fprintf (file, "%d\n", (int)(INTVAL (x) & 0xffff));
      break;
    case 'M':
      /* The bit position of a power-of-two constant.  */
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'O':
      /* The small-data relocation operator for a special symbol.  */
      gcc_assert (special_symbolref_operand (x, VOIDmode));

      if (GET_CODE (x) == CONST)
	x = XEXP (XEXP (x, 0), 0);
      else
	gcc_assert (GET_CODE (x) == SYMBOL_REF);

      if (SYMBOL_REF_ZDA_P (x))
	fprintf (file, "zdaoff");
      else if (SYMBOL_REF_SDA_P (x))
	fprintf (file, "sdaoff");
      else if (SYMBOL_REF_TDA_P (x))
	fprintf (file, "tdaoff");
      else
	gcc_unreachable ();
      break;
    case 'P':
      /* The symbol itself (must be a special small-data symbol).  */
      gcc_assert (special_symbolref_operand (x, VOIDmode));
      output_addr_const (file, x);
      break;
    case 'Q':
      /* The base register matching the symbol's small-data area.  */
      gcc_assert (special_symbolref_operand (x, VOIDmode));

      if (GET_CODE (x) == CONST)
	x = XEXP (XEXP (x, 0), 0);
      else
	gcc_assert (GET_CODE (x) == SYMBOL_REF);

      if (SYMBOL_REF_ZDA_P (x))
	fprintf (file, "r0");
      else if (SYMBOL_REF_SDA_P (x))
	fprintf (file, "gp");
      else if (SYMBOL_REF_TDA_P (x))
	fprintf (file, "ep");
      else
	gcc_unreachable ();
      break;
    case 'R':		/* 2nd word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  /* NOTE(review): reg_names[] is passed as the format string;
	     safe as long as register names contain no '%', but fputs
	     would be the defensive choice.  */
	  fprintf (file, reg_names[REGNO (x) + 1]);
	  break;
	case MEM:
	  x = XEXP (adjust_address (x, SImode, 4), 0);
	  v850_print_operand_address (file, x);
	  if (GET_CODE (x) == CONST_INT)
	    fprintf (file, "[r0]");
	  break;

	default:
	  break;
	}
      break;
    case 'S':
      {
	/* If it's a reference to a TDA variable, use sst/sld vs. st/ld.  */
	if (GET_CODE (x) == MEM && ep_memory_operand (x, GET_MODE (x), FALSE))
	  fputs ("s", file);

	break;
      }
    case 'T':
      {
	/* Like an 'S' operand above, but for unsigned loads only.  */
	if (GET_CODE (x) == MEM && ep_memory_operand (x, GET_MODE (x), TRUE))
	  fputs ("s", file);

	break;
      }
    case 'W':			/* print the instruction suffix */
      switch (GET_MODE (x))
	{
	default:
	  gcc_unreachable ();

	case QImode: fputs (".b", file); break;
	case HImode: fputs (".h", file); break;
	case SImode: fputs (".w", file); break;
	case SFmode: fputs (".w", file); break;
	}
      break;
    case '.':			/* register r0 */
      fputs (reg_names[0], file);
      break;
    case 'z':			/* reg or zero */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x)], file);
      else if ((GET_MODE(x) == SImode
		|| GET_MODE(x) == DFmode
		|| GET_MODE(x) == SFmode)
	       && x == CONST0_RTX(GET_MODE(x)))
	fputs (reg_names[0], file);
      else
	{
	  gcc_assert (x == const0_rtx);
	  fputs (reg_names[0], file);
	}
      break;
    default:
      /* No operand code: print the operand according to its rtx code.  */
      switch (GET_CODE (x))
	{
	case MEM:
	  /* A constant address is printed as an offset from r0.  */
	  if (GET_CODE (XEXP (x, 0)) == CONST_INT)
	    output_address (gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 0),
					  XEXP (x, 0)));
	  else
	    output_address (XEXP (x, 0));
	  break;

	case REG:
	  fputs (reg_names[REGNO (x)], file);
	  break;
	case SUBREG:
	  fputs (reg_names[subreg_regno (x)], file);
	  break;
	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	  v850_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    }
}
+
+
+/* Output assembly language output for the address ADDR to FILE.
+   Called from v850_print_operand for the 'R' case and for generic
+   operands.  Symbols placed in the tiny/small/zero data areas are
+   wrapped in tdaoff()/sdaoff()/zdaoff() relocations with the matching
+   base register (ep/gp/r0).  */
+
+static void
+v850_print_operand_address (FILE * file, rtx addr)
+{
+  switch (GET_CODE (addr))
+    {
+    case REG:
+      /* A bare register is printed as a zero-displacement memory
+         operand: "0[reg]".  */
+      fprintf (file, "0[");
+      v850_print_operand (file, addr, 0);
+      fprintf (file, "]");
+      break;
+    case LO_SUM:
+      if (GET_CODE (XEXP (addr, 0)) == REG)
+	{
+	  /* reg,foo -> "lo(foo)[reg]".  */
+	  fprintf (file, "lo(");
+	  v850_print_operand (file, XEXP (addr, 1), 0);
+	  fprintf (file, ")[");
+	  v850_print_operand (file, XEXP (addr, 0), 0);
+	  fprintf (file, "]");
+	}
+      /* NOTE(review): a LO_SUM whose first operand is not a REG emits
+         nothing at all -- presumably such addresses never reach this
+         point; confirm against the movsi patterns.  */
+      break;
+    case PLUS:
+      if (GET_CODE (XEXP (addr, 0)) == REG
+	  || GET_CODE (XEXP (addr, 0)) == SUBREG)
+	{
+	  /* reg,foo -> "foo[reg]".  */
+	  v850_print_operand (file, XEXP (addr, 1), 0);
+	  fprintf (file, "[");
+	  v850_print_operand (file, XEXP (addr, 0), 0);
+	  fprintf (file, "]");
+	}
+      else
+	{
+	  /* Neither operand is a register: print a plain sum.  */
+	  v850_print_operand (file, XEXP (addr, 0), 0);
+	  fprintf (file, "+");
+	  v850_print_operand (file, XEXP (addr, 1), 0);
+	}
+      break;
+    case SYMBOL_REF:
+      {
+	/* Both stay NULL for symbols outside the special data areas,
+	   in which case the bare symbol is printed.  */
+	const char *off_name = NULL;
+	const char *reg_name = NULL;
+
+	if (SYMBOL_REF_ZDA_P (addr))
+	  {
+	    off_name = "zdaoff";
+	    reg_name = "r0";
+	  }
+	else if (SYMBOL_REF_SDA_P (addr))
+	  {
+	    off_name = "sdaoff";
+	    reg_name = "gp";
+	  }
+	else if (SYMBOL_REF_TDA_P (addr))
+	  {
+	    off_name = "tdaoff";
+	    reg_name = "ep";
+	  }
+
+	if (off_name)
+	  fprintf (file, "%s(", off_name);
+	output_addr_const (file, addr);
+	if (reg_name)
+	  fprintf (file, ")[%s]", reg_name);
+      }
+      break;
+    case CONST:
+      if (special_symbolref_operand (addr, VOIDmode))
+	{
+	  /* special_symbolref_operand guarantees a (const (plus
+	     (symbol_ref ...) ...)) whose symbol is in one of the three
+	     special areas, hence the gcc_unreachable below.  */
+	  rtx x = XEXP (XEXP (addr, 0), 0);
+	  const char *off_name;
+	  const char *reg_name;
+
+	  if (SYMBOL_REF_ZDA_P (x))
+	    {
+	      off_name = "zdaoff";
+	      reg_name = "r0";
+	    }
+	  else if (SYMBOL_REF_SDA_P (x))
+	    {
+	      off_name = "sdaoff";
+	      reg_name = "gp";
+	    }
+	  else if (SYMBOL_REF_TDA_P (x))
+	    {
+	      off_name = "tdaoff";
+	      reg_name = "ep";
+	    }
+	  else
+	    gcc_unreachable ();
+
+	  fprintf (file, "%s(", off_name);
+	  output_addr_const (file, addr);
+	  fprintf (file, ")[%s]", reg_name);
+	}
+      else
+	output_addr_const (file, addr);
+      break;
+    default:
+      output_addr_const (file, addr);
+      break;
+    }
+}
+
+/* Return true if CODE is a punctuation character accepted by
+   v850_print_operand; only '.' (which prints register r0) is.  */
+
+static bool
+v850_print_operand_punct_valid_p (unsigned char code)
+{
+  switch (code)
+    {
+    case '.':
+      return true;
+    default:
+      return false;
+    }
+}
+
+/* When assemble_integer is used to emit the offsets for a switch
+ table it can encounter (TRUNCATE:HI (MINUS:SI (LABEL_REF:SI) (LABEL_REF:SI))).
+ output_addr_const will normally barf at this, but it is OK to omit
+ the truncate and just emit the difference of the two labels. The
+ .hword directive will automatically handle the truncation for us.
+
+ Returns 1 if rtx was handled, 0 otherwise. */
+
+int
+v850_output_addr_const_extra (FILE * file, rtx x)
+{
+  rtx inner;
+
+  if (GET_CODE (x) != TRUNCATE)
+    return 0;
+
+  inner = XEXP (x, 0);
+
+  /* If the switch table was collapsed to a constant value, its first
+     label has been deleted.  Emitting nothing is then correct because
+     the table itself is dead (cf gcc.c-torture/compile/990801-1.c).  */
+  if (GET_CODE (inner) == MINUS
+      && GET_CODE (XEXP (inner, 0)) == LABEL_REF
+      && GET_CODE (XEXP (XEXP (inner, 0), 0)) == CODE_LABEL
+      && INSN_DELETED_P (XEXP (XEXP (inner, 0), 0)))
+    return 1;
+
+  /* Drop the TRUNCATE and emit just the label difference; the .hword
+     directive performs the truncation for us.  */
+  output_addr_const (file, inner);
+  return 1;
+}
+
+/* Return appropriate code to load up a 1, 2, or 4 integer/floating
+ point value. */
+
+const char *
+output_move_single (rtx * operands)
+{
+  rtx dst = operands[0];
+  rtx src = operands[1];
+
+  if (REG_P (dst))
+    {
+      if (REG_P (src))
+	return "mov %1,%0";
+
+      else if (GET_CODE (src) == CONST_INT)
+	{
+	  HOST_WIDE_INT value = INTVAL (src);
+
+	  if (CONST_OK_FOR_J (value))		/* Signed 5-bit immediate.  */
+	    return "mov %1,%0";
+
+	  else if (CONST_OK_FOR_K (value))	/* Signed 16-bit immediate.  */
+	    return "movea %1,%.,%0";
+
+	  else if (CONST_OK_FOR_L (value))	/* Upper 16 bits were set.  */
+	    return "movhi hi0(%1),%.,%0";
+
+	  /* A random constant.  */
+	  else if (TARGET_V850E || TARGET_V850E2_ALL)
+	      return "mov %1,%0";
+	  else
+	    /* Two-insn sequence: high half then low half.  */
+	    return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
+	}
+
+      else if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
+	{
+	  /* %F prints the SFmode constant's bit pattern; the same
+	     immediate-size checks as the CONST_INT case apply to it.  */
+	  HOST_WIDE_INT high, low;
+
+	  const_double_split (src, &high, &low);
+
+	  if (CONST_OK_FOR_J (high))		/* Signed 5-bit immediate.  */
+	    return "mov %F1,%0";
+
+	  else if (CONST_OK_FOR_K (high))	/* Signed 16-bit immediate.  */
+	    return "movea %F1,%.,%0";
+
+	  else if (CONST_OK_FOR_L (high))	/* Upper 16 bits were set.  */
+	    return "movhi hi0(%F1),%.,%0";
+
+	  /* A random constant.  */
+	  else if (TARGET_V850E || TARGET_V850E2_ALL)
+	      return "mov %F1,%0";
+
+	  else
+	    return "movhi hi(%F1),%.,%0\n\tmovea lo(%F1),%0,%0";
+	}
+
+      else if (GET_CODE (src) == MEM)
+	/* %S selects the short "sld" form when the address goes through
+	   ep; %W appends the .b/.h/.w width suffix.  */
+	return "%S1ld%W1 %1,%0";
+
+      else if (special_symbolref_operand (src, VOIDmode))
+	return "movea %O1(%P1),%Q1,%0";
+
+      else if (GET_CODE (src) == LABEL_REF
+	       || GET_CODE (src) == SYMBOL_REF
+	       || GET_CODE (src) == CONST)
+	{
+	  if (TARGET_V850E || TARGET_V850E2_ALL)
+	    return "mov hilo(%1),%0";
+	  else
+	    return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
+	}
+
+      else if (GET_CODE (src) == HIGH)
+	return "movhi hi(%1),%.,%0";
+
+      else if (GET_CODE (src) == LO_SUM)
+	{
+	  operands[2] = XEXP (src, 0);
+	  operands[3] = XEXP (src, 1);
+	  return "movea lo(%3),%2,%0";
+	}
+    }
+
+  else if (GET_CODE (dst) == MEM)
+    {
+      if (REG_P (src))
+	return "%S0st%W0 %1,%0";
+
+      else if (GET_CODE (src) == CONST_INT && INTVAL (src) == 0)
+	/* Store zero directly from r0 (%.).  */
+	return "%S0st%W0 %.,%0";
+
+      else if (GET_CODE (src) == CONST_DOUBLE
+	       && CONST0_RTX (GET_MODE (dst)) == src)
+	return "%S0st%W0 %.,%0";
+    }
+
+  /* Anything else (e.g. constant-to-memory) should have been rejected
+     by the predicates/constraints.  */
+  fatal_insn ("output_move_single:", gen_rtx_SET (VOIDmode, dst, src));
+  return "";
+}
+
+/* Generate comparison code. */
+/* Return nonzero if OP is a comparison of the CC register against zero
+   whose code matches the "zero-flag" FPU condition mode (LT/LE/EQ) that
+   the CC register currently carries.  */
+
+int
+v850_float_z_comparison_operator (rtx op, enum machine_mode mode)
+{
+  enum rtx_code code = GET_CODE (op);
+  rtx cc;
+
+  if (GET_RTX_CLASS (code) != RTX_COMPARE
+      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE)
+    return 0;
+
+  if (mode != GET_MODE (op) && mode != VOIDmode)
+    return 0;
+
+  cc = XEXP (op, 0);
+  if (GET_CODE (cc) != REG
+      || REGNO (cc) != CC_REGNUM
+      || XEXP (op, 1) != const0_rtx)
+    return 0;
+
+  switch (GET_MODE (cc))
+    {
+    case CC_FPU_LTmode:
+      return code == LT;
+    case CC_FPU_LEmode:
+      return code == LE;
+    case CC_FPU_EQmode:
+      return code == EQ;
+    default:
+      return 0;
+    }
+}
+
+/* Return nonzero if OP is a comparison of the CC register against zero
+   whose code matches the "non-zero-flag" FPU condition mode (GT/GE/NE)
+   that the CC register currently carries.  */
+
+int
+v850_float_nz_comparison_operator (rtx op, enum machine_mode mode)
+{
+  enum rtx_code code = GET_CODE (op);
+  rtx cc;
+
+  if (GET_RTX_CLASS (code) != RTX_COMPARE
+      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE)
+    return 0;
+
+  if (mode != GET_MODE (op) && mode != VOIDmode)
+    return 0;
+
+  cc = XEXP (op, 0);
+  if (GET_CODE (cc) != REG
+      || REGNO (cc) != CC_REGNUM
+      || XEXP (op, 1) != const0_rtx)
+    return 0;
+
+  switch (GET_MODE (cc))
+    {
+    case CC_FPU_GTmode:
+      return code == GT;
+    case CC_FPU_GEmode:
+      return code == GE;
+    case CC_FPU_NEmode:
+      return code == NE;
+    default:
+      return 0;
+    }
+}
+
+/* Return the condition-code mode to use for a COND comparison of OP0
+   with OP1.  Integer comparisons use plain CCmode; each FPU comparison
+   tracks its result in a dedicated CC_FPU_* mode.  */
+
+enum machine_mode
+v850_select_cc_mode (enum rtx_code cond, rtx op0, rtx op1 ATTRIBUTE_UNUSED)
+{
+  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_FLOAT)
+    return CCmode;
+
+  switch (cond)
+    {
+    case LE:
+      return CC_FPU_LEmode;
+    case GE:
+      return CC_FPU_GEmode;
+    case LT:
+      return CC_FPU_LTmode;
+    case GT:
+      return CC_FPU_GTmode;
+    case EQ:
+      return CC_FPU_EQmode;
+    case NE:
+      return CC_FPU_NEmode;
+    default:
+      /* No other float comparison codes are expected here.  */
+      abort ();
+    }
+}
+
+/* Emit the FPU comparison of OP0 with OP1 selected by COND and return
+   the condition-code mode in which its result must be tested.  COND
+   must be one of LE/GE/LT/GT/EQ/NE and the operands DFmode or SFmode;
+   anything else aborts.
+
+   Note: the SFmode branch tests GET_MODE (op0), the operand actually
+   passed in.  The previous code consulted the global v850_compare_op0
+   instead, which need not match OP0 (v850_gen_compare passes its own
+   operands directly), so SFmode comparisons could fall through to the
+   abort or pick the wrong patterns.  */
+
+enum machine_mode
+v850_gen_float_compare (enum rtx_code cond, enum machine_mode mode ATTRIBUTE_UNUSED, rtx op0, rtx op1)
+{
+  if (GET_MODE (op0) == DFmode)
+    {
+      switch (cond)
+	{
+	case LE:
+	  emit_insn (gen_cmpdf_le_insn (op0, op1));
+	  break;
+	case GE:
+	  emit_insn (gen_cmpdf_ge_insn (op0, op1));
+	  break;
+	case LT:
+	  emit_insn (gen_cmpdf_lt_insn (op0, op1));
+	  break;
+	case GT:
+	  emit_insn (gen_cmpdf_gt_insn (op0, op1));
+	  break;
+	case EQ:
+	  emit_insn (gen_cmpdf_eq_insn (op0, op1));
+	  break;
+	case NE:
+	  emit_insn (gen_cmpdf_ne_insn (op0, op1));
+	  break;
+	default:
+	  abort ();
+	}
+    }
+  else if (GET_MODE (op0) == SFmode)
+    {
+      switch (cond)
+	{
+	case LE:
+	  emit_insn (gen_cmpsf_le_insn (op0, op1));
+	  break;
+	case GE:
+	  emit_insn (gen_cmpsf_ge_insn (op0, op1));
+	  break;
+	case LT:
+	  emit_insn (gen_cmpsf_lt_insn (op0, op1));
+	  break;
+	case GT:
+	  emit_insn (gen_cmpsf_gt_insn (op0, op1));
+	  break;
+	case EQ:
+	  emit_insn (gen_cmpsf_eq_insn (op0, op1));
+	  break;
+	case NE:
+	  emit_insn (gen_cmpsf_ne_insn (op0, op1));
+	  break;
+	default:
+	  abort ();
+	}
+    }
+  else
+    {
+      /* Only DFmode and SFmode FPU comparisons exist.  */
+      abort ();
+    }
+
+  return v850_select_cc_mode (cond, op0, op1);
+}
+
+/* Emit the comparison of OP0 with OP1 and return a COND rtx of mode
+   MODE testing the resulting condition-code register against zero,
+   suitable for use as a branch condition.  */
+
+rtx
+v850_gen_compare (enum rtx_code cond, enum machine_mode mode, rtx op0, rtx op1)
+{
+  rtx cc_reg;
+
+  /* Floating-point comparisons go through the FPU compare patterns and
+     then copy the FPU condition flags into the ordinary CC register.  */
+  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+    {
+      mode = v850_gen_float_compare (cond, mode, op0, op1);
+      cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+      emit_insn (gen_rtx_SET (mode, cc_reg, gen_rtx_REG (mode, FCC_REGNUM)));
+      return gen_rtx_fmt_ee (cond, mode, cc_reg, const0_rtx);
+    }
+
+  /* Integer comparison: a plain compare setting the CC register.  */
+  emit_insn (gen_cmpsi_insn (op0, op1));
+  return gen_rtx_fmt_ee (cond, mode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+}
+
+/* Return the maximum offset (exclusive) supported for a short EP
+   memory reference of mode MODE and signedness UNSIGNEDP, or 0 if MODE
+   cannot be accessed through EP at all.
+
+   UNSIGNEDP was wrongly tagged ATTRIBUTE_UNUSED although it is read in
+   both the QImode and HImode cases below; the stale attribute has been
+   removed.  */
+
+static int
+ep_memory_offset (enum machine_mode mode, int unsignedp)
+{
+  int max_offset = 0;
+
+  switch (mode)
+    {
+    case QImode:
+      /* sld.bu (unsigned, V850E+) and -msmall-sld only reach 16 bytes;
+	 the signed sld.b reaches 128.  */
+      if (TARGET_SMALL_SLD)
+	max_offset = (1 << 4);
+      else if ((TARGET_V850E || TARGET_V850E2_ALL)
+	       && unsignedp)
+	max_offset = (1 << 4);
+      else
+	max_offset = (1 << 7);
+      break;
+
+    case HImode:
+      if (TARGET_SMALL_SLD)
+	max_offset = (1 << 5);
+      else if ((TARGET_V850E || TARGET_V850E2_ALL)
+	       && unsignedp)
+	max_offset = (1 << 5);
+      else
+	max_offset = (1 << 8);
+      break;
+
+    case SImode:
+    case SFmode:
+      max_offset = (1 << 8);
+      break;
+
+    default:
+      /* Other modes have no short EP form; return 0.  */
+      break;
+    }
+
+  return max_offset;
+}
+
+/* Return true if OP is a MEM of mode MODE (UNSIGNED_LOAD nonzero for
+   unsigned loads) that can be expressed as a short EP reference:
+   either relative to the ep register or to a tiny-data-area symbol,
+   with a small, properly aligned, non-negative offset.  */
+
+int
+ep_memory_operand (rtx op, enum machine_mode mode, int unsigned_load)
+{
+  rtx addr;
+  int max_offset;
+  int align_mask;
+
+  /* If we are not using the EP register on a per-function basis
+     then do not allow this optimization at all.  This is to
+     prevent the use of the SLD/SST instructions which cannot be
+     guaranteed to work properly due to a hardware bug.  */
+  if (!TARGET_EP)
+    return FALSE;
+
+  if (GET_CODE (op) != MEM)
+    return FALSE;
+
+  max_offset = ep_memory_offset (mode, unsigned_load);
+  align_mask = GET_MODE_SIZE (mode) - 1;
+
+  addr = XEXP (op, 0);
+  if (GET_CODE (addr) == CONST)
+    addr = XEXP (addr, 0);
+
+  if (GET_CODE (addr) == SYMBOL_REF)
+    return SYMBOL_REF_TDA_P (addr);
+
+  if (GET_CODE (addr) == REG)
+    return REGNO (addr) == EP_REGNUM;
+
+  if (GET_CODE (addr) == PLUS)
+    {
+      rtx base = XEXP (addr, 0);
+      rtx offset = XEXP (addr, 1);
+
+      /* The offset must be a small, aligned, non-negative constant.  */
+      if (GET_CODE (offset) == CONST_INT
+	  && INTVAL (offset) >= 0
+	  && INTVAL (offset) < max_offset
+	  && (INTVAL (offset) & align_mask) == 0)
+	{
+	  if (GET_CODE (base) == REG && REGNO (base) == EP_REGNUM)
+	    return TRUE;
+
+	  if (GET_CODE (base) == SYMBOL_REF && SYMBOL_REF_TDA_P (base))
+	    return TRUE;
+	}
+    }
+
+  return FALSE;
+}
+
+/* Substitute memory references involving a pointer, to use the ep pointer,
+   taking care to save and preserve the ep.
+
+   Rewrites every single-set MEM between FIRST_INSN and LAST_INSN whose
+   address is based on hard register REGNO (the caller counted USES such
+   references) to go through ep instead, then brackets the region with
+   "r1 = ep; ep = REGNO" before it and "ep = r1" after it.  *P_R1 and
+   *P_EP cache the r1/ep REG rtxes across calls.  */
+
+static void
+substitute_ep_register (rtx first_insn,
+                        rtx last_insn,
+                        int uses,
+                        int regno,
+                        rtx * p_r1,
+                        rtx * p_ep)
+{
+  rtx reg = gen_rtx_REG (Pmode, regno);
+  rtx insn;
+
+  if (!*p_r1)
+    {
+      /* First substitution in this function: create the cached rtxes
+	 and mark r1 (the ep save slot) as live.  */
+      df_set_regs_ever_live (1, true);
+      *p_r1 = gen_rtx_REG (Pmode, 1);
+      *p_ep = gen_rtx_REG (Pmode, 30);
+    }
+
+  if (TARGET_DEBUG)
+    fprintf (stderr, "\
+Saved %d bytes (%d uses of register %s) in function %s, starting as insn %d, ending at %d\n",
+	     2 * (uses - 3), uses, reg_names[regno],
+	     IDENTIFIER_POINTER (DECL_NAME (current_function_decl)),
+	     INSN_UID (first_insn), INSN_UID (last_insn));
+
+  if (GET_CODE (first_insn) == NOTE)
+    first_insn = next_nonnote_insn (first_insn);
+
+  last_insn = next_nonnote_insn (last_insn);
+  for (insn = first_insn; insn && insn != last_insn; insn = NEXT_INSN (insn))
+    {
+      if (GET_CODE (insn) == INSN)
+	{
+	  rtx pattern = single_set (insn);
+
+	  /* Replace the memory references.  */
+	  if (pattern)
+	    {
+	      rtx *p_mem;
+	      /* Memory operands are signed by default.  */
+	      int unsignedp = FALSE;
+
+	      /* A MEM-to-MEM set cannot use the short forms; skip it.  */
+	      if (GET_CODE (SET_DEST (pattern)) == MEM
+		  && GET_CODE (SET_SRC (pattern)) == MEM)
+		p_mem = (rtx *)0;
+
+	      else if (GET_CODE (SET_DEST (pattern)) == MEM)
+		p_mem = &SET_DEST (pattern);
+
+	      else if (GET_CODE (SET_SRC (pattern)) == MEM)
+		p_mem = &SET_SRC (pattern);
+
+	      else if (GET_CODE (SET_SRC (pattern)) == SIGN_EXTEND
+		       && GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
+		p_mem = &XEXP (SET_SRC (pattern), 0);
+
+	      else if (GET_CODE (SET_SRC (pattern)) == ZERO_EXTEND
+		       && GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
+		{
+		  p_mem = &XEXP (SET_SRC (pattern), 0);
+		  unsignedp = TRUE;
+		}
+	      else
+		p_mem = (rtx *)0;
+
+	      if (p_mem)
+		{
+		  rtx addr = XEXP (*p_mem, 0);
+
+		  /* (mem (reg REGNO)) -> (mem (reg ep)).  */
+		  if (GET_CODE (addr) == REG && REGNO (addr) == (unsigned) regno)
+		    *p_mem = change_address (*p_mem, VOIDmode, *p_ep);
+
+		  /* (mem (plus (reg REGNO) (const_int))) -> through ep,
+		     provided the offset fits the short form.  */
+		  else if (GET_CODE (addr) == PLUS
+			   && GET_CODE (XEXP (addr, 0)) == REG
+			   && REGNO (XEXP (addr, 0)) == (unsigned) regno
+			   && GET_CODE (XEXP (addr, 1)) == CONST_INT
+			   && ((INTVAL (XEXP (addr, 1)))
+			       < ep_memory_offset (GET_MODE (*p_mem),
+						   unsignedp))
+			   && ((INTVAL (XEXP (addr, 1))) >= 0))
+		    *p_mem = change_address (*p_mem, VOIDmode,
+					     gen_rtx_PLUS (Pmode,
+							   *p_ep,
+							   XEXP (addr, 1)));
+		}
+	    }
+	}
+    }
+
+  /* Optimize back to back cases of ep <- r1 & r1 <- ep.  */
+  insn = prev_nonnote_insn (first_insn);
+  if (insn && GET_CODE (insn) == INSN
+      && GET_CODE (PATTERN (insn)) == SET
+      && SET_DEST (PATTERN (insn)) == *p_ep
+      && SET_SRC (PATTERN (insn)) == *p_r1)
+    delete_insn (insn);
+  else
+    emit_insn_before (gen_rtx_SET (Pmode, *p_r1, *p_ep), first_insn);
+
+  emit_insn_before (gen_rtx_SET (Pmode, *p_ep, reg), first_insn);
+  emit_insn_before (gen_rtx_SET (Pmode, *p_ep, *p_r1), last_insn);
+}
+
+
+/* TARGET_MACHINE_DEPENDENT_REORG.  On the 850, we use it to implement
+   the -mep mode to copy heavily used pointers to ep to use the implicit
+   addressing.  Scans each basic block, counting short-formable memory
+   references per base register, and calls substitute_ep_register for
+   the best candidate when it has more than 3 uses.  */
+
+static void
+v850_reorg (void)
+{
+  /* Per-register count of short-formable memory references seen in the
+     current basic block, plus the insn range they span.  */
+  struct
+  {
+    int uses;
+    rtx first_insn;
+    rtx last_insn;
+  }
+  regs[FIRST_PSEUDO_REGISTER];
+
+  int i;
+  int use_ep = FALSE;
+  rtx r1 = NULL_RTX;
+  rtx ep = NULL_RTX;
+  rtx insn;
+  rtx pattern;
+
+  /* If not ep mode, just return now.  */
+  if (!TARGET_EP)
+    return;
+
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      regs[i].uses = 0;
+      regs[i].first_insn = NULL_RTX;
+      regs[i].last_insn = NULL_RTX;
+    }
+
+  for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+    {
+      switch (GET_CODE (insn))
+	{
+	  /* End of basic block */
+	default:
+	  if (!use_ep)
+	    {
+	      /* Commit the best candidate register seen in this block,
+		 if it was referenced often enough to pay off.  */
+	      int max_uses = -1;
+	      int max_regno = -1;
+
+	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+		{
+		  if (max_uses < regs[i].uses)
+		    {
+		      max_uses = regs[i].uses;
+		      max_regno = i;
+		    }
+		}
+
+	      if (max_uses > 3)
+		substitute_ep_register (regs[max_regno].first_insn,
+					regs[max_regno].last_insn,
+					max_uses, max_regno, &r1, &ep);
+	    }
+
+	  /* Reset the per-block state.  */
+	  use_ep = FALSE;
+	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+	    {
+	      regs[i].uses = 0;
+	      regs[i].first_insn = NULL_RTX;
+	      regs[i].last_insn = NULL_RTX;
+	    }
+	  break;
+
+	case NOTE:
+	  break;
+
+	case INSN:
+	  pattern = single_set (insn);
+
+	  /* See if there are any memory references we can shorten */
+	  if (pattern)
+	    {
+	      rtx src = SET_SRC (pattern);
+	      rtx dest = SET_DEST (pattern);
+	      rtx mem;
+	      /* Memory operands are signed by default.  */
+	      int unsignedp = FALSE;
+
+	      /* We might have (SUBREG (MEM)) here, so just get rid of the
+		 subregs to make this code simpler.  */
+	      if (GET_CODE (dest) == SUBREG
+		  && (GET_CODE (SUBREG_REG (dest)) == MEM
+		      || GET_CODE (SUBREG_REG (dest)) == REG))
+		alter_subreg (&dest);
+	      if (GET_CODE (src) == SUBREG
+		  && (GET_CODE (SUBREG_REG (src)) == MEM
+		      || GET_CODE (SUBREG_REG (src)) == REG))
+		alter_subreg (&src);
+
+	      /* Find the (single) memory operand, if any; MEM-to-MEM
+		 sets have no short form.  */
+	      if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM)
+		mem = NULL_RTX;
+
+	      else if (GET_CODE (dest) == MEM)
+		mem = dest;
+
+	      else if (GET_CODE (src) == MEM)
+		mem = src;
+
+	      else if (GET_CODE (src) == SIGN_EXTEND
+		       && GET_CODE (XEXP (src, 0)) == MEM)
+		mem = XEXP (src, 0);
+
+	      else if (GET_CODE (src) == ZERO_EXTEND
+		       && GET_CODE (XEXP (src, 0)) == MEM)
+		{
+		  mem = XEXP (src, 0);
+		  unsignedp = TRUE;
+		}
+	      else
+		mem = NULL_RTX;
+
+	      /* Already an ep reference: nothing to gain in this block.  */
+	      if (mem && ep_memory_operand (mem, GET_MODE (mem), unsignedp))
+		use_ep = TRUE;
+
+	      else if (!use_ep && mem
+		       && GET_MODE_SIZE (GET_MODE (mem)) <= UNITS_PER_WORD)
+		{
+		  rtx addr = XEXP (mem, 0);
+		  int regno = -1;
+		  int short_p;
+
+		  if (GET_CODE (addr) == REG)
+		    {
+		      short_p = TRUE;
+		      regno = REGNO (addr);
+		    }
+
+		  else if (GET_CODE (addr) == PLUS
+			   && GET_CODE (XEXP (addr, 0)) == REG
+			   && GET_CODE (XEXP (addr, 1)) == CONST_INT
+			   && ((INTVAL (XEXP (addr, 1)))
+			       < ep_memory_offset (GET_MODE (mem), unsignedp))
+			   && ((INTVAL (XEXP (addr, 1))) >= 0))
+		    {
+		      short_p = TRUE;
+		      regno = REGNO (XEXP (addr, 0));
+		    }
+
+		  else
+		    short_p = FALSE;
+
+		  if (short_p)
+		    {
+		      regs[regno].uses++;
+		      regs[regno].last_insn = insn;
+		      if (!regs[regno].first_insn)
+			regs[regno].first_insn = insn;
+		    }
+		}
+
+	      /* Loading up a register in the basic block zaps any savings
+		 for the register */
+	      if (GET_CODE (dest) == REG)
+		{
+		  enum machine_mode mode = GET_MODE (dest);
+		  int regno;
+		  int endregno;
+
+		  regno = REGNO (dest);
+		  endregno = regno + HARD_REGNO_NREGS (regno, mode);
+
+		  if (!use_ep)
+		    {
+		      /* See if we can use the pointer before this
+			 modification.  */
+		      int max_uses = -1;
+		      int max_regno = -1;
+
+		      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+			{
+			  if (max_uses < regs[i].uses)
+			    {
+			      max_uses = regs[i].uses;
+			      max_regno = i;
+			    }
+			}
+
+		      if (max_uses > 3
+			  && max_regno >= regno
+			  && max_regno < endregno)
+			{
+			  substitute_ep_register (regs[max_regno].first_insn,
+						  regs[max_regno].last_insn,
+						  max_uses, max_regno, &r1,
+						  &ep);
+
+			  /* Since we made a substitution, zap all remembered
+			     registers.  */
+			  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+			    {
+			      regs[i].uses = 0;
+			      regs[i].first_insn = NULL_RTX;
+			      regs[i].last_insn = NULL_RTX;
+			    }
+			}
+		    }
+
+		  /* The overwritten registers themselves always lose
+		     their accumulated counts.  */
+		  for (i = regno; i < endregno; i++)
+		    {
+		      regs[i].uses = 0;
+		      regs[i].first_insn = NULL_RTX;
+		      regs[i].last_insn = NULL_RTX;
+		    }
+		}
+	    }
+	  break;
+	}
+    }
+}
+
+/* # of registers saved by the interrupt handler.  */
+#define INTERRUPT_FIXED_NUM 5
+
+/* # of bytes for registers saved by the interrupt handler.  */
+#define INTERRUPT_FIXED_SAVE_SIZE (4 * INTERRUPT_FIXED_NUM)
+
+/* # of words saved for other registers.  */
+#define INTERRUPT_ALL_SAVE_NUM \
+  (30 - INTERRUPT_FIXED_NUM)
+
+/* # of bytes of stack needed for those other registers.  */
+#define INTERRUPT_ALL_SAVE_SIZE (4 * INTERRUPT_ALL_SAVE_NUM)
+
+/* Return the number of bytes of stack needed to save registers in the
+   current function, and if P_REG_SAVED is non-NULL store through it a
+   bit mask (bit I set => register I is saved) of the registers to be
+   saved by the ordinary prologue code.  */
+
+int
+compute_register_save_size (long * p_reg_saved)
+{
+  int size = 0;
+  int i;
+  int interrupt_handler = v850_interrupt_function_p (current_function_decl);
+  int call_p = df_regs_ever_live_p (LINK_POINTER_REGNUM);
+  long reg_saved = 0;
+
+  /* Count the return pointer if we need to save it.  */
+  if (crtl->profile && !call_p)
+    {
+      df_set_regs_ever_live (LINK_POINTER_REGNUM, true);
+      call_p = 1;
+    }
+
+  /* Count space for the register saves.  */
+  if (interrupt_handler)
+    {
+      for (i = 0; i <= 31; i++)
+	switch (i)
+	  {
+	  default:
+	    if (df_regs_ever_live_p (i) || call_p)
+	      {
+		size += 4;
+		reg_saved |= 1L << i;
+	      }
+	    break;
+
+	    /* We don't save/restore r0 or the stack pointer */
+	  case 0:
+	  case STACK_POINTER_REGNUM:
+	    break;
+
+	    /* For registers with fixed use, we save them, set them to the
+	       appropriate value, and then restore them.
+	       These registers are handled specially, so don't list them
+	       on the list of registers to save in the prologue.
+	       Note they still contribute 4 bytes each to the total.  */
+	  case 1:		/* temp used to hold ep */
+	  case 4:		/* gp */
+	  case 10:		/* temp used to call interrupt save/restore */
+	  case 11:		/* temp used to call interrupt save/restore (long call) */
+	  case EP_REGNUM:	/* ep */
+	    size += 4;
+	    break;
+	  }
+    }
+  else
+    {
+      /* Find the first register that needs to be saved.  */
+      for (i = 0; i <= 31; i++)
+	if (df_regs_ever_live_p (i) && ((! call_used_regs[i])
+					|| i == LINK_POINTER_REGNUM))
+	  break;
+
+      /* If it is possible that an out-of-line helper function might be
+	 used to generate the prologue for the current function, then we
+	 need to cover the possibility that such a helper function will
+	 be used, despite the fact that there might be gaps in the list of
+	 registers that need to be saved.  To detect this we note that the
+	 helper functions always push at least register r29 (provided
+	 that the function is not an interrupt handler).  */
+
+      if (TARGET_PROLOG_FUNCTION
+          && (i == 2 || ((i >= 20) && (i < 30))))
+	{
+	  if (i == 2)
+	    {
+	      size += 4;
+	      reg_saved |= 1L << i;
+
+	      i = 20;
+	    }
+
+	  /* Helper functions save all registers between the starting
+	     register and the last register, regardless of whether they
+	     are actually used by the function or not.  */
+	  for (; i <= 29; i++)
+	    {
+	      size += 4;
+	      reg_saved |= 1L << i;
+	    }
+
+	  if (df_regs_ever_live_p (LINK_POINTER_REGNUM))
+	    {
+	      size += 4;
+	      reg_saved |= 1L << LINK_POINTER_REGNUM;
+	    }
+	}
+      else
+	{
+	  /* Save every live call-saved register (and the link
+	     pointer) individually.  */
+	  for (; i <= 31; i++)
+	    if (df_regs_ever_live_p (i) && ((! call_used_regs[i])
+					    || i == LINK_POINTER_REGNUM))
+	      {
+		size += 4;
+		reg_saved |= 1L << i;
+	      }
+	}
+    }
+
+  if (p_reg_saved)
+    *p_reg_saved = reg_saved;
+
+  return size;
+}
+
+/* Return the total frame size in bytes: local variables (SIZE), the
+   register save area, and outgoing argument space.  If P_REG_SAVED is
+   non-NULL, a mask of the saved registers is stored through it.  */
+
+int
+compute_frame_size (int size, long * p_reg_saved)
+{
+  int total = size;
+
+  total += compute_register_save_size (p_reg_saved);
+  total += crtl->outgoing_args_size;
+
+  return total;
+}
+
+/* Return nonzero if saving/restoring NUM_SAVE registers with a frame of
+   FRAME_SIZE bytes is smaller (in estimated bytes of code) when done
+   through the out-of-line prologue/epilogue helper functions than with
+   inline code.  */
+
+static int
+use_prolog_function (int num_save, int frame_size)
+{
+  int stack_via_func = (4 * num_save);
+  int stack_extra = frame_size - stack_via_func;
+  int func_save_len, func_restore_len;
+  int inline_save_len, inline_restore_len;
+
+  /* Cost of invoking the helpers: a callt, or a jarl/jump sequence
+     when callt is disabled.  */
+  if (! TARGET_DISABLE_CALLT)
+    func_save_len = func_restore_len = 2;
+  else
+    func_save_len = func_restore_len = TARGET_LONG_CALLS ? (4+4+4+2+2) : 4;
+
+  /* Stack not covered by the helper still needs an add each way.  */
+  if (stack_extra)
+    {
+      func_save_len += CONST_OK_FOR_J (-stack_extra) ? 2 : 4;
+      func_restore_len += CONST_OK_FOR_J (-stack_extra) ? 2 : 4;
+    }
+
+  /* Inline cost: short sst/sld through ep when that would be used,
+     otherwise full-size stores/loads.  */
+  if (TARGET_EP && num_save > 3 && (unsigned)frame_size < 255)
+    inline_save_len = inline_restore_len = (3 * 2) + (2 * num_save);
+  else
+    inline_save_len = inline_restore_len = 4 * num_save;
+
+  /* Plus the stack adjustment each way (and the return insn).  */
+  inline_save_len += CONST_OK_FOR_J (-frame_size) ? 2 : 4;
+  inline_restore_len += (CONST_OK_FOR_J (frame_size) ? 2 : 4) + 2;
+
+  /* Don't bother checking if we don't actually save any space.
+     This happens for instance if one register is saved and additional
+     stack space is allocated.  */
+  return ((func_save_len + func_restore_len)
+	  < (inline_save_len + inline_restore_len));
+}
+
+/* Emit RTL for the current function's prologue: save registers (via a
+   single parallel matched by an out-of-line helper pattern when
+   profitable, otherwise one by one), allocate the stack frame, and set
+   up the frame pointer if needed.  */
+
+void
+expand_prologue (void)
+{
+  unsigned int i;
+  unsigned int size = get_frame_size ();
+  unsigned int actual_fsize;
+  unsigned int init_stack_alloc = 0;
+  rtx save_regs[32];
+  rtx save_all;
+  unsigned int num_save;
+  int code;
+  int interrupt_handler = v850_interrupt_function_p (current_function_decl);
+  long reg_saved = 0;
+
+  actual_fsize = compute_frame_size (size, &reg_saved);
+
+  /* Save/setup global registers for interrupt functions right now.  */
+  if (interrupt_handler)
+    {
+      if (! TARGET_DISABLE_CALLT && (TARGET_V850E || TARGET_V850E2_ALL))
+	emit_insn (gen_callt_save_interrupt ());
+      else
+	emit_insn (gen_save_interrupt ());
+
+      /* The interrupt save patterns have already accounted for these
+	 bytes; don't allocate them again below.  */
+      actual_fsize -= INTERRUPT_FIXED_SAVE_SIZE;
+
+      if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
+	actual_fsize -= INTERRUPT_ALL_SAVE_SIZE;
+    }
+
+  /* Identify all of the saved registers.  */
+  num_save = 0;
+  for (i = 1; i < 32; i++)
+    {
+      if (((1L << i) & reg_saved) != 0)
+	save_regs[num_save++] = gen_rtx_REG (Pmode, i);
+    }
+
+  /* See if we have an insn that allocates stack space and saves the particular
+     registers we want to.  */
+  save_all = NULL_RTX;
+  if (TARGET_PROLOG_FUNCTION && num_save > 0)
+    {
+      if (use_prolog_function (num_save, actual_fsize))
+	{
+	  int alloc_stack = 4 * num_save;
+	  int offset = 0;
+
+	  /* Build (parallel [sp -= alloc; mem[sp+off] = reg; ...])
+	     plus clobbers of the helper's temporaries when callt is
+	     disabled.  */
+	  save_all = gen_rtx_PARALLEL
+	    (VOIDmode,
+	     rtvec_alloc (num_save + 1
+			  + (TARGET_DISABLE_CALLT ? (TARGET_LONG_CALLS ? 2 : 1) : 0)));
+
+	  XVECEXP (save_all, 0, 0)
+	    = gen_rtx_SET (VOIDmode,
+			   stack_pointer_rtx,
+			   gen_rtx_PLUS (Pmode,
+					 stack_pointer_rtx,
+					 GEN_INT(-alloc_stack)));
+	  for (i = 0; i < num_save; i++)
+	    {
+	      offset -= 4;
+	      XVECEXP (save_all, 0, i+1)
+		= gen_rtx_SET (VOIDmode,
+			       gen_rtx_MEM (Pmode,
+					    gen_rtx_PLUS (Pmode,
+							  stack_pointer_rtx,
+							  GEN_INT(offset))),
+			       save_regs[i]);
+	    }
+
+	  if (TARGET_DISABLE_CALLT)
+	    {
+	      XVECEXP (save_all, 0, num_save + 1)
+		= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 10));
+
+	      if (TARGET_LONG_CALLS)
+		XVECEXP (save_all, 0, num_save + 2)
+		  = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+	    }
+
+	  /* Only use the parallel if some save pattern actually
+	     matches it.  */
+	  code = recog (save_all, NULL_RTX, NULL);
+	  if (code >= 0)
+	    {
+	      rtx insn = emit_insn (save_all);
+	      INSN_CODE (insn) = code;
+	      actual_fsize -= alloc_stack;
+
+	    }
+	  else
+	    save_all = NULL_RTX;
+	}
+    }
+
+  /* If no prolog save function is available, store the registers the old
+     fashioned way (one by one).  */
+  if (!save_all)
+    {
+      /* Special case interrupt functions that save all registers for a call.  */
+      if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
+	{
+	  if (! TARGET_DISABLE_CALLT && (TARGET_V850E || TARGET_V850E2_ALL))
+	    emit_insn (gen_callt_save_all_interrupt ());
+	  else
+	    emit_insn (gen_save_all_interrupt ());
+	}
+      else
+	{
+	  int offset;
+	  /* If the stack is too big, allocate it in chunks so we can do the
+	     register saves.  We use the register save size so we use the ep
+	     register.  */
+	  if (actual_fsize && !CONST_OK_FOR_K (-actual_fsize))
+	    init_stack_alloc = compute_register_save_size (NULL);
+	  else
+	    init_stack_alloc = actual_fsize;
+
+	  /* Save registers at the beginning of the stack frame.  */
+	  offset = init_stack_alloc - 4;
+
+	  if (init_stack_alloc)
+	    emit_insn (gen_addsi3 (stack_pointer_rtx,
+				   stack_pointer_rtx,
+				   GEN_INT (- (signed) init_stack_alloc)));
+
+	  /* Save the return pointer first.  */
+	  if (num_save > 0 && REGNO (save_regs[num_save-1]) == LINK_POINTER_REGNUM)
+	    {
+	      emit_move_insn (gen_rtx_MEM (SImode,
+					   plus_constant (stack_pointer_rtx,
+							  offset)),
+			      save_regs[--num_save]);
+	      offset -= 4;
+	    }
+
+	  for (i = 0; i < num_save; i++)
+	    {
+	      emit_move_insn (gen_rtx_MEM (SImode,
+					   plus_constant (stack_pointer_rtx,
+							  offset)),
+			      save_regs[i]);
+	      offset -= 4;
+	    }
+	}
+    }
+
+  /* Allocate the rest of the stack that was not allocated above (either it is
+     > 32K or we just called a function to save the registers and needed more
+     stack.  */
+  if (actual_fsize > init_stack_alloc)
+    {
+      int diff = actual_fsize - init_stack_alloc;
+      if (CONST_OK_FOR_K (-diff))
+	emit_insn (gen_addsi3 (stack_pointer_rtx,
+			       stack_pointer_rtx,
+			       GEN_INT (-diff)));
+      else
+	{
+	  /* Too big for an immediate: go through scratch register r12.  */
+	  rtx reg = gen_rtx_REG (Pmode, 12);
+	  emit_move_insn (reg, GEN_INT (-diff));
+	  emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+	}
+    }
+
+  /* If we need a frame pointer, set it up now.  */
+  if (frame_pointer_needed)
+    emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+}
+
+
+/* Emit RTL for the current function's epilogue: undo the frame
+   allocation, restore saved registers (via a single matched parallel
+   with embedded return when profitable, otherwise one by one) and emit
+   the appropriate return insn.  */
+
+void
+expand_epilogue (void)
+{
+  unsigned int i;
+  unsigned int size = get_frame_size ();
+  long reg_saved = 0;
+  int actual_fsize = compute_frame_size (size, &reg_saved);
+  rtx restore_regs[32];
+  rtx restore_all;
+  unsigned int num_restore;
+  int code;
+  int interrupt_handler = v850_interrupt_function_p (current_function_decl);
+
+  /* Eliminate the initial stack stored by interrupt functions.  */
+  if (interrupt_handler)
+    {
+      actual_fsize -= INTERRUPT_FIXED_SAVE_SIZE;
+      if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
+	actual_fsize -= INTERRUPT_ALL_SAVE_SIZE;
+    }
+
+  /* Cut off any dynamic stack created.  */
+  if (frame_pointer_needed)
+    emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
+
+  /* Identify all of the saved registers.  */
+  num_restore = 0;
+  for (i = 1; i < 32; i++)
+    {
+      if (((1L << i) & reg_saved) != 0)
+	restore_regs[num_restore++] = gen_rtx_REG (Pmode, i);
+    }
+
+  /* See if we have an insn that restores the particular registers we
+     want to.  */
+  restore_all = NULL_RTX;
+
+  if (TARGET_PROLOG_FUNCTION
+      && num_restore > 0
+      && !interrupt_handler)
+    {
+      int alloc_stack = (4 * num_restore);
+
+      /* Don't bother checking if we don't actually save any space.  */
+      if (use_prolog_function (num_restore, actual_fsize))
+	{
+	  int offset;
+	  /* Build (parallel [return; sp += alloc; reg = mem[sp+off];
+	     ...]) for the out-of-line restore patterns.  */
+	  restore_all = gen_rtx_PARALLEL (VOIDmode,
+					  rtvec_alloc (num_restore + 2));
+	  XVECEXP (restore_all, 0, 0) = gen_rtx_RETURN (VOIDmode);
+	  XVECEXP (restore_all, 0, 1)
+	    = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+			    gen_rtx_PLUS (Pmode,
+					  stack_pointer_rtx,
+					  GEN_INT (alloc_stack)));
+
+	  offset = alloc_stack - 4;
+	  for (i = 0; i < num_restore; i++)
+	    {
+	      XVECEXP (restore_all, 0, i+2)
+		= gen_rtx_SET (VOIDmode,
+			       restore_regs[i],
+			       gen_rtx_MEM (Pmode,
+					    gen_rtx_PLUS (Pmode,
+							  stack_pointer_rtx,
+							  GEN_INT(offset))));
+	      offset -= 4;
+	    }
+
+	  /* Only use the parallel if some restore pattern matches it.  */
+	  code = recog (restore_all, NULL_RTX, NULL);
+
+	  if (code >= 0)
+	    {
+	      rtx insn;
+
+	      /* Deallocate the remainder of the frame before the
+		 combined restore-and-return.  */
+	      actual_fsize -= alloc_stack;
+	      if (actual_fsize)
+		{
+		  if (CONST_OK_FOR_K (actual_fsize))
+		    emit_insn (gen_addsi3 (stack_pointer_rtx,
+					   stack_pointer_rtx,
+					   GEN_INT (actual_fsize)));
+		  else
+		    {
+		      rtx reg = gen_rtx_REG (Pmode, 12);
+		      emit_move_insn (reg, GEN_INT (actual_fsize));
+		      emit_insn (gen_addsi3 (stack_pointer_rtx,
+					     stack_pointer_rtx,
+					     reg));
+		    }
+		}
+
+	      insn = emit_jump_insn (restore_all);
+	      INSN_CODE (insn) = code;
+
+	    }
+	  else
+	    restore_all = NULL_RTX;
+	}
+    }
+
+  /* If no epilogue save function is available, restore the registers the
+     old fashioned way (one by one).  */
+  if (!restore_all)
+    {
+      unsigned int init_stack_free;
+
+      /* If the stack is large, we need to cut it down in 2 pieces.  */
+      if (interrupt_handler)
+       init_stack_free = 0;
+      else if (actual_fsize && !CONST_OK_FOR_K (-actual_fsize))
+	init_stack_free = 4 * num_restore;
+      else
+	init_stack_free = (signed) actual_fsize;
+
+      /* Deallocate the rest of the stack if it is > 32K.  */
+      if ((unsigned int) actual_fsize > init_stack_free)
+	{
+	  int diff;
+
+	  diff = actual_fsize - init_stack_free;
+
+	  if (CONST_OK_FOR_K (diff))
+	    emit_insn (gen_addsi3 (stack_pointer_rtx,
+				   stack_pointer_rtx,
+				   GEN_INT (diff)));
+	  else
+	    {
+	      /* Too big for an immediate: go through scratch r12.  */
+	      rtx reg = gen_rtx_REG (Pmode, 12);
+	      emit_move_insn (reg, GEN_INT (diff));
+	      emit_insn (gen_addsi3 (stack_pointer_rtx,
+				     stack_pointer_rtx,
+				     reg));
+	    }
+	}
+
+      /* Special case interrupt functions that save all registers
+	 for a call.  */
+      if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
+	{
+	  if (! TARGET_DISABLE_CALLT)
+	    emit_insn (gen_callt_restore_all_interrupt ());
+	  else
+	    emit_insn (gen_restore_all_interrupt ());
+	}
+      else
+	{
+	  /* Restore registers from the beginning of the stack frame.  */
+	  int offset = init_stack_free - 4;
+
+	  /* Restore the return pointer first.  */
+	  if (num_restore > 0
+	      && REGNO (restore_regs [num_restore - 1]) == LINK_POINTER_REGNUM)
+	    {
+	      emit_move_insn (restore_regs[--num_restore],
+			      gen_rtx_MEM (SImode,
+					   plus_constant (stack_pointer_rtx,
+							  offset)));
+	      offset -= 4;
+	    }
+
+	  for (i = 0; i < num_restore; i++)
+	    {
+	      emit_move_insn (restore_regs[i],
+			      gen_rtx_MEM (SImode,
+					   plus_constant (stack_pointer_rtx,
+							  offset)));
+
+	      /* Keep the restore from being deleted as dead code.  */
+	      emit_use (restore_regs[i]);
+	      offset -= 4;
+	    }
+
+	  /* Cut back the remainder of the stack.  */
+	  if (init_stack_free)
+	    emit_insn (gen_addsi3 (stack_pointer_rtx,
+				   stack_pointer_rtx,
+				   GEN_INT (init_stack_free)));
+	}
+
+      /* And return or use reti for interrupt handlers.  */
+      if (interrupt_handler)
+        {
+	  if (! TARGET_DISABLE_CALLT && (TARGET_V850E || TARGET_V850E2_ALL))
+	    emit_insn (gen_callt_return_interrupt ());
+	  else
+	    emit_jump_insn (gen_return_interrupt ());
+	 }
+      else if (actual_fsize)
+	emit_jump_insn (gen_return_internal ());
+      else
+	emit_jump_insn (gen_return_simple ());
+    }
+
+  /* Invalidate the cached interrupt-function flags for the next
+     function.  */
+  v850_interrupt_cache_p = FALSE;
+  v850_interrupt_p = FALSE;
+}
+
+/* Update the condition code from the insn. */
+/* Update the condition code from the insn.  BODY is the pattern of
+   INSN; the insn's "cc" attribute says how it affects the cc_status
+   tracking used by the cc0 machinery.  */
+void
+notice_update_cc (rtx body, rtx insn)
+{
+  switch (get_attr_cc (insn))
+    {
+    case CC_NONE:
+      /* Insn does not affect CC at all.  */
+      break;
+
+    case CC_NONE_0HIT:
+      /* Insn does not change CC, but the 0'th operand has been changed.
+	 Forget any cached value that mentioned that operand.  */
+      if (cc_status.value1 != 0
+	  && reg_overlap_mentioned_p (recog_data.operand[0], cc_status.value1))
+	cc_status.value1 = 0;
+      break;
+
+    case CC_SET_ZN:
+      /* Insn sets the Z,N flags of CC to recog_data.operand[0].
+	 V,C is in an unusable state.  */
+      CC_STATUS_INIT;
+      cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
+      cc_status.value1 = recog_data.operand[0];
+      break;
+
+    case CC_SET_ZNV:
+      /* Insn sets the Z,N,V flags of CC to recog_data.operand[0].
+	 C is in an unusable state.  */
+      CC_STATUS_INIT;
+      cc_status.flags |= CC_NO_CARRY;
+      cc_status.value1 = recog_data.operand[0];
+      break;
+
+    case CC_COMPARE:
+      /* The insn is a compare instruction.  */
+      CC_STATUS_INIT;
+      cc_status.value1 = SET_SRC (body);
+      break;
+
+    case CC_CLOBBER:
+      /* Insn doesn't leave CC in a usable state.  */
+      CC_STATUS_INIT;
+      break;
+
+    default:
+      break;
+    }
+}
+
+/* Retrieve the data area that has been chosen for the given decl,
+   based on which of the "sda"/"tda"/"zda" attributes it carries.
+   Decls with none of them live in the normal data area.  */
+
+v850_data_area
+v850_get_data_area (tree decl)
+{
+  tree attrs = DECL_ATTRIBUTES (decl);
+
+  if (lookup_attribute ("sda", attrs))
+    return DATA_AREA_SDA;
+  if (lookup_attribute ("tda", attrs))
+    return DATA_AREA_TDA;
+  if (lookup_attribute ("zda", attrs))
+    return DATA_AREA_ZDA;
+
+  return DATA_AREA_NORMAL;
+}
+
+/* Store the indicated data area in the decl's attributes by prepending
+   the matching "sda"/"tda"/"zda" attribute.  DATA_AREA_NORMAL (and any
+   unknown value) leaves the decl untouched.  */
+
+static void
+v850_set_data_area (tree decl, v850_data_area data_area)
+{
+  const char *id;
+
+  if (data_area == DATA_AREA_SDA)
+    id = "sda";
+  else if (data_area == DATA_AREA_TDA)
+    id = "tda";
+  else if (data_area == DATA_AREA_ZDA)
+    id = "zda";
+  else
+    return;
+
+  DECL_ATTRIBUTES (decl)
+    = tree_cons (get_identifier (id), NULL, DECL_ATTRIBUTES (decl));
+}
+
+/* Handle an "interrupt" attribute; arguments as in
+   struct attribute_spec.handler.  The attribute is only valid on
+   function declarations; elsewhere it is discarded with a warning.  */
+static tree
+v850_handle_interrupt_attribute (tree * node,
+				 tree name,
+				 tree args ATTRIBUTE_UNUSED,
+				 int flags ATTRIBUTE_UNUSED,
+				 bool * no_add_attrs)
+{
+  if (TREE_CODE (*node) == FUNCTION_DECL)
+    return NULL_TREE;
+
+  warning (OPT_Wattributes, "%qE attribute only applies to functions",
+	   name);
+  *no_add_attrs = true;
+
+  return NULL_TREE;
+}
+
+/* Handle a "sda", "tda" or "zda" attribute; arguments as in
+   struct attribute_spec.handler.  The attribute is rejected on local
+   variables and on decls already assigned to a different data area.  */
+static tree
+v850_handle_data_area_attribute (tree* node,
+				 tree name,
+				 tree args ATTRIBUTE_UNUSED,
+				 int flags ATTRIBUTE_UNUSED,
+				 bool * no_add_attrs)
+{
+  v850_data_area data_area;	/* Area requested by NAME.  */
+  v850_data_area area;		/* Area the decl already belongs to.  */
+  tree decl = *node;
+
+  /* Implement data area attribute: map NAME to an area code.  */
+  if (is_attribute_p ("sda", name))
+    data_area = DATA_AREA_SDA;
+  else if (is_attribute_p ("tda", name))
+    data_area = DATA_AREA_TDA;
+  else if (is_attribute_p ("zda", name))
+    data_area = DATA_AREA_ZDA;
+  else
+    gcc_unreachable ();
+
+  switch (TREE_CODE (decl))
+    {
+    case VAR_DECL:
+      /* Data-area placement only makes sense for file-scope objects.  */
+      if (current_function_decl != NULL_TREE)
+	{
+	  error_at (DECL_SOURCE_LOCATION (decl),
+		    "data area attributes cannot be specified for "
+		    "local variables");
+	  *no_add_attrs = true;
+	}
+
+      /* Drop through: a global VAR_DECL is additionally checked for a
+	 conflicting earlier area choice, just like a function.  */
+
+    case FUNCTION_DECL:
+      area = v850_get_data_area (decl);
+      if (area != DATA_AREA_NORMAL && data_area != area)
+	{
+	  error ("data area of %q+D conflicts with previous declaration",
+		 decl);
+	  *no_add_attrs = true;
+	}
+      break;
+
+    default:
+      break;
+    }
+
+  return NULL_TREE;
+}
+
+
+/* Return nonzero if FUNC is an interrupt function as specified
+   by the "interrupt" or "interrupt_handler" attribute.  */
+
+int
+v850_interrupt_function_p (tree func)
+{
+  tree a;
+  int ret = 0;
+
+  /* Reuse the cached answer when one has been recorded (the cache
+     flag is maintained elsewhere in this file).  */
+  if (v850_interrupt_cache_p)
+    return v850_interrupt_p;
+
+  if (TREE_CODE (func) != FUNCTION_DECL)
+    return 0;
+
+  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
+  if (a != NULL_TREE)
+    ret = 1;
+
+  else
+    {
+      a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
+      ret = a != NULL_TREE;
+    }
+
+  /* It's not safe to trust global variables until after function inlining has
+     been done, so only record the answer once reload has started.  */
+  if (reload_completed | reload_in_progress)
+    v850_interrupt_p = ret;
+
+  return ret;
+}
+
+
+/* Choose a data area for DECL — from an explicit section name, or from
+   the -m{tda,sda,zda}=N size limits — and record the final choice as
+   SYMBOL_FLAG_* bits on SYMBOL, DECL's SYMBOL_REF.  */
+static void
+v850_encode_data_area (tree decl, rtx symbol)
+{
+  int flags;
+
+  /* Map explicit sections into the appropriate attribute.  */
+  if (v850_get_data_area (decl) == DATA_AREA_NORMAL)
+    {
+      if (DECL_SECTION_NAME (decl))
+	{
+	  const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+
+	  if (streq (name, ".zdata") || streq (name, ".zbss"))
+	    v850_set_data_area (decl, DATA_AREA_ZDA);
+
+	  else if (streq (name, ".sdata") || streq (name, ".sbss"))
+	    v850_set_data_area (decl, DATA_AREA_SDA);
+
+	  else if (streq (name, ".tdata"))
+	    v850_set_data_area (decl, DATA_AREA_TDA);
+	}
+
+      /* If no attribute, support -m{zda,sda,tda}=n.  */
+      else
+	{
+	  int size = int_size_in_bytes (TREE_TYPE (decl));
+	  if (size <= 0)
+	    /* Incomplete or variable-sized type: leave it alone.  */
+	    ;
+
+	  else if (size <= small_memory [(int) SMALL_MEMORY_TDA].max)
+	    v850_set_data_area (decl, DATA_AREA_TDA);
+
+	  else if (size <= small_memory [(int) SMALL_MEMORY_SDA].max)
+	    v850_set_data_area (decl, DATA_AREA_SDA);
+
+	  else if (size <= small_memory [(int) SMALL_MEMORY_ZDA].max)
+	    v850_set_data_area (decl, DATA_AREA_ZDA);
+	}
+
+      if (v850_get_data_area (decl) == DATA_AREA_NORMAL)
+	return;
+    }
+
+  /* Record the area on the symbol so addressing can use it later.  */
+  flags = SYMBOL_REF_FLAGS (symbol);
+  switch (v850_get_data_area (decl))
+    {
+    case DATA_AREA_ZDA: flags |= SYMBOL_FLAG_ZDA; break;
+    case DATA_AREA_TDA: flags |= SYMBOL_FLAG_TDA; break;
+    case DATA_AREA_SDA: flags |= SYMBOL_FLAG_SDA; break;
+    default: gcc_unreachable ();
+    }
+  SYMBOL_REF_FLAGS (symbol) = flags;
+}
+
+/* Implement TARGET_ENCODE_SECTION_INFO: run the default hook, then tag
+   static and external variables with their v850 data-area flags.  */
+static void
+v850_encode_section_info (tree decl, rtx rtl, int first)
+{
+  default_encode_section_info (decl, rtl, first);
+
+  if (TREE_CODE (decl) != VAR_DECL)
+    return;
+
+  if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+    v850_encode_data_area (decl, XEXP (rtl, 0));
+}
+
+/* Construct a JR instruction to a routine that will perform the equivalent of
+   the RTL passed in as an argument.  This RTL is a function epilogue that
+   pops registers off the stack and possibly releases some extra stack space
+   as well.  The code has already verified that the RTL matches these
+   requirements.  Returns the assembly text, or NULL on a malformed
+   pattern (after issuing an error).  */
+
+char *
+construct_restore_jr (rtx op)
+{
+  int count = XVECLEN (op, 0);
+  int stack_bytes;
+  unsigned long int mask;
+  unsigned long int first;
+  unsigned long int last;
+  int i;
+  static char buff [100]; /* XXX */
+
+  if (count <= 2)
+    {
+      error ("bogus JR construction: %d", count);
+      return NULL;
+    }
+
+  /* Work out how many bytes to pop off the stack before retrieving
+     registers.  */
+  gcc_assert (GET_CODE (XVECEXP (op, 0, 1)) == SET);
+  gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS);
+  gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1)) == CONST_INT);
+
+  stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1));
+
+  /* Each pop will remove 4 bytes from the stack....  */
+  stack_bytes -= (count - 2) * 4;
+
+  /* Make sure that the amount we are popping beyond the register saves
+     is exactly zero; the library restore routines release no extra
+     stack.  */
+  if (stack_bytes != 0)
+    {
+      error ("bad amount of stack space removal: %d", stack_bytes);
+      return NULL;
+    }
+
+  /* Now compute the bit mask of registers to pop.  Shift 1UL, not 1:
+     register numbers reach 31 and "1 << 31" overflows a 32-bit int,
+     which is undefined behavior; MASK is unsigned long.  */
+  mask = 0;
+  for (i = 2; i < count; i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+
+      gcc_assert (GET_CODE (vector_element) == SET);
+      gcc_assert (GET_CODE (SET_DEST (vector_element)) == REG);
+      gcc_assert (register_is_ok_for_epilogue (SET_DEST (vector_element),
+					       SImode));
+
+      mask |= 1UL << REGNO (SET_DEST (vector_element));
+    }
+
+  /* Scan for the first register to pop.  */
+  for (first = 0; first < 32; first++)
+    {
+      if (mask & (1UL << first))
+	break;
+    }
+
+  gcc_assert (first < 32);
+
+  /* Discover the last register to pop.  */
+  if (mask & (1UL << LINK_POINTER_REGNUM))
+    {
+      last = LINK_POINTER_REGNUM;
+    }
+  else
+    {
+      gcc_assert (!stack_bytes);
+      gcc_assert (mask & (1UL << 29));
+
+      last = 29;
+    }
+
+  /* Note, it is possible to have gaps in the register mask.
+     We ignore this here, and generate a JR anyway.  We will
+     be popping more registers than is strictly necessary, but
+     it does save code space.  */
+
+  if (TARGET_LONG_CALLS)
+    {
+      char name[40];
+
+      if (first == last)
+	sprintf (name, "__return_%s", reg_names [first]);
+      else
+	sprintf (name, "__return_%s_%s", reg_names [first], reg_names [last]);
+
+      /* The helper may be out of range of a JR, so build its address
+	 in r6 and jump indirectly.  */
+      sprintf (buff, "movhi hi(%s), r0, r6\n\tmovea lo(%s), r6, r6\n\tjmp r6",
+	       name, name);
+    }
+  else
+    {
+      if (first == last)
+	sprintf (buff, "jr __return_%s", reg_names [first]);
+      else
+	sprintf (buff, "jr __return_%s_%s", reg_names [first], reg_names [last]);
+    }
+
+  return buff;
+}
+
+
+/* Construct a JARL instruction to a routine that will perform the equivalent
+   of the RTL passed as a parameter.  This RTL is a function prologue that
+   saves some of the registers r20 - r31 onto the stack, and possibly acquires
+   some stack space as well.  The code has already verified that the RTL
+   matches these requirements.  Returns the assembly text, or NULL on a
+   malformed pattern (after issuing an error).  */
+char *
+construct_save_jarl (rtx op)
+{
+  int count = XVECLEN (op, 0);
+  int stack_bytes;
+  unsigned long int mask;
+  unsigned long int first;
+  unsigned long int last;
+  int i;
+  static char buff [100]; /* XXX */
+
+  if (count <= (TARGET_LONG_CALLS ? 3 : 2))
+    {
+      error ("bogus JARL construction: %d", count);
+      return NULL;
+    }
+
+  /* Paranoia.  */
+  gcc_assert (GET_CODE (XVECEXP (op, 0, 0)) == SET);
+  gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) == PLUS);
+  gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0)) == REG);
+  gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)) == CONST_INT);
+
+  /* Work out how many bytes to push onto the stack after storing the
+     registers.  */
+  stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1));
+
+  /* Each push will put 4 bytes onto the stack....  */
+  stack_bytes += (count - (TARGET_LONG_CALLS ? 3 : 2)) * 4;
+
+  /* Make sure that the stack adjustment beyond the register saves is
+     exactly zero; the library save routines acquire no extra stack.  */
+  if (stack_bytes != 0)
+    {
+      error ("bad amount of stack space removal: %d", stack_bytes);
+      return NULL;
+    }
+
+  /* Now compute the bit mask of registers to push.  Shift 1UL, not 1:
+     register numbers reach 31 and "1 << 31" overflows a 32-bit int,
+     which is undefined behavior; MASK is unsigned long.  */
+  mask = 0;
+  for (i = 1; i < count - (TARGET_LONG_CALLS ? 2 : 1); i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+
+      gcc_assert (GET_CODE (vector_element) == SET);
+      gcc_assert (GET_CODE (SET_SRC (vector_element)) == REG);
+      gcc_assert (register_is_ok_for_epilogue (SET_SRC (vector_element),
+					       SImode));
+
+      mask |= 1UL << REGNO (SET_SRC (vector_element));
+    }
+
+  /* Scan for the first register to push.  */
+  for (first = 0; first < 32; first++)
+    {
+      if (mask & (1UL << first))
+	break;
+    }
+
+  gcc_assert (first < 32);
+
+  /* Discover the last register to push.  */
+  if (mask & (1UL << LINK_POINTER_REGNUM))
+    {
+      last = LINK_POINTER_REGNUM;
+    }
+  else
+    {
+      gcc_assert (!stack_bytes);
+      gcc_assert (mask & (1UL << 29));
+
+      last = 29;
+    }
+
+  /* Note, it is possible to have gaps in the register mask.
+     We ignore this here, and generate a JARL anyway.  We will
+     be pushing more registers than is strictly necessary, but
+     it does save code space.  */
+
+  if (TARGET_LONG_CALLS)
+    {
+      char name[40];
+
+      if (first == last)
+	sprintf (name, "__save_%s", reg_names [first]);
+      else
+	sprintf (name, "__save_%s_%s", reg_names [first], reg_names [last]);
+
+      /* The helper may be out of range of a JARL, so build its address
+	 in r11, set up the return address in r10 by hand, and jump.  */
+      sprintf (buff, "movhi hi(%s), r0, r11\n\tmovea lo(%s), r11, r11\n\tjarl .+4, r10\n\tadd 4, r10\n\tjmp r11",
+	       name, name);
+    }
+  else
+    {
+      if (first == last)
+	sprintf (buff, "jarl __save_%s, r10", reg_names [first]);
+      else
+	sprintf (buff, "jarl __save_%s_%s, r10", reg_names [first],
+		 reg_names [last]);
+    }
+
+  return buff;
+}
+
+extern tree last_assemble_variable_decl;
+extern int size_directive_output;
+
+/* A version of asm_output_aligned_bss() that copes with the special
+   data areas of the v850: ZDA and SDA objects go to .zbss/.sbss, TDA
+   objects to .tdata (there is no separate TDA bss section), everything
+   else to .bss.  */
+void
+v850_output_aligned_bss (FILE * file,
+			 tree decl,
+			 const char * name,
+			 unsigned HOST_WIDE_INT size,
+			 int align)
+{
+  switch (v850_get_data_area (decl))
+    {
+    case DATA_AREA_ZDA:
+      switch_to_section (zbss_section);
+      break;
+
+    case DATA_AREA_SDA:
+      switch_to_section (sbss_section);
+      break;
+
+    case DATA_AREA_TDA:
+      switch_to_section (tdata_section);
+      /* Break was missing here: the original fell through into the
+	 default case and immediately re-switched to .bss, so TDA
+	 objects never actually landed in .tdata.  */
+      break;
+
+    default:
+      switch_to_section (bss_section);
+      break;
+    }
+
+  ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
+#ifdef ASM_DECLARE_OBJECT_NAME
+  last_assemble_variable_decl = decl;
+  ASM_DECLARE_OBJECT_NAME (file, name, decl);
+#else
+  /* Standard thing is just output label for the object.  */
+  ASM_OUTPUT_LABEL (file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+  /* Reserve at least one byte so the label refers to real storage.  */
+  ASM_OUTPUT_SKIP (file, size ? size : 1);
+}
+
+/* Called via the macro ASM_OUTPUT_DECL_COMMON.  Emit the common
+   directive appropriate to DECL's data area (.zcomm/.scomm/.tcomm or
+   plain .comm) followed by NAME, SIZE and the byte alignment.  A NULL
+   DECL always uses the plain directive.  */
+void
+v850_output_common (FILE * file,
+		    tree decl,
+		    const char * name,
+		    int size,
+		    int align)
+{
+  if (decl == NULL_TREE)
+    {
+      fprintf (file, "%s", COMMON_ASM_OP);
+    }
+  else
+    {
+      switch (v850_get_data_area (decl))
+	{
+	case DATA_AREA_ZDA:
+	  fprintf (file, "%s", ZCOMMON_ASM_OP);
+	  break;
+
+	case DATA_AREA_SDA:
+	  fprintf (file, "%s", SCOMMON_ASM_OP);
+	  break;
+
+	case DATA_AREA_TDA:
+	  fprintf (file, "%s", TCOMMON_ASM_OP);
+	  break;
+
+	default:
+	  fprintf (file, "%s", COMMON_ASM_OP);
+	  break;
+	}
+    }
+
+  assemble_name (file, name);
+  /* Cast to unsigned: SIZE and ALIGN are ints, and passing a signed
+     int for a %u conversion is a format/argument type mismatch.  */
+  fprintf (file, ",%u,%u\n", (unsigned int) size,
+	   (unsigned int) (align / BITS_PER_UNIT));
+}
+
+/* Called via the macro ASM_OUTPUT_DECL_LOCAL.  Mark NAME as local with
+   LOCAL_ASM_OP, then emit it as (aligned) common data so it is placed
+   in the data area chosen for DECL.  */
+void
+v850_output_local (FILE * file,
+		   tree decl,
+		   const char * name,
+		   int size,
+		   int align)
+{
+  fprintf (file, "%s", LOCAL_ASM_OP);
+  assemble_name (file, name);
+  fputc ('\n', file);
+
+  ASM_OUTPUT_ALIGNED_DECL_COMMON (file, decl, name, size, align);
+}
+
+/* Add data area to the given declaration if a ghs data area pragma is
+   currently in effect (#pragma ghs startXXX/endXXX), and attach a
+   section name to file-scope decls when the GHS section maps require
+   one.  Implements TARGET_INSERT_ATTRIBUTES.  */
+static void
+v850_insert_attributes (tree decl, tree * attr_ptr ATTRIBUTE_UNUSED )
+{
+  /* Apply an active pragma-selected data area to file-scope variables
+     that do not already carry an explicit area attribute.  */
+  if (data_area_stack
+      && data_area_stack->data_area
+      && current_function_decl == NULL_TREE
+      && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL)
+      && v850_get_data_area (decl) == DATA_AREA_NORMAL)
+    v850_set_data_area (decl, data_area_stack->data_area);
+
+  /* Initialize the default names of the v850 specific sections,
+     if this has not been done before.  */
+
+  if (GHS_default_section_names [(int) GHS_SECTION_KIND_SDATA] == NULL)
+    {
+      GHS_default_section_names [(int) GHS_SECTION_KIND_SDATA]
+	= build_string (sizeof (".sdata")-1, ".sdata");
+
+      GHS_default_section_names [(int) GHS_SECTION_KIND_ROSDATA]
+	= build_string (sizeof (".rosdata")-1, ".rosdata");
+
+      GHS_default_section_names [(int) GHS_SECTION_KIND_TDATA]
+	= build_string (sizeof (".tdata")-1, ".tdata");
+
+      GHS_default_section_names [(int) GHS_SECTION_KIND_ZDATA]
+	= build_string (sizeof (".zdata")-1, ".zdata");
+
+      GHS_default_section_names [(int) GHS_SECTION_KIND_ROZDATA]
+	= build_string (sizeof (".rozdata")-1, ".rozdata");
+    }
+
+  /* For defined file-scope decls without an explicit section name,
+     derive one from the decl's data area and constness.  */
+  if (current_function_decl == NULL_TREE
+      && (TREE_CODE (decl) == VAR_DECL
+	  || TREE_CODE (decl) == CONST_DECL
+	  || TREE_CODE (decl) == FUNCTION_DECL)
+      && (!DECL_EXTERNAL (decl) || DECL_INITIAL (decl))
+      && !DECL_SECTION_NAME (decl))
+    {
+      enum GHS_section_kind kind = GHS_SECTION_KIND_DEFAULT;
+      tree chosen_section;
+
+      if (TREE_CODE (decl) == FUNCTION_DECL)
+	kind = GHS_SECTION_KIND_TEXT;
+      else
+	{
+	  /* First choose a section kind based on the data area of the decl.  */
+	  switch (v850_get_data_area (decl))
+	    {
+	    default:
+	      gcc_unreachable ();
+
+	    case DATA_AREA_SDA:
+	      kind = ((TREE_READONLY (decl))
+		      ? GHS_SECTION_KIND_ROSDATA
+		      : GHS_SECTION_KIND_SDATA);
+	      break;
+
+	    case DATA_AREA_TDA:
+	      kind = GHS_SECTION_KIND_TDATA;
+	      break;
+
+	    case DATA_AREA_ZDA:
+	      kind = ((TREE_READONLY (decl))
+		      ? GHS_SECTION_KIND_ROZDATA
+		      : GHS_SECTION_KIND_ZDATA);
+	      break;
+
+	    case DATA_AREA_NORMAL:	 /* default data area */
+	      if (TREE_READONLY (decl))
+		kind = GHS_SECTION_KIND_RODATA;
+	      else if (DECL_INITIAL (decl))
+		kind = GHS_SECTION_KIND_DATA;
+	      else
+		kind = GHS_SECTION_KIND_BSS;
+	    }
+	}
+
+      /* Now, if the section kind has been explicitly renamed,
+	 then attach a section attribute.  */
+      chosen_section = GHS_current_section_names [(int) kind];
+
+      /* Otherwise, if this kind of section needs an explicit section
+	 attribute, then also attach one.  */
+      if (chosen_section == NULL)
+	chosen_section = GHS_default_section_names [(int) kind];
+
+      if (chosen_section)
+	{
+	  /* Only set the section name if specified by a pragma, because
+	     otherwise it will force those variables to get allocated storage
+	     in this module, rather than by the linker.  */
+	  DECL_SECTION_NAME (decl) = chosen_section;
+	}
+    }
+}
+
+/* Construct a DISPOSE instruction that is the equivalent of
+   the given RTX.  We have already verified that this should
+   be possible.  Returns the assembly text (a CALLT form is used when
+   callt is enabled and applicable), or NULL on a malformed pattern.  */
+
+char *
+construct_dispose_instruction (rtx op)
+{
+  int count = XVECLEN (op, 0);
+  int stack_bytes;
+  unsigned long int mask;
+  int i;
+  static char buff[ 100 ]; /* XXX */
+  int use_callt = 0;
+
+  if (count <= 2)
+    {
+      error ("bogus DISPOSE construction: %d", count);
+      return NULL;
+    }
+
+  /* Work out how many bytes to pop off the
+     stack before retrieving registers.  */
+  gcc_assert (GET_CODE (XVECEXP (op, 0, 1)) == SET);
+  gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS);
+  gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1)) == CONST_INT);
+
+  stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1));
+
+  /* Each pop will remove 4 bytes from the stack....  */
+  stack_bytes -= (count - 2) * 4;
+
+  /* Make sure that the amount we are popping
+     will fit into the DISPOSE instruction.  */
+  if (stack_bytes > 128)
+    {
+      error ("too much stack space to dispose of: %d", stack_bytes);
+      return NULL;
+    }
+
+  /* Now compute the bit mask of registers to pop.  Shift 1UL, not 1:
+     register numbers reach 31 and "1 << 31" overflows a 32-bit int,
+     which is undefined behavior; MASK is unsigned long.  */
+  mask = 0;
+
+  for (i = 2; i < count; i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+
+      gcc_assert (GET_CODE (vector_element) == SET);
+      gcc_assert (GET_CODE (SET_DEST (vector_element)) == REG);
+      gcc_assert (register_is_ok_for_epilogue (SET_DEST (vector_element),
+					       SImode));
+
+      /* r2 is restored via a callt helper, not via the mask.  */
+      if (REGNO (SET_DEST (vector_element)) == 2)
+	use_callt = 1;
+      else
+	mask |= 1UL << REGNO (SET_DEST (vector_element));
+    }
+
+  if (! TARGET_DISABLE_CALLT
+      && (use_callt || stack_bytes == 0))
+    {
+      if (use_callt)
+	{
+	  sprintf (buff, "callt ctoff(__callt_return_r2_r%d)", (mask & (1UL << 31)) ? 31 : 29);
+	  return buff;
+	}
+      else
+	{
+	  for (i = 20; i < 32; i++)
+	    if (mask & (1UL << i))
+	      break;
+
+	  if (i == 31)
+	    sprintf (buff, "callt ctoff(__callt_return_r31c)");
+	  else
+	    sprintf (buff, "callt ctoff(__callt_return_r%d_r%s)",
+		     i, (mask & (1UL << 31)) ? "31c" : "29");
+	}
+    }
+  else
+    {
+      static char regs [100]; /* XXX */
+      int done_one;
+
+      /* Generate the DISPOSE instruction.  Note we could just issue the
+	 bit mask as a number as the assembler can cope with this, but for
+	 the sake of our readers we turn it into a textual description.  */
+      regs[0] = 0;
+      done_one = 0;
+
+      for (i = 20; i < 32; i++)
+	{
+	  if (mask & (1UL << i))
+	    {
+	      int first;
+
+	      if (done_one)
+		strcat (regs, ", ");
+	      else
+		done_one = 1;
+
+	      first = i;
+	      strcat (regs, reg_names[ first ]);
+
+	      /* Extend the run over consecutive saved registers.  */
+	      for (i++; i < 32; i++)
+		if ((mask & (1UL << i)) == 0)
+		  break;
+
+	      if (i > first + 1)
+		{
+		  strcat (regs, " - ");
+		  strcat (regs, reg_names[ i - 1 ] );
+		}
+	    }
+	}
+
+      sprintf (buff, "dispose %d {%s}, r31", stack_bytes / 4, regs);
+    }
+
+  return buff;
+}
+
+/* Construct a PREPARE instruction that is the equivalent of
+   the given RTL.  We have already verified that this should
+   be possible.  Returns the assembly text (a CALLT form is used when
+   callt is enabled and applicable), or NULL on a malformed pattern.  */
+
+char *
+construct_prepare_instruction (rtx op)
+{
+  int count;
+  int stack_bytes;
+  unsigned long int mask;
+  int i;
+  static char buff[ 100 ]; /* XXX */
+  int use_callt = 0;
+
+  if (XVECLEN (op, 0) <= 1)
+    {
+      /* Fixed typo: the message used to read "PREPEARE".  */
+      error ("bogus PREPARE construction: %d", XVECLEN (op, 0));
+      return NULL;
+    }
+
+  /* Work out how many bytes to push onto
+     the stack after storing the registers.  */
+  gcc_assert (GET_CODE (XVECEXP (op, 0, 0)) == SET);
+  gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) == PLUS);
+  gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)) == CONST_INT);
+
+  stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1));
+
+  /* Make sure that the amount we are allocating
+     will fit into the PREPARE instruction.  */
+  if (stack_bytes < -128)
+    {
+      error ("too much stack space to prepare: %d", stack_bytes);
+      return NULL;
+    }
+
+  /* Now compute the bit mask of registers to push.  Shift 1UL, not 1:
+     register numbers reach 31 and "1 << 31" overflows a 32-bit int,
+     which is undefined behavior; MASK is unsigned long.  */
+  count = 0;
+  mask = 0;
+  for (i = 1; i < XVECLEN (op, 0); i++)
+    {
+      rtx vector_element = XVECEXP (op, 0, i);
+
+      if (GET_CODE (vector_element) == CLOBBER)
+	continue;
+
+      gcc_assert (GET_CODE (vector_element) == SET);
+      gcc_assert (GET_CODE (SET_SRC (vector_element)) == REG);
+      gcc_assert (register_is_ok_for_epilogue (SET_SRC (vector_element),
+					       SImode));
+
+      /* r2 is saved via a callt helper, not via the mask.  */
+      if (REGNO (SET_SRC (vector_element)) == 2)
+	use_callt = 1;
+      else
+	mask |= 1UL << REGNO (SET_SRC (vector_element));
+      count++;
+    }
+
+  stack_bytes += count * 4;
+
+  if ((! TARGET_DISABLE_CALLT)
+      && (use_callt || stack_bytes == 0))
+    {
+      if (use_callt)
+	{
+	  sprintf (buff, "callt ctoff(__callt_save_r2_r%d)", (mask & (1UL << 31)) ? 31 : 29 );
+	  return buff;
+	}
+
+      for (i = 20; i < 32; i++)
+	if (mask & (1UL << i))
+	  break;
+
+      if (i == 31)
+	sprintf (buff, "callt ctoff(__callt_save_r31c)");
+      else
+	sprintf (buff, "callt ctoff(__callt_save_r%d_r%s)",
+		 i, (mask & (1UL << 31)) ? "31c" : "29");
+    }
+  else
+    {
+      static char regs [100]; /* XXX */
+      int done_one;
+
+      /* Generate the PREPARE instruction.  Note we could just issue the
+	 bit mask as a number as the assembler can cope with this, but for
+	 the sake of our readers we turn it into a textual description.  */
+      regs[0] = 0;
+      done_one = 0;
+
+      for (i = 20; i < 32; i++)
+	{
+	  if (mask & (1UL << i))
+	    {
+	      int first;
+
+	      if (done_one)
+		strcat (regs, ", ");
+	      else
+		done_one = 1;
+
+	      first = i;
+	      strcat (regs, reg_names[ first ]);
+
+	      /* Extend the run over consecutive saved registers.  */
+	      for (i++; i < 32; i++)
+		if ((mask & (1UL << i)) == 0)
+		  break;
+
+	      if (i > first + 1)
+		{
+		  strcat (regs, " - ");
+		  strcat (regs, reg_names[ i - 1 ] );
+		}
+	    }
+	}
+
+      sprintf (buff, "prepare {%s}, %d", regs, (- stack_bytes) / 4);
+    }
+
+  return buff;
+}
+
+/* Return an RTX indicating where the return address to the
+   calling function can be found.  Only the current frame
+   (COUNT == 0) is supported; deeper frames yield const0_rtx.  */
+
+rtx
+v850_return_addr (int count)
+{
+  return (count == 0
+	  ? get_hard_reg_initial_val (Pmode, LINK_POINTER_REGNUM)
+	  : const0_rtx);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS: create the v850-specific
+   sections — read-only small/zero data, and the writable tiny,
+   zero-data and zero-bss sections.  */
+
+static void
+v850_asm_init_sections (void)
+{
+  rosdata_section = get_unnamed_section (0, output_section_asm_op,
+					 "\t.section .rosdata,\"a\"");
+
+  rozdata_section = get_unnamed_section (0, output_section_asm_op,
+					 "\t.section .rozdata,\"a\"");
+
+  tdata_section = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+				       "\t.section .tdata,\"aw\"");
+
+  zdata_section = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+				       "\t.section .zdata,\"aw\"");
+
+  zbss_section = get_unnamed_section (SECTION_WRITE | SECTION_BSS,
+				      output_section_asm_op,
+				      "\t.section .zbss,\"aw\"");
+}
+
+/* Implement TARGET_ASM_SELECT_SECTION: pick a section for EXP based on
+   its data area and whether it is a constant-initialized read-only
+   variable.  Non-VAR_DECLs go to the read-only data section.  */
+static section *
+v850_select_section (tree exp,
+		     int reloc ATTRIBUTE_UNUSED,
+		     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+  int is_const;
+
+  if (TREE_CODE (exp) != VAR_DECL)
+    return readonly_data_section;
+
+  /* Read-only iff the decl is readonly, has no side effects, and has a
+     constant initializer (error_mark_node counts as constant).  */
+  is_const = (TREE_READONLY (exp)
+	      && !TREE_SIDE_EFFECTS (exp)
+	      && DECL_INITIAL (exp)
+	      && (DECL_INITIAL (exp) == error_mark_node
+		  || TREE_CONSTANT (DECL_INITIAL (exp))));
+
+  switch (v850_get_data_area (exp))
+    {
+    case DATA_AREA_ZDA:
+      return is_const ? rozdata_section : zdata_section;
+
+    case DATA_AREA_TDA:
+      return tdata_section;
+
+    case DATA_AREA_SDA:
+      return is_const ? rosdata_section : sdata_section;
+
+    default:
+      return is_const ? readonly_data_section : data_section;
+    }
+}
+
+/* Worker function for TARGET_FUNCTION_VALUE_REGNO_P: function values
+   live only in r10 (see v850_function_value).  */
+
+static bool
+v850_function_value_regno_p (const unsigned int regno)
+{
+  return regno == 10;
+}
+
+/* Worker function for TARGET_RETURN_IN_MEMORY: aggregates without a
+   scalar mode, and any value wider than 8 bytes, are returned in
+   memory rather than in registers.  */
+
+static bool
+v850_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+  if (TYPE_MODE (type) == BLKmode)
+    return true;
+
+  return int_size_in_bytes (type) > 8;
+}
+
+/* Worker function for TARGET_FUNCTION_VALUE: values are returned in
+   register r10, in the natural mode of VALTYPE.  */
+
+static rtx
+v850_function_value (const_tree valtype,
+		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+		     bool outgoing ATTRIBUTE_UNUSED)
+{
+  enum machine_mode mode = TYPE_MODE (valtype);
+
+  return gen_rtx_REG (mode, 10);
+}
+
+
+/* Worker function for TARGET_SETUP_INCOMING_VARARGS: record in the
+   cumulative-args state that anonymous arguments follow, except under
+   the GHS ABI.  */
+
+static void
+v850_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
+			     enum machine_mode mode ATTRIBUTE_UNUSED,
+			     tree type ATTRIBUTE_UNUSED,
+			     int *pretend_arg_size ATTRIBUTE_UNUSED,
+			     int second_time ATTRIBUTE_UNUSED)
+{
+  ca->anonymous_args = !TARGET_GHS;
+}
+
+/* Worker function for TARGET_CAN_ELIMINATE: elimination towards the
+   stack pointer is only possible when no frame pointer is needed;
+   every other elimination is always allowed.  */
+
+static bool
+v850_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+  if (to == STACK_POINTER_REGNUM)
+    return ! frame_pointer_needed;
+
+  return true;
+}
+
+/* Worker function for TARGET_CONDITIONAL_REGISTER_USAGE.
+
+   If TARGET_APP_REGS is not defined then add r2 and r5 to
+   the pool of fixed registers.  See PR 14505.  */
+
+static void
+v850_conditional_register_usage (void)
+{
+  if (TARGET_APP_REGS)
+    {
+      /* With -mapp-regs, make r2 and r5 available to the allocator.
+	 NOTE(review): call_used_regs[2] is cleared but call_used_regs[5]
+	 is set — i.e. r2 callee-saved, r5 caller-saved.  The asymmetry
+	 looks deliberate but is undocumented; confirm against the ABI.  */
+      fixed_regs[2] = 0; call_used_regs[2] = 0;
+      fixed_regs[5] = 0; call_used_regs[5] = 1;
+    }
+}
+
+/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.  Emit the fixed
+   trampoline code followed by two zero words that v850_trampoline_init
+   later patches with the static chain and the target address.  */
+
+static void
+v850_asm_trampoline_template (FILE *f)
+{
+  static const char * const template_lines[] =
+    {
+      "\tjarl .+4,r12\n",
+      "\tld.w 12[r12],r20\n",
+      "\tld.w 16[r12],r12\n",
+      "\tjmp [r12]\n",
+      "\tnop\n",
+      "\t.long 0\n",
+      "\t.long 0\n"
+    };
+  size_t i;
+
+  for (i = 0; i < sizeof (template_lines) / sizeof (template_lines[0]); i++)
+    fputs (template_lines[i], f);
+}
+
+/* Worker function for TARGET_TRAMPOLINE_INIT.  M_TRAMP is the memory
+   block for the trampoline, FNDECL the nested function and CHAIN_VALUE
+   the static chain.  Copies the code emitted by
+   v850_asm_trampoline_template, then patches the words at offsets 16
+   and 20 with the chain value and the target function's address.  */
+
+static void
+v850_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+  rtx mem, fnaddr = XEXP (DECL_RTL (fndecl), 0);
+
+  emit_block_move (m_tramp, assemble_trampoline_template (),
+		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+  /* Static chain goes into the first patched word...  */
+  mem = adjust_address (m_tramp, SImode, 16);
+  emit_move_insn (mem, chain_value);
+  /* ...and the function address into the second.  */
+  mem = adjust_address (m_tramp, SImode, 20);
+  emit_move_insn (mem, fnaddr);
+}
+
+/* Worker for TARGET_SCHED_ISSUE_RATE: V850E2 cores are dual-issue,
+   everything else issues one insn per cycle.  */
+static int
+v850_issue_rate (void)
+{
+  if (TARGET_V850E2_ALL)
+    return 2;
+
+  return 1;
+}
+
+/* V850 specific attributes.  All are decl attributes taking no
+   arguments; "interrupt"/"interrupt_handler" are synonyms, and the
+   three data-area attributes share one handler.  */
+
+static const struct attribute_spec v850_attribute_table[] =
+{
+  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+  { "interrupt_handler", 0, 0, true,  false, false, v850_handle_interrupt_attribute },
+  { "interrupt",         0, 0, true,  false, false, v850_handle_interrupt_attribute },
+  { "sda",               0, 0, true,  false, false, v850_handle_data_area_attribute },
+  { "tda",               0, 0, true,  false, false, v850_handle_data_area_attribute },
+  { "zda",               0, 0, true,  false, false, v850_handle_data_area_attribute },
+  /* Table terminator.  */
+  { NULL,                0, 0, false, false, false, NULL }
+};
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND v850_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS v850_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P v850_print_operand_punct_valid_p
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE v850_attribute_table
+
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES v850_insert_attributes
+
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION v850_select_section
+
+/* The assembler supports switchable .bss sections, but
+ v850_select_section doesn't yet make use of them. */
+#undef TARGET_HAVE_SWITCHABLE_BSS_SECTIONS
+#define TARGET_HAVE_SWITCHABLE_BSS_SECTIONS false
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO v850_encode_section_info
+
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_DEFAULT | MASK_APP_REGS)
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION v850_handle_option
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS v850_rtx_costs
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG v850_reorg
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE v850_issue_rate
+
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P v850_function_value_regno_p
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE v850_function_value
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY v850_return_in_memory
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE v850_pass_by_reference
+
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS v850_setup_incoming_varargs
+
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES v850_arg_partial_bytes
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG v850_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE v850_function_arg_advance
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE v850_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE v850_conditional_register_usage
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE v850_asm_trampoline_template
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT v850_trampoline_init
+
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING v850_strict_argument_naming
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE v850_option_optimization_table
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-v850.h"
diff --git a/gcc/config/v850/v850.h b/gcc/config/v850/v850.h
new file mode 100644
index 000000000..892a34c63
--- /dev/null
+++ b/gcc/config/v850/v850.h
@@ -0,0 +1,987 @@
+/* Definitions of target machine for GNU compiler. NEC V850 series
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
+ 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Contributed by Jeff Law (law@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_V850_H
+#define GCC_V850_H
+
+extern GTY(()) rtx v850_compare_op0;
+extern GTY(()) rtx v850_compare_op1;
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!symbolic:--start-group -lc -lgcc --end-group}}"
+
+#undef ENDFILE_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+#undef ASM_SPEC
+
+#define TARGET_CPU_generic 1
+#define TARGET_CPU_v850e 2
+#define TARGET_CPU_v850e1 3
+#define TARGET_CPU_v850e2 4
+#define TARGET_CPU_v850e2v3 5
+
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+#define MASK_DEFAULT MASK_V850
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850}"
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850__}"
+#define TARGET_VERSION fprintf (stderr, " (NEC V850)");
+
+/* Choose which processor will be the default.
+ We must pass a -mv850xx option to the assembler if no explicit -mv* option
+ is given, because the assembler's processor default may not be correct. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e
+#undef MASK_DEFAULT
+#define MASK_DEFAULT MASK_V850E
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e}"
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e__}"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NEC V850E)");
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e1
+#undef MASK_DEFAULT
+#define MASK_DEFAULT MASK_V850E /* No practical difference. */
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e1}"
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e1__} %{mv850e1:-D__v850e1__}"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NEC V850E1)");
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e2
+#undef MASK_DEFAULT
+#define MASK_DEFAULT MASK_V850E2
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e2}"
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e2__} %{mv850e2:-D__v850e2__}"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NEC V850E2)");
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e2v3
+#undef MASK_DEFAULT
+#define MASK_DEFAULT MASK_V850E2V3
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e2v3}"
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e2v3__} %{mv850e2v3:-D__v850e2v3__}"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NEC V850E2V3)");
+#endif
+
+#define TARGET_V850E2_ALL (TARGET_V850E2 || TARGET_V850E2V3)
+
+#define ASM_SPEC "%{mv850es:-mv850e1}%{!mv850es:%{mv*:-mv%*}}"
+#define CPP_SPEC "\
+ %{mv850e2v3:-D__v850e2v3__} \
+ %{mv850e2:-D__v850e2__} \
+ %{mv850es:-D__v850e1__} \
+ %{mv850e1:-D__v850e1__} \
+ %{mv850:-D__v850__} \
+ %(subtarget_cpp_spec)" \
+ " %{mep:-D__EP__}"
+
+#define EXTRA_SPECS \
+ { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define TARGET_CPU_CPP_BUILTINS() do { \
+ builtin_define( "__v851__" ); \
+ builtin_define( "__v850" ); \
+ builtin_assert( "machine=v850" ); \
+ builtin_assert( "cpu=v850" ); \
+ if (TARGET_EP) \
+ builtin_define ("__EP__"); \
+} while(0)
+
+#define MASK_CPU (MASK_V850 | MASK_V850E)
+
+/* Information about the various small memory areas.  */
+struct small_memory_info {
+  const char *name;      /* Textual name of the area (presumably used for option handling/diagnostics -- confirm in v850.c).  */
+  long max;              /* User-selected maximum size of objects placed in this area -- TODO confirm against v850.c.  */
+  long physical_max;     /* Hardware/addressing limit on the size of this area -- TODO confirm against v850.c.  */
+};
+
+enum small_memory_type {
+  /* Tiny data area, addressed relative to the EP register.  */
+  SMALL_MEMORY_TDA = 0,
+  /* Small data area, addressed relative to the dp register.  */
+  SMALL_MEMORY_SDA,
+  /* Zero data area, addressed relative to r0 (the constant-zero register).  */
+  SMALL_MEMORY_ZDA,
+  SMALL_MEMORY_max  /* Number of small memory areas; must remain last.  */
+};
+
+extern struct small_memory_info small_memory[(int)SMALL_MEMORY_max];
+
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ This is not true on the NEC V850. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* This is not true on the NEC V850. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+ This is not true on the NEC V850. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type.
+
+ Some simple experiments have shown that leaving UNSIGNEDP alone
+ generates the best overall code. */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { (MODE) = SImode; }
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* The stack goes in 32-bit lumps. */
+#define STACK_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function.
+ 16 is the minimum boundary; 32 would give better performance. */
+#define FUNCTION_BOUNDARY 16
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* No structure field wants to be aligned rounder than this. */
+#define BIGGEST_FIELD_ALIGNMENT 32
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT (!TARGET_NO_STRICT_ALIGN)
+
+/* Define this as 1 if `char' should by default be signed; else as 0.
+
+   On the NEC V850, loads do sign extension, so make this the default.  */
+#define DEFAULT_SIGNED_CHAR 1
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+
+#define FIRST_PSEUDO_REGISTER 36
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+
+#define FIXED_REGISTERS \
+ { 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 0, \
+ 1, 1, \
+ 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you
+ like. */
+
+#define CALL_USED_REGISTERS \
+ { 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ 1, 1, \
+ 1, 1}
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ On the 850, we make the return registers first, then all of the volatile
+ registers, then the saved registers in reverse order to better save the
+ registers with an out of line function, and finally the fixed
+ registers. */
+
+#define REG_ALLOC_ORDER \
+{ \
+ 10, 11, /* return registers */ \
+ 12, 13, 14, 15, 16, 17, 18, 19, /* scratch registers */ \
+ 6, 7, 8, 9, 31, /* argument registers */ \
+ 29, 28, 27, 26, 25, 24, 23, 22, /* saved registers */ \
+ 21, 20, 2, \
+ 0, 1, 3, 4, 5, 30, 32, 33, /* fixed registers */ \
+ 34, 35 \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) <= 4) || (((REGNO) & 1) == 0 && (REGNO) != 0))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (MODE1 == MODE2 || (GET_MODE_SIZE (MODE1) <= 4 && GET_MODE_SIZE (MODE2) <= 4))
+
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+enum reg_class
+{
+  NO_REGS, GENERAL_REGS, EVEN_REGS, ALL_REGS, LIM_REG_CLASSES  /* EVEN_REGS = even-numbered registers other than r0 (mask 0x55555554 in REG_CLASS_CONTENTS); LIM_REG_CLASSES must be last.  */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define IRA_COVER_CLASSES \
+{ \
+ GENERAL_REGS, LIM_REG_CLASSES \
+}
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", "GENERAL_REGS", "EVEN_REGS", "ALL_REGS", "LIM_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000,0x0 }, /* NO_REGS */ \
+ { 0xffffffff,0x0 }, /* GENERAL_REGS */ \
+ { 0x55555554,0x0 }, /* EVEN_REGS */ \
+ { 0xffffffff,0x0 }, /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) ((REGNO == CC_REGNUM || REGNO == FCC_REGNUM) ? NO_REGS : GENERAL_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS NO_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_BASE_P(regno) \
+ (((regno) < FIRST_PSEUDO_REGISTER \
+ && (regno) != CC_REGNUM \
+ && (regno) != FCC_REGNUM) \
+ || reg_renumber[regno] >= 0)
+
+#define REGNO_OK_FOR_INDEX_P(regno) 0
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Convenience wrappers around insn_const_int_ok_for_constraint. */
+
+#define CONST_OK_FOR_I(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_I)
+#define CONST_OK_FOR_J(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_J)
+#define CONST_OK_FOR_K(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_K)
+#define CONST_OK_FOR_L(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_L)
+#define CONST_OK_FOR_M(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_M)
+#define CONST_OK_FOR_N(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_N)
+#define CONST_OK_FOR_O(VALUE) \
+ insn_const_int_ok_for_constraint (VALUE, CONSTRAINT_O)
+
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+
+#define STARTING_FRAME_OFFSET 0
+
+/* Offset of first parameter from the argument pointer register value. */
+/* Is equal to the size of the saved fp + pc, even if an fp isn't
+ saved since the value is used before we know. */
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM SP_REGNUM
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 34
+
+/* Register containing return address from latest function call. */
+#define LINK_POINTER_REGNUM LP_REGNUM
+
+/* On some machines the offset between the frame pointer and starting
+ offset of the automatic variables is not known until after register
+ allocation has been done (for example, because the saved registers
+ are between these two locations). On those machines, define
+ `FRAME_POINTER_REGNUM' the number of a special, fixed register to
+ be used internally until the offset is known, and define
+   `HARD_FRAME_POINTER_REGNUM' to be the actual hard register number
+ used for the frame pointer.
+
+ You should define this macro only in the very rare circumstances
+ when it is not possible to calculate the offset between the frame
+ pointer and the automatic variables until after register
+ allocation has been completed. When this macro is defined, you
+ must also indicate in your definition of `ELIMINABLE_REGS' how to
+ eliminate `FRAME_POINTER_REGNUM' into either
+ `HARD_FRAME_POINTER_REGNUM' or `STACK_POINTER_REGNUM'.
+
+ Do not define this macro if it would be the same as
+ `FRAME_POINTER_REGNUM'. */
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 29
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 35
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 20
+
+/* If defined, this macro specifies a table of register pairs used to
+ eliminate unneeded registers that point into the stack frame. If
+ it is not defined, the only elimination attempted by the compiler
+ is to replace references to the frame pointer with references to
+ the stack pointer.
+
+ The definition of this macro is a list of structure
+ initializations, each of which specifies an original and
+ replacement register.
+
+ On some machines, the position of the argument pointer is not
+ known until the compilation is completed. In such a case, a
+ separate hard register must be used for the argument pointer.
+ This register can be eliminated by replacing it with either the
+ frame pointer or the argument pointer, depending on whether or not
+ the frame pointer has been eliminated.
+
+ In this case, you might specify:
+ #define ELIMINABLE_REGS \
+ {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+ Note that the elimination of the argument pointer with the stack
+ pointer is specified first since that is the preferred elimination. */
+
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }} \
+
+/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
+ specifies the initial difference between the specified pair of
+ registers. This macro must be defined if `ELIMINABLE_REGS' is
+ defined. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = get_frame_size () + crtl->outgoing_args_size; \
+ else if ((FROM) == ARG_POINTER_REGNUM) \
+ (OFFSET) = compute_frame_size (get_frame_size (), (long *)0); \
+ else \
+ gcc_unreachable (); \
+}
+
+/* Keep the stack pointer constant throughout the function. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_ADDR_RTX(COUNT, FP) v850_return_addr (COUNT)
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+
+#define CUMULATIVE_ARGS struct cum_arg
+struct cum_arg { int nbytes; int anonymous_args; };  /* nbytes = argument bytes scanned so far; anonymous_args presumably flags unnamed (varargs) arguments -- both zeroed by INIT_CUMULATIVE_ARGS.  */
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ ((CUM).nbytes = 0, (CUM).anonymous_args = 0)
+
+/* When a parameter is passed in a register, stack space is still
+ allocated for it. */
+#define REG_PARM_STACK_SPACE(DECL) 0
+
+/* 1 if N is a possible register number for function argument passing. */
+
+#define FUNCTION_ARG_REGNO_P(N) (N >= 6 && N <= 9)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (MODE, 10)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Define this macro as a C expression that is nonzero for registers
+ used by the epilogue or the `return' pattern. */
+
+#define EPILOGUE_USES(REGNO) \
+ (reload_completed && (REGNO) == LINK_POINTER_REGNUM)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) ;
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 24
+
+/* Addressing modes, and classification of registers for them. */
+
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+/* ??? This seems too exclusive. May get better code by accepting more
+ possibilities here, in particular, should accept ZDA_NAME SYMBOL_REFs. */
+
+#define CONSTANT_ADDRESS_P(X) constraint_satisfied_p (X, CONSTRAINT_K)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 1
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 0
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) 1
+#define REG_OK_FOR_INDEX_P_STRICT(X) 0
+#define REG_OK_FOR_BASE_P_STRICT(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define STRICT 0
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) 0
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define STRICT 1
+
+#endif
+
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is actually
+ machine-independent. */
+
+/* Accept either REG or SUBREG where a register is valid. */
+
+#define RTX_OK_FOR_BASE_P(X) \
+ ((REG_P (X) && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == SUBREG && REG_P (SUBREG_REG (X)) \
+ && REG_OK_FOR_BASE_P (SUBREG_REG (X))))
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+do { \
+ if (RTX_OK_FOR_BASE_P (X)) \
+ goto ADDR; \
+ if (CONSTANT_ADDRESS_P (X) \
+ && (MODE == QImode || INTVAL (X) % 2 == 0) \
+ && (GET_MODE_SIZE (MODE) <= 4 || INTVAL (X) % 4 == 0)) \
+ goto ADDR; \
+ if (GET_CODE (X) == LO_SUM \
+ && REG_P (XEXP (X, 0)) \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_P (XEXP (X, 1)) \
+ && (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ || ((MODE == QImode || INTVAL (XEXP (X, 1)) % 2 == 0) \
+ && CONST_OK_FOR_K (INTVAL (XEXP (X, 1))))) \
+ && GET_MODE_SIZE (MODE) <= GET_MODE_SIZE (word_mode)) \
+ goto ADDR; \
+ if (special_symbolref_operand (X, MODE) \
+ && (GET_MODE_SIZE (MODE) <= GET_MODE_SIZE (word_mode))) \
+ goto ADDR; \
+ if (GET_CODE (X) == PLUS \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && constraint_satisfied_p (XEXP (X,1), CONSTRAINT_K) \
+ && ((MODE == QImode || INTVAL (XEXP (X, 1)) % 2 == 0) \
+ && CONST_OK_FOR_K (INTVAL (XEXP (X, 1)) \
+ + (GET_MODE_NUNITS (MODE) * UNITS_PER_WORD)))) \
+ goto ADDR; \
+} while (0)
+
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_DOUBLE \
+ || !(GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
+ && ! CONST_OK_FOR_K (INTVAL (XEXP (XEXP (X, 0), 1)))))
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+
+ For floating-point equality comparisons, CCFPEQmode should be used.
+ VOIDmode should be used in all other cases.
+
+ For integer comparisons against zero, reduce to CCNOmode or CCZmode if
+ possible, to allow for more combinations. */
+
+#define SELECT_CC_MODE(OP, X, Y) v850_select_cc_mode (OP, X, Y)
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+   (see `conditions.h').  No extra ones are needed for the V850.  */
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define CC_OVERFLOW_UNUSABLE 0x200
+#define CC_NO_CARRY CC_NO_OVERFLOW
+#define NOTICE_UPDATE_CC(EXP, INSN) notice_update_cc(EXP, INSN)
+
+/* Nonzero if access to memory by bytes or half words is no faster
+ than accessing full words. */
+#define SLOW_BYTE_ACCESS 1
+
+/* According to expr.c, a value of around 6 should minimize code size, and
+ for the V850 series, that's our primary concern. */
+#define MOVE_RATIO(speed) 6
+
+/* Indirect calls are expensive, never turn a direct call
+ into an indirect call. */
+#define NO_FUNCTION_CSE
+
+/* The four different data regions on the v850.  */
+typedef enum
+{
+  DATA_AREA_NORMAL,   /* Ordinary data sections.  */
+  DATA_AREA_SDA,      /* Small data area (dp-relative).  */
+  DATA_AREA_TDA,      /* Tiny data area (EP-relative).  */
+  DATA_AREA_ZDA       /* Zero data area (r0-relative).  */
+} v850_data_area;
+
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+#define DATA_SECTION_ASM_OP "\t.section .data"
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+#define SDATA_SECTION_ASM_OP "\t.section .sdata,\"aw\""
+#define SBSS_SECTION_ASM_OP "\t.section .sbss,\"aw\""
+
+#define SCOMMON_ASM_OP "\t.scomm\t"
+#define ZCOMMON_ASM_OP "\t.zcomm\t"
+#define TCOMMON_ASM_OP "\t.tcomm\t"
+
+#define ASM_COMMENT_START "#"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
+ if (! v850_output_addr_const_extra (FILE, X)) \
+ goto FAIL
+
+/* This says how to output the assembler to define a global
+ uninitialized but not common symbol. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss ((FILE), (DECL), (NAME), (SIZE), (ALIGN))
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ v850_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* This says how to output the assembler to define a global
+ uninitialized, common symbol. */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \
+ v850_output_common (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* This says how to output the assembler to define a local
+ uninitialized symbol. */
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+ v850_output_local (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global "
+
+#define ASM_PN_FORMAT "%s___%lu"
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
+ do { assemble_name(FILE, NAME1); \
+ fputs(" = ", FILE); \
+ assemble_name(FILE, NAME2); \
+ fputc('\n', FILE); } while (0)
+
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{ "r0", "r1", "r2", "sp", "gp", "r5", "r6" , "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "ep", "r31", \
+ "psw", "fcc", \
+ ".fp", ".ap"}
+
+/* Register numbers */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ { "zero", ZERO_REGNUM }, \
+ { "hp", 2 }, \
+ { "r3", 3 }, \
+ { "r4", 4 }, \
+ { "tp", 5 }, \
+ { "fp", 29 }, \
+ { "r30", 30 }, \
+ { "lp", LP_REGNUM} }
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t%s .L%d\n", \
+ (TARGET_BIG_SWITCH ? ".long" : ".short"), VALUE)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+/* Disable the shift, which is for the currently disabled "switch"
+   opcode.  See casesi in v850.md.  */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t%s %s.L%d-.L%d%s\n", \
+ (TARGET_BIG_SWITCH ? ".long" : ".short"), \
+ (0 && ! TARGET_BIG_SWITCH && (TARGET_V850E || TARGET_V850E2_ALL) ? "(" : ""), \
+ VALUE, REL, \
+ (0 && ! TARGET_BIG_SWITCH && (TARGET_V850E || TARGET_V850E2_ALL) ? ")>>1" : ""))
+
+#define ASM_OUTPUT_ALIGN(FILE, LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* We don't have to worry about dbx compatibility for the v850. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/* Use stabs debugging info by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE (TARGET_BIG_SWITCH ? SImode : HImode)
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* The switch instruction requires that the jump table immediately follow
+ it. */
+#define JUMP_TABLES_IN_TEXT_SECTION (!TARGET_JUMP_TABLES_IN_DATA_SECTION)
+
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), (TARGET_BIG_SWITCH ? 2 : 1));
+
+#define WORD_REGISTER_OPERATIONS
+
+/* Byte and short loads sign extend the value to a word. */
+#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* Tell compiler we want to support GHS pragmas */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_pragma ("ghs", "interrupt", ghs_pragma_interrupt); \
+ c_register_pragma ("ghs", "section", ghs_pragma_section); \
+ c_register_pragma ("ghs", "starttda", ghs_pragma_starttda); \
+ c_register_pragma ("ghs", "startsda", ghs_pragma_startsda); \
+ c_register_pragma ("ghs", "startzda", ghs_pragma_startzda); \
+ c_register_pragma ("ghs", "endtda", ghs_pragma_endtda); \
+ c_register_pragma ("ghs", "endsda", ghs_pragma_endsda); \
+ c_register_pragma ("ghs", "endzda", ghs_pragma_endzda); \
+} while (0)
+
+/* enum GHS_SECTION_KIND is an enumeration of the kinds of sections that
+ can appear in the "ghs section" pragma. These names are used to index
+ into the GHS_default_section_names[] and GHS_current_section_names[]
+ that are defined in v850.c, and so the ordering of each must remain
+ consistent.
+
+ These arrays give the default and current names for each kind of
+ section defined by the GHS pragmas. The current names can be changed
+ by the "ghs section" pragma. If the current names are null, use
+ the default names. Note that the two arrays have different types.
+
+ For the *normal* section kinds (like .data, .text, etc.) we do not
+ want to explicitly force the name of these sections, but would rather
+ let the linker (or at least the back end) choose the name of the
+   section, UNLESS the user has forced a specific name for these section
+ kinds. To accomplish this set the name in ghs_default_section_names
+ to null. */
+
+enum GHS_section_kind
+{
+  GHS_SECTION_KIND_DEFAULT,  /* Use the default section choice.  */
+
+  GHS_SECTION_KIND_TEXT,     /* Executable code.  */
+  GHS_SECTION_KIND_DATA,     /* Initialized writable data.  */
+  GHS_SECTION_KIND_RODATA,   /* Read-only data.  */
+  GHS_SECTION_KIND_BSS,      /* Uninitialized data.  */
+  GHS_SECTION_KIND_SDATA,    /* Small data area.  */
+  GHS_SECTION_KIND_ROSDATA,  /* Read-only small data.  */
+  GHS_SECTION_KIND_TDATA,    /* Tiny data area.  */
+  GHS_SECTION_KIND_ZDATA,    /* Zero data area.  */
+  GHS_SECTION_KIND_ROZDATA,  /* Read-only zero data.  */
+
+  COUNT_OF_GHS_SECTION_KINDS /* Number of kinds; must be last (indexes the name arrays in v850.c).  */
+};
+
+/* The following code is for handling pragmas supported by the
+ v850 compiler produced by Green Hills Software. This is at
+ the specific request of a customer. */
+
+typedef struct data_area_stack_element
+{
+  struct data_area_stack_element * prev;  /* Enclosing stack entry (outer pragma nesting level), or NULL at the bottom.  */
+  v850_data_area data_area;               /* Current default data area.  */
+} data_area_stack_element;
+
+/* Track the current data area set by the
+ data area pragma (which can be nested). */
+extern data_area_stack_element * data_area_stack;
+
+/* Names of the various data areas used on the v850. */
+extern union tree_node * GHS_default_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
+extern union tree_node * GHS_current_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
+
+/* The assembler op to start the file. */
+
+#define FILE_ASM_OP "\t.file\n"
+
+/* Enable the register move pass to improve code. */
+#define ENABLE_REGMOVE_PASS
+
+
+/* Implement ZDA, TDA, and SDA */
+
+#define EP_REGNUM 30 /* ep register number */
+
+#define SYMBOL_FLAG_ZDA (SYMBOL_FLAG_MACH_DEP << 0)
+#define SYMBOL_FLAG_TDA (SYMBOL_FLAG_MACH_DEP << 1)
+#define SYMBOL_FLAG_SDA (SYMBOL_FLAG_MACH_DEP << 2)
+#define SYMBOL_REF_ZDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_ZDA) != 0)
+#define SYMBOL_REF_TDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_TDA) != 0)
+#define SYMBOL_REF_SDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_SDA) != 0)
+
+#define TARGET_ASM_INIT_SECTIONS v850_asm_init_sections
+
+/* Define this so that the cc1plus will not think that system header files
+ need an implicit 'extern "C" { ... }' assumed. This breaks testing C++
+ in a build directory where the libstdc++ header files are found via a
+ -isystem <path-to-build-dir>. */
+#define NO_IMPLICIT_EXTERN_C
+
+#endif /* ! GCC_V850_H */
diff --git a/gcc/config/v850/v850.md b/gcc/config/v850/v850.md
new file mode 100644
index 000000000..88e42c65e
--- /dev/null
+++ b/gcc/config/v850/v850.md
@@ -0,0 +1,2667 @@
+;; GCC machine description for NEC V850
+;; Copyright (C) 1996, 1997, 1998, 1999, 2002, 2004, 2005, 2007, 2008, 2010
+;; Free Software Foundation, Inc.
+;; Contributed by Jeff Law (law@cygnus.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The original PO technology requires these to be ordered by speed,
+;; so that assigner will pick the fastest.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; The V851 manual states that the instruction address space is 16M;
+;; the various branch/call instructions only have a 22bit offset (4M range).
+;;
+;; One day we'll probably need to handle calls to targets more than 4M
+;; away.
+
+;; The size of instructions in bytes.
+
+;;---------------------------------------------------------------------------
+;; Constants
+
+;;
+(define_constants
+ [(ZERO_REGNUM 0) ; constant zero
+ (SP_REGNUM 3) ; Stack Pointer
+ (GP_REGNUM 4) ; GP Pointer
+ (EP_REGNUM 30) ; EP pointer
+ (LP_REGNUM 31) ; Return address register
+ (CC_REGNUM 32) ; Condition code pseudo register
+ (FCC_REGNUM 33) ; Floating Condition code pseudo register
+ ]
+)
+
+(define_attr "length" ""
+ (const_int 4))
+
+(define_attr "long_calls" "yes,no"
+ (const (if_then_else (symbol_ref "TARGET_LONG_CALLS")
+ (const_string "yes")
+ (const_string "no"))))
+
+;; Types of instructions (for scheduling purposes).
+
+(define_attr "type" "load,store,bit1,mult,macc,div,fpu,single,other"
+ (const_string "other"))
+
+;; Which member of the V850 family is being targeted.  The tests are
+;; ordered from oldest to newest core.  NOTE: the final test previously
+;; repeated TARGET_V850E2, which made the "v850e2v3" value unreachable;
+;; it must test TARGET_V850E2V3 so E2V3 parts are classified correctly.
+(define_attr "cpu" "none,v850,v850e,v850e1,v850e2,v850e2v3"
+ (cond [(ne (symbol_ref "TARGET_V850") (const_int 0))
+ (const_string "v850")
+ (ne (symbol_ref "TARGET_V850E") (const_int 0))
+ (const_string "v850e")
+ (ne (symbol_ref "TARGET_V850E1") (const_int 0))
+ (const_string "v850e1")
+ (ne (symbol_ref "TARGET_V850E2") (const_int 0))
+ (const_string "v850e2")
+ (ne (symbol_ref "TARGET_V850E2V3") (const_int 0))
+ (const_string "v850e2v3")]
+ (const_string "none")))
+
+;; Condition code settings.
+;; none - insn does not affect cc
+;; none_0hit - insn does not affect cc but it does modify operand 0
+;; This attribute is used to keep track of when operand 0 changes.
+;; See the description of NOTICE_UPDATE_CC for more info.
+;; set_z - sets z to a usable value; n,v,c are unknown.
+;; set_znv - sets z,n,v to usable values; c is unknown.
+;; set_zn - sets z,n to usable values; v,c is unknown.
+;; compare - compare instruction
+;; clobber - value of cc is unknown
+(define_attr "cc" "none,none_0hit,set_z,set_zn,set_znv,compare,clobber"
+  (const_string "clobber"))
+
+;; Function units for the V850. As best as I can tell, there's
+;; a traditional memory load/use stall as well as a stall if
+;; the result of a multiply is used too early.
+
+;; Default: single-cycle latency.
+(define_insn_reservation "v850_other" 1
+ (eq_attr "type" "other")
+ "nothing")
+;; Multiply results become available after two cycles.
+(define_insn_reservation "v850_mult" 2
+ (eq_attr "type" "mult")
+ "nothing")
+;; Loads have a one-cycle load/use delay.
+(define_insn_reservation "v850_memory" 2
+ (eq_attr "type" "load")
+ "nothing")
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ----------------------------------------------------------------------
+;; MOVE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+;; V850E2V3-only load/store patterns using the long 23-bit-displacement
+;; addressing forms; the displacement is constrained by disp23_operand
+;; ("W").  Byte load, sign-extended.
+(define_insn "sign23byte_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "disp23_operand" "W")))))]
+ "TARGET_V850E2V3"
+ "ld.b %2[%1],%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Byte load, zero-extended.
+(define_insn "unsign23byte_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "disp23_operand" "W")))))]
+ "TARGET_V850E2V3"
+ "ld.bu %2[%1],%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Half-word load, sign-extended.
+(define_insn "sign23hword_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "disp23_operand" "W")))))]
+ "TARGET_V850E2V3"
+ "ld.h %2[%1],%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Half-word load, zero-extended.
+(define_insn "unsign23hword_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "disp23_operand" "W")))))]
+ "TARGET_V850E2V3"
+ "ld.hu %2[%1],%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Word load.
+(define_insn "23word_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "disp23_operand" "W"))))]
+ "TARGET_V850E2V3"
+ "ld.w %2[%1],%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Byte store.
+(define_insn "23byte_store"
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand 1 "disp23_operand" "W")))
+ (match_operand:QI 2 "register_operand" "r"))]
+ "TARGET_V850E2V3"
+ "st.b %2,%1[%0]"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Half-word store.
+(define_insn "23hword_store"
+ [(set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand 1 "disp23_operand" "W")))
+ (match_operand:HI 2 "register_operand" "r"))]
+ "TARGET_V850E2V3"
+ "st.h %2,%1[%0]"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Word store.
+(define_insn "23word_store"
+ [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand 1 "disp23_operand" "W")))
+ (match_operand:SI 2 "register_operand" "r"))]
+ "TARGET_V850E2V3"
+ "st.w %2,%1[%0]"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+;; movqi
+
+;; QImode move.  The matching insn below requires that at least one
+;; operand be a register (or the constant 0), so force the source into
+;; a register when neither side qualifies.
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register or 0 */
+ if (!register_operand (operand0, QImode)
+ && !reg_or_0_operand (operand1, QImode))
+ operands[1] = copy_to_mode_reg (QImode, operand1);
+}")
+
+;; QImode move insn; the actual assembly is chosen per-alternative by
+;; output_move_single.
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,Q,r,m,m")
+ (match_operand:QI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
+ "register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode)"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,2,2,4,4,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,load,other,load,store,store")])
+
+;; movhi
+
+;; HImode move; same register-or-zero requirement as movqi.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register or 0 */
+ if (!register_operand (operand0, HImode)
+ && !reg_or_0_operand (operand1, HImode))
+ operands[1] = copy_to_mode_reg (HImode, operand1);
+}")
+
+;; HImode move insn; assembly chosen per-alternative by
+;; output_move_single.
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r,Q,r,m,m")
+ (match_operand:HI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
+ "register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode)"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,2,2,4,4,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,load,other,load,store,store")])
+
+;; movsi and helpers
+
+;; Load the high part of a constant/symbol: movhi hi(x),r0,dest.
+(define_insn "*movsi_high"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "" "")))]
+ ""
+ "movhi hi(%1),%.,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "other")])
+
+;; Add the low part: movea lo(x),hi-reg,dest.
+(define_insn "*movsi_lo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+ "movea lo(%2),%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "other")])
+
+;; SImode move.  Constants that no single instruction can materialize
+;; (on pre-E cores) are split into the HIGH/LO_SUM pair above.
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register or 0 */
+ if (!register_operand (operand0, SImode)
+ && !reg_or_0_operand (operand1, SImode))
+ operands[1] = copy_to_mode_reg (SImode, operand1);
+
+ /* Some constants, as well as symbolic operands
+ must be done with HIGH & LO_SUM patterns. */
+ if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && ! (TARGET_V850E || TARGET_V850E2_ALL)
+ && !special_symbolref_operand (operands[1], VOIDmode)
+ && !(GET_CODE (operands[1]) == CONST_INT
+ && (CONST_OK_FOR_J (INTVAL (operands[1]))
+ || CONST_OK_FOR_K (INTVAL (operands[1]))
+ || CONST_OK_FOR_L (INTVAL (operands[1])))))
+ {
+ rtx temp;
+
+ /* During reload we may not create new pseudos; reuse the
+ destination as the temporary instead. */
+ if (reload_in_progress || reload_completed)
+ temp = operands[0];
+ else
+ temp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (SImode, temp,
+ gen_rtx_HIGH (SImode, operand1)));
+ emit_insn (gen_rtx_SET (SImode, operand0,
+ gen_rtx_LO_SUM (SImode, temp, operand1)));
+ DONE;
+ }
+}")
+
+;; This is the same as the following pattern, except that it includes
+;; support for arbitrary 32-bit immediates.
+
+;; ??? This always loads addresses using hilo. If the only use of this address
+;; was in a load/store, then we would get smaller code if we only loaded the
+;; upper part with hi, and then put the lower part in the load/store insn.
+
+(define_insn "*movsi_internal_v850e"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m,r")
+ (match_operand:SI 1 "general_operand" "Jr,K,L,Q,Ir,m,R,r,I,i"))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,4,2,2,4,4,4,4,6")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,other,load,other,load,other,store,store,other")])
+
+(define_insn "*movsi_internal"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m")
+ (match_operand:SI 1 "movsi_source_operand" "Jr,K,L,Q,Ir,m,R,r,I"))]
+ "register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode)"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,4,2,2,4,4,4,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,other,load,other,load,store,store,other")])
+
+(define_insn "*movsf_internal"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,r,r,r,Q,r,m,m,r")
+ (match_operand:SF 1 "general_operand" "Jr,K,L,n,Q,Ir,m,r,IG,iF"))]
+ "register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode)"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,4,8,2,2,4,4,4,8")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,other,other,load,other,load,store,store,other")])
+
+;; ----------------------------------------------------------------------
+;; TEST INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "*v850_tst1"
+ [(set (cc0)
+ (compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "m")
+ (const_int 1)
+ (match_operand:QI 1 "const_int_operand" "n"))
+ (const_int 0)))]
+ ""
+ "tst1 %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; This replaces ld.b;sar;andi with tst1;setf nz.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (compare (zero_extract:SI (match_operand:QI 1 "memory_operand" "")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" ""))
+ (const_int 0)))]
+ ""
+ [(set (cc0) (compare (zero_extract:SI (match_dup 1)
+ (const_int 1)
+ (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0) (ne:SI (cc0) (const_int 0)))])
+
+(define_expand "cbranchsi4"
+ [(set (cc0)
+ (compare (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int5_operand" "")))
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+(define_expand "cstoresi4"
+ [(set (cc0)
+ (compare (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "reg_or_int5_operand" "")))
+ (set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator" [(cc0)
+ (const_int 0)]))]
+ "")
+
+(define_expand "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_int5_operand" "r,J")))]
+ ""
+ "
+{
+ v850_compare_op0 = operands[0];
+ v850_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_insn "cmpsi_insn"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_int5_operand" "r,J")))]
+ ""
+ "@
+ cmp %1,%0
+ cmp %1,%0"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "compare")])
+
+(define_expand "cmpsf"
+ [(set (reg:CC CC_REGNUM)
+ (compare (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "
+{
+ v850_compare_op0 = operands[0];
+ v850_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ [(set (reg:CC CC_REGNUM)
+ (compare (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "
+{
+ v850_compare_op0 = operands[0];
+ v850_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; ----------------------------------------------------------------------
+;; ADD INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; 32-bit add: short "add" when the destination matches operand 1,
+;; otherwise the 3-operand "addi" forms.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,r,r")
+ (match_operand:SI 2 "nonmemory_operand" "rJ,K,U")))
+ (clobber (reg:CC CC_REGNUM))]
+
+ ""
+ "@
+ add %2,%0
+ addi %2,%1,%0
+ addi %O2(%P2),%1,%0"
+ [(set_attr "length" "2,4,4")
+ (set_attr "cc" "set_zn,set_zn,set_zn")])
+
+;; ----------------------------------------------------------------------
+;; SUBTRACT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; 32-bit subtract; "subr" handles the case where the destination
+;; matches the subtrahend instead of the minuend.
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,r")
+ (match_operand:SI 2 "register_operand" "r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ sub %2,%0
+ subr %1,%0"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_zn,set_zn")])
+
+;; Negate, implemented as reverse-subtract from r0 (zero).
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "subr %.,%0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_zn")])
+
+;; ----------------------------------------------------------------------
+;; MULTIPLY INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_expand "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI
+ (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
+ (sign_extend:SI (match_operand:HI 2 "nonmemory_operand" ""))))]
+ ""
+ "if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_mulhisi3_internal2 (operands[0], operands[1], operands[2]));
+ DONE;
+ }")
+
+(define_insn "*mulhisi3_internal1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "%0"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "mulh %2,%0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "mult")])
+
+(define_insn "mulhisi3_internal2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (mult:SI
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "%0,r"))
+ (match_operand:HI 2 "const_int_operand" "J,K")))]
+ ""
+ "@
+ mulh %2,%0
+ mulhi %2,%1,%0"
+ [(set_attr "length" "2,4")
+ (set_attr "cc" "none_0hit,none_0hit")
+ (set_attr "type" "mult")])
+
+;; ??? The scheduling info is probably wrong.
+
+;; ??? This instruction can also generate the 32-bit highpart, but using it
+;; may increase code size contrary to the desired result.
+
+;; ??? This instruction can also give a DImode result.
+
+;; ??? There is an unsigned version, but it matters only for the
+;; DImode/highpart results.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "reg_or_int9_operand" "rO")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "mul %2,%1,%."
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "mult")])
+
+;; ----------------------------------------------------------------------
+;; DIVIDE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; ??? These insns do set the Z/N condition codes, except that they are based
+;; on only one of the two results, so it doesn't seem to make sense to use
+;; them.
+
+;; ??? The scheduling info is probably wrong.
+
+(define_insn "divmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=r")
+ (mod:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E"
+ "div %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "div")])
+
+(define_insn "udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=r")
+ (umod:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E"
+ "divu %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "div")])
+
+;; ??? There is a 2 byte instruction for generating only the quotient.
+;; However, it isn't clear how to compute the length field correctly.
+
+(define_insn "divmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (div:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "register_operand" "r")))
+ (set (match_operand:HI 3 "register_operand" "=r")
+ (mod:HI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E"
+ "divh %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "div")])
+
+;; Half-words are sign-extended by default, so we must zero extend to a word
+;; here before doing the divide.
+
+(define_insn "udivmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (udiv:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "register_operand" "r")))
+ (set (match_operand:HI 3 "register_operand" "=r")
+ (umod:HI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E"
+ "zxh %0 ; divhu %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "div")])
+
+;; ----------------------------------------------------------------------
+;; AND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "*v850_clr1_1"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (subreg:QI
+ (and:SI (subreg:SI (match_dup 0) 0)
+ (match_operand:QI 1 "not_power_of_two_operand" "")) 0))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "*
+{
+ rtx xoperands[2];
+ xoperands[0] = operands[0];
+ xoperands[1] = GEN_INT (~INTVAL (operands[1]) & 0xff);
+ output_asm_insn (\"clr1 %M1,%0\", xoperands);
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "*v850_clr1_2"
+ [(set (match_operand:HI 0 "indirect_operand" "=m")
+ (subreg:HI
+ (and:SI (subreg:SI (match_dup 0) 0)
+ (match_operand:HI 1 "not_power_of_two_operand" "")) 0))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (~INTVAL (operands[1]) & 0xffff);
+
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0), log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"clr1 %1,%0\", xoperands);
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "*v850_clr1_3"
+ [(set (match_operand:SI 0 "indirect_operand" "=m")
+ (and:SI (match_dup 0)
+ (match_operand:SI 1 "not_power_of_two_operand" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (~INTVAL (operands[1]) & 0xffffffff);
+
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0), log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"clr1 %1,%0\", xoperands);
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,M")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ and %2,%0
+ and %.,%0
+ andi %2,%1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "set_zn")])
+
+;; ----------------------------------------------------------------------
+;; OR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "*v850_set1_1"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (subreg:QI (ior:SI (subreg:SI (match_dup 0) 0)
+ (match_operand 1 "power_of_two_operand" "")) 0))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "set1 %M1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+;; Set a single bit of a half-word in memory via set1, addressing the
+;; byte that contains the bit.  NOTE(review): the cc attribute is
+;; "clobber", yet unlike *v850_set1_1 and *v850_set1_3 this pattern's
+;; RTL has no explicit (clobber (reg:CC CC_REGNUM)) -- confirm whether
+;; the missing clobber is intentional.
+(define_insn "*v850_set1_2"
+ [(set (match_operand:HI 0 "indirect_operand" "=m")
+ (subreg:HI (ior:SI (subreg:SI (match_dup 0) 0)
+ (match_operand 1 "power_of_two_operand" "")) 0))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (INTVAL (operands[1]));
+
+ if (log2 < 8)
+ return \"set1 %M1,%0\";
+ else
+ {
+ /* Bit lies in the upper byte: re-address to that byte. */
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0),
+ log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"set1 %1,%0\", xoperands);
+ }
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "*v850_set1_3"
+ [(set (match_operand:SI 0 "indirect_operand" "=m")
+ (ior:SI (match_dup 0)
+ (match_operand 1 "power_of_two_operand" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (INTVAL (operands[1]));
+
+ if (log2 < 8)
+ return \"set1 %M1,%0\";
+ else
+ {
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0),
+ log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"set1 %1,%0\", xoperands);
+ }
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,M")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ or %2,%0
+ or %.,%0
+ ori %2,%1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "set_zn")])
+
+;; ----------------------------------------------------------------------
+;; XOR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "*v850_not1_1"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (subreg:QI (xor:SI (subreg:SI (match_dup 0) 0)
+ (match_operand 1 "power_of_two_operand" "")) 0))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "not1 %M1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+;; Toggle a single bit of a half-word in memory via not1.
+;; NOTE(review): as with *v850_set1_2, the cc attribute is "clobber"
+;; but the RTL lacks the explicit CC_REGNUM clobber its siblings
+;; *v850_not1_1 and *v850_not1_3 carry -- confirm intent.
+(define_insn "*v850_not1_2"
+ [(set (match_operand:HI 0 "indirect_operand" "=m")
+ (subreg:HI (xor:SI (subreg:SI (match_dup 0) 0)
+ (match_operand 1 "power_of_two_operand" "")) 0))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (INTVAL (operands[1]));
+
+ if (log2 < 8)
+ return \"not1 %M1,%0\";
+ else
+ {
+ /* Bit lies in the upper byte: re-address to that byte. */
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0),
+ log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"not1 %1,%0\", xoperands);
+ }
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "*v850_not1_3"
+ [(set (match_operand:SI 0 "indirect_operand" "=m")
+ (xor:SI (match_dup 0)
+ (match_operand 1 "power_of_two_operand" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "*
+{
+ int log2 = exact_log2 (INTVAL (operands[1]));
+
+ if (log2 < 8)
+ return \"not1 %M1,%0\";
+ else
+ {
+ rtx xoperands[2];
+ xoperands[0] = gen_rtx_MEM (QImode,
+ plus_constant (XEXP (operands[0], 0),
+ log2 / 8));
+ xoperands[1] = GEN_INT (log2 % 8);
+ output_asm_insn (\"not1 %1,%0\", xoperands);
+ }
+ return \"\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "bit1")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,M")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ xor %2,%0
+ xor %.,%0
+ xori %2,%1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "set_zn")])
+
+;; ----------------------------------------------------------------------
+;; NOT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Bitwise complement.
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "not %1,%0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_zn")])
+
+;; -----------------------------------------------------------------
+;; BIT FIELDS
+;; -----------------------------------------------------------------
+
+;; ??? Is it worth defining insv and extv for the V850 series?!?
+
+;; An insv pattern would be useful, but does not get used because
+;; store_bit_field never calls insv when storing a constant value into a
+;; single-bit bitfield.
+
+;; extv/extzv patterns would be useful, but do not get used because
+;; optimize_bitfield_compare in fold-const usually converts single
+;; bit extracts into an AND with a mask.
+
+;; -----------------------------------------------------------------
+;; Scc INSTRUCTIONS
+;; -----------------------------------------------------------------
+
+;; Store a condition-code test into a register with setf.
+;; NOTE(review): the template returns 0 (no assembly) when the overflow
+;; flag is unusable for a signed comparison -- presumably that
+;; combination is never generated; confirm, since a NULL template would
+;; be fatal at output time.
+(define_insn "*setcc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(cc0) (const_int 0)]))]
+ ""
+ "*
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
+ && (GET_CODE (operands[1]) == GT
+ || GET_CODE (operands[1]) == GE
+ || GET_CODE (operands[1]) == LE
+ || GET_CODE (operands[1]) == LT))
+ return 0;
+
+ return \"setf %c1,%0\";
+}"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; setf against the CC pseudo register.
+(define_insn "setf_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(reg:CC CC_REGNUM) (const_int 0)]))]
+ ""
+ "setf %b1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Materialize a floating-point Z-flag comparison result (E2V3 FPU).
+(define_insn "set_z_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand 1 "v850_float_z_comparison_operator" ""))]
+ "TARGET_V850E2V3"
+ "setf z,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; Materialize a floating-point NZ-flag comparison result (E2V3 FPU).
+(define_insn "set_nz_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand 1 "v850_float_nz_comparison_operator" ""))]
+ "TARGET_V850E2V3"
+ "setf nz,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")])
+
+;; ----------------------------------------------------------------------
+;; CONDITIONAL MOVE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Instructions using cc0 aren't allowed to have input reloads, so we must
+;; hide the fact that this instruction uses cc0. We do so by including the
+;; compare instruction inside it.
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operand 1 "comparison_operator")
+ (match_operand:SI 2 "reg_or_const_operand" "rJ")
+ (match_operand:SI 3 "reg_or_const_operand" "rI")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "
+{
+ if ( (GET_CODE (operands[2]) == CONST_INT
+ && GET_CODE (operands[3]) == CONST_INT))
+ {
+ int o2 = INTVAL (operands[2]);
+ int o3 = INTVAL (operands[3]);
+
+ if (o2 == 1 && o3 == 0)
+ FAIL; /* setf */
+ if (o3 == 1 && o2 == 0)
+ FAIL; /* setf */
+ if (o2 == 0 && (o3 < -16 || o3 > 15) && exact_log2 (o3) >= 0)
+ FAIL; /* setf + shift */
+ if (o3 == 0 && (o2 < -16 || o2 > 15) && exact_log2 (o2) >=0)
+ FAIL; /* setf + shift */
+ if (o2 != 0)
+ operands[2] = copy_to_mode_reg (SImode, operands[2]);
+ if (o3 !=0 )
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+ }
+ else
+ {
+ if (GET_CODE (operands[2]) != REG)
+ operands[2] = copy_to_mode_reg (SImode,operands[2]);
+ if (GET_CODE (operands[3]) != REG)
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+ }
+}")
+
+;; ??? Clobbering the condition codes is overkill.
+
+;; ??? We sometimes emit an unnecessary compare instruction because the
+;; condition codes may have already been set by an earlier instruction,
+;; but we have no code here to avoid the compare if it is unnecessary.
+
+;; Conditional move keyed off the CC pseudo register set by an earlier
+;; compare; cc attribute is "compare" because the flags are consumed,
+;; not destroyed.  A stray ';' that followed the output template (a
+;; no-op md comment that read like a C statement terminator) has been
+;; removed.
+(define_insn "movsicc_normal_cc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(reg:CC CC_REGNUM) (const_int 0)])
+ (match_operand:SI 2 "reg_or_int5_operand" "rJ")
+ (match_operand:SI 3 "reg_or_0_operand" "rI")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "cmov %c1,%2,%z3,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "compare")])
+
+(define_insn "movsicc_reversed_cc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(reg:CC CC_REGNUM) (const_int 0)])
+ (match_operand:SI 2 "reg_or_0_operand" "rI")
+ (match_operand:SI 3 "reg_or_int5_operand" "rJ")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "cmov %C1,%3,%z2,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "compare")])
+
+;; Conditional move where the comparison is part of the pattern: emit a
+;; "cmp" of operands 4/5 followed by "cmov" selecting operand 2 when
+;; condition %1 holds, else operand 3 (r0 via %z3 when it is 0).
+(define_insn "*movsicc_normal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "r")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")])
+ (match_operand:SI 2 "reg_or_int5_operand" "rJ")
+ (match_operand:SI 3 "reg_or_0_operand" "rI")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "cmp %5,%4 ; cmov %c1,%2,%z3,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+;; As above but with the arms swapped; %C1 emits the inverted condition.
+(define_insn "*movsicc_reversed"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "r")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")])
+ (match_operand:SI 2 "reg_or_0_operand" "rI")
+ (match_operand:SI 3 "reg_or_int5_operand" "rJ")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "cmp %5,%4 ; cmov %C1,%3,%z2,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+;; Conditional move keyed off a single bit of a byte in memory: "tst1"
+;; tests bit %3 of memory operand %2, then "cmov" selects accordingly.
+(define_insn "*movsicc_tst1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:SI
+ (match_operand:QI 2 "memory_operand" "m")
+ (const_int 1)
+ (match_operand 3 "const_int_operand" "n"))
+ (const_int 0)])
+ (match_operand:SI 4 "reg_or_int5_operand" "rJ")
+ (match_operand:SI 5 "reg_or_0_operand" "rI")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "tst1 %3,%2 ; cmov %c1,%4,%z5,%0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")])
+
+;; Bit-test conditional move with the arms swapped (inverted condition).
+(define_insn "*movsicc_tst1_reversed"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:SI
+ (match_operand:QI 2 "memory_operand" "m")
+ (const_int 1)
+ (match_operand 3 "const_int_operand" "n"))
+ (const_int 0)])
+ (match_operand:SI 4 "reg_or_0_operand" "rI")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "tst1 %3,%2 ; cmov %C1,%5,%z4,%0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")])
+
+;; Matching for sasf requires combining 4 instructions, so we provide a
+;; dummy pattern to match the first 3, which will always be turned into the
+;; second pattern by subsequent combining. As above, we must include the
+;; comparison to avoid input reloads in an insn using cc0.
+;;
+;; "sasf" shifts %0 left by one and sets its low bit from the condition
+;; flags; the cmp establishes those flags from operands 3 and 4.
+(define_insn "*sasf"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 3 "register_operand" "r")
+ (match_operand:SI 4 "reg_or_int5_operand" "rJ")])
+ (ashift:SI (match_operand:SI 2 "register_operand" "0")
+ (const_int 1))))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "cmp %4,%3 ; sasf %c1,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+;; Split an if_then_else between two integer constants that differ only in
+;; bit 0 (the XOR == 1 test; the sum != 1 test excludes the plain 0/1 case,
+;; which other patterns handle) into: load the common high bits (operand 2
+;; shifted right once), then the *sasf pattern above, which shifts left and
+;; ORs in the condition bit.  If operand 2's low bit is clear the condition
+;; must be reversed so that the inserted bit comes out right.  The REGNO
+;; checks keep operand 0 from clobbering the comparison inputs, which are
+;; still live in the second emitted insn.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "")
+ (match_operand:SI 5 "reg_or_int5_operand" "")])
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)
+ && ((INTVAL (operands[2]) ^ INTVAL (operands[3])) == 1)
+ && ((INTVAL (operands[2]) + INTVAL (operands[3])) != 1)
+ && (GET_CODE (operands[5]) == CONST_INT
+ || REGNO (operands[0]) != REGNO (operands[5]))
+ && REGNO (operands[0]) != REGNO (operands[4])"
+ [(set (match_dup 0) (match_dup 6))
+ (parallel [(set (match_dup 0)
+ (ior:SI (match_op_dup 7 [(match_dup 4) (match_dup 5)])
+ (ashift:SI (match_dup 0) (const_int 1))))
+ (clobber (reg:CC CC_REGNUM))])]
+ "
+{
+ operands[6] = GEN_INT (INTVAL (operands[2]) >> 1);
+ if (INTVAL (operands[2]) & 0x1)
+ operands[7] = operands[1];
+ else
+ operands[7] = gen_rtx_fmt_ee (reverse_condition (GET_CODE (operands[1])),
+ GET_MODE (operands[1]),
+ XEXP (operands[1], 0), XEXP (operands[1], 1));
+}")
+
+;; ---------------------------------------------------------------------
+;; BYTE SWAP INSTRUCTIONS
+;; ---------------------------------------------------------------------
+;; Only rotate-by-8 (HImode) and rotate-by-16 (SImode) are supported,
+;; implemented with the "bsh" / "hsw" byte/halfword-swap instructions;
+;; any other rotate count makes the expander FAIL so generic code is used.
+(define_expand "rotlhi3"
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (rotate:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "const_int_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "
+{
+ if (INTVAL (operands[2]) != 8)
+ FAIL;
+}")
+
+;; 16-bit rotate by 8 == byte swap: "bsh".
+(define_insn "*rotlhi3_8"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (rotate:HI (match_operand:HI 1 "register_operand" "r")
+ (const_int 8)))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "bsh %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_expand "rotlsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (rotate:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "
+{
+ if (INTVAL (operands[2]) != 16)
+ FAIL;
+}")
+
+;; 32-bit rotate by 16 == halfword swap: "hsw".
+(define_insn "*rotlsi3_16"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16)))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "hsw %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; JUMP INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Conditional jump instructions
+
+;; Branch on a cc0 condition.  Short form is a 2-byte "b<cond>"; when the
+;; target is out of 9-bit range, branch around an unconditional "jr" using
+;; the inverted condition (%B1).
+(define_insn "*branch_normal"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ /* Signed conditions need the V flag; if the comparison that set cc0
+ left the overflow flag unusable, there is no valid template.
+ NOTE(review): returning 0 here presumably makes this case an
+ internal error -- confirm this combination cannot be generated. */
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
+ && (GET_CODE (operands[1]) == GT
+ || GET_CODE (operands[1]) == GE
+ || GET_CODE (operands[1]) == LE
+ || GET_CODE (operands[1]) == LT))
+ return 0;
+
+ if (get_attr_length (insn) == 2)
+ return \"b%b1 %l0\";
+ else
+ return \"b%B1 .+6 ; jr %l0\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; Same as *branch_normal but with arms swapped: branch to the label when
+;; the condition is FALSE, so the short form uses the inverted code %B1.
+(define_insn "*branch_invert"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
+ && (GET_CODE (operands[1]) == GT
+ || GET_CODE (operands[1]) == GE
+ || GET_CODE (operands[1]) == LE
+ || GET_CODE (operands[1]) == LT))
+ return 0;
+ if (get_attr_length (insn) == 2)
+ return \"b%B1 %l0\";
+ else
+ return \"b%b1 .+6 ; jr %l0\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; Branches on the Z flag as set by a floating-point compare (via trfsr).
+;; Short form "bz"; long form branches around an unconditional "jr" with
+;; the opposite condition.
(define_insn "branch_z_normal"
+ [(set (pc)
+ (if_then_else (match_operand 1 "v850_float_z_comparison_operator" "")
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_V850E2V3"
+ "*
+{
+ if (get_attr_length (insn) == 2)
+ return \"bz %l0\";
+ else
+ return \"bnz 1f ; jr %l0 ; 1:\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; As branch_z_normal with the arms swapped: take the label when Z is clear.
+(define_insn "*branch_z_invert"
+ [(set (pc)
+ (if_then_else (match_operand 1 "v850_float_z_comparison_operator" "")
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_V850E2V3"
+ "*
+{
+ if (get_attr_length (insn) == 2)
+ return \"bnz %l0\";
+ else
+ return \"bz 1f ; jr %l0 ; 1:\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; Branch when the float comparison demands a not-Z test.
+(define_insn "branch_nz_normal"
+ [(set (pc)
+ (if_then_else (match_operand 1 "v850_float_nz_comparison_operator" "")
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_V850E2V3"
+ "*
+{
+ if (get_attr_length (insn) == 2)
+ return \"bnz %l0\";
+ else
+ return \"bz 1f ; jr %l0 ; 1:\";
+}"
+[(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; As branch_nz_normal with the arms swapped.
+(define_insn "*branch_nz_invert"
+ [(set (pc)
+ (if_then_else (match_operand 1 "v850_float_nz_comparison_operator" "")
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_V850E2V3"
+ "*
+{
+ if (get_attr_length (insn) == 2)
+ return \"bz %l0\";
+ else
+ return \"bnz 1f ; jr %l0 ; 1:\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 6)))
+ (set_attr "cc" "none")])
+
+;; Unconditional and other jump instructions.
+
+;; Unconditional jump: 2-byte "br" when within 9-bit range, else "jr".
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ if (get_attr_length (insn) == 2)
+ return \"br %0\";
+ else
+ return \"jr %0\";
+}"
+ [(set (attr "length")
+ (if_then_else (lt (abs (minus (match_dup 0) (pc)))
+ (const_int 256))
+ (const_int 2)
+ (const_int 4)))
+ (set_attr "cc" "none")])
+
+;; Jump through a register.
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jmp %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Jump-table dispatch; the (use ...) ties the insn to its jump table.
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; The hardware "switch" instruction: index %0 scales into a table of
+;; signed halfword offsets at label %1 and jumps relative to that label.
+(define_insn "switch"
+ [(set (pc)
+ (plus:SI
+ (sign_extend:SI
+ (mem:HI
+ (plus:SI (ashift:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1))
+ (label_ref (match_operand 1 "" "")))))
+ (label_ref (match_dup 1))))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "switch %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Expand a case statement: operand 0 is the index, 1 the lower bound,
+;; 2 the table size, 3 the table label and 4 the default label.  The
+;; table holds halfword offsets unless -mbig-switch selects word entries.
+(define_expand "casesi"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand 3 "" "") (match_operand 4 "" "")]
+ ""
+ "
+{
+ rtx reg = gen_reg_rtx (SImode);
+ rtx tableaddress = gen_reg_rtx (SImode);
+ rtx test;
+ rtx mem;
+
+ /* Subtract the lower bound from the index. */
+ emit_insn (gen_subsi3 (reg, operands[0], operands[1]));
+
+ /* Compare the result against the number of table entries;
+ branch to the default label if out of range of the table. */
+ test = gen_rtx_fmt_ee (GTU, VOIDmode, reg, operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, reg, operands[2], operands[4]));
+
+ /* Shift index for the table array access. */
+ emit_insn (gen_ashlsi3 (reg, reg, GEN_INT (TARGET_BIG_SWITCH ? 2 : 1)));
+ /* Load the table address into a pseudo. */
+ emit_insn (gen_movsi (tableaddress,
+ gen_rtx_LABEL_REF (Pmode, operands[3])));
+ /* Add the table address to the index. */
+ emit_insn (gen_addsi3 (reg, reg, tableaddress));
+ /* Load the table entry. */
+ mem = gen_const_mem (CASE_VECTOR_MODE, reg);
+ if (! TARGET_BIG_SWITCH)
+ {
+ /* Halfword table entries are sign-extended offsets. */
+ rtx reg2 = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg2, mem));
+ emit_insn (gen_extendhisi2 (reg, reg2));
+ }
+ else
+ emit_insn (gen_movsi (reg, mem));
+ /* Add the table address. */
+ emit_insn (gen_addsi3 (reg, reg, tableaddress));
+ /* Branch to the switch label. */
+ emit_jump_insn (gen_tablejump (reg, operands[3]));
+ DONE;
+}")
+
+;; Call subroutine with no return value.
+
+;; Force non-symbolic (or, with -mlong-calls, all) targets into a register
+;; and dispatch to the short or long call pattern.
+(define_expand "call"
+ [(call (match_operand:QI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! call_address_operand (XEXP (operands[0], 0), QImode)
+ || TARGET_LONG_CALLS)
+ XEXP (operands[0], 0) = force_reg (SImode, XEXP (operands[0], 0));
+ if (TARGET_LONG_CALLS)
+ emit_call_insn (gen_call_internal_long (XEXP (operands[0], 0), operands[1]));
+ else
+ emit_call_insn (gen_call_internal_short (XEXP (operands[0], 0), operands[1]));
+
+ DONE;
+}")
+
+;; Short call: "jarl" directly for a symbol; for a register target, jarl
+;; over the jmp to set up r31 (the link register, clobbered here) first.
+(define_insn "call_internal_short"
+ [(call (mem:QI (match_operand:SI 0 "call_address_operand" "S,r"))
+ (match_operand:SI 1 "general_operand" "g,g"))
+ (clobber (reg:SI 31))]
+ "! TARGET_LONG_CALLS"
+ "@
+ jarl %0,r31
+ jarl .+4,r31 ; add 4,r31 ; jmp %0"
+ [(set_attr "length" "4,8")
+ (set_attr "cc" "clobber,clobber")]
+)
+
+;; Long call: symbolic addresses are materialized in r11 with movhi/movea
+;; because the target may be out of jarl's 22-bit range.
+(define_insn "call_internal_long"
+ [(call (mem:QI (match_operand:SI 0 "call_address_operand" "S,r"))
+ (match_operand:SI 1 "general_operand" "g,g"))
+ (clobber (reg:SI 31))]
+ "TARGET_LONG_CALLS"
+ "*
+ {
+ if (which_alternative == 0)
+ {
+ if (GET_CODE (operands[0]) == REG)
+ return \"jarl %0,r31\";
+ else
+ return \"movhi hi(%0), r0, r11 ; movea lo(%0), r11, r11 ; jarl .+4,r31 ; add 4, r31 ; jmp r11\";
+ }
+ else
+ return \"jarl .+4,r31 ; add 4,r31 ; jmp %0\";
+ }"
+ [(set_attr "length" "16,8")
+ (set_attr "cc" "clobber,clobber")]
+)
+
+;; Call subroutine, returning value in operand 0
+;; (which must be a hard register).
+
+;; Mirror of the "call" expander above, with a value destination.
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+ "
+{
+ if (! call_address_operand (XEXP (operands[1], 0), QImode)
+ || TARGET_LONG_CALLS)
+ XEXP (operands[1], 0) = force_reg (SImode, XEXP (operands[1], 0));
+ if (TARGET_LONG_CALLS)
+ emit_call_insn (gen_call_value_internal_long (operands[0],
+ XEXP (operands[1], 0),
+ operands[2]));
+ else
+ emit_call_insn (gen_call_value_internal_short (operands[0],
+ XEXP (operands[1], 0),
+ operands[2]));
+ DONE;
+}")
+
+;; Value-returning short call; templates as in call_internal_short.
+(define_insn "call_value_internal_short"
+ [(set (match_operand 0 "" "=r,r")
+ (call (mem:QI (match_operand:SI 1 "call_address_operand" "S,r"))
+ (match_operand:SI 2 "general_operand" "g,g")))
+ (clobber (reg:SI 31))]
+ "! TARGET_LONG_CALLS"
+ "@
+ jarl %1,r31
+ jarl .+4,r31 ; add 4,r31 ; jmp %1"
+ [(set_attr "length" "4,8")
+ (set_attr "cc" "clobber,clobber")]
+)
+
+;; Value-returning long call; templates as in call_internal_long.
+(define_insn "call_value_internal_long"
+ [(set (match_operand 0 "" "=r,r")
+ (call (mem:QI (match_operand:SI 1 "call_address_operand" "S,r"))
+ (match_operand:SI 2 "general_operand" "g,g")))
+ (clobber (reg:SI 31))]
+ "TARGET_LONG_CALLS"
+ "*
+ {
+ if (which_alternative == 0)
+ {
+ if (GET_CODE (operands[1]) == REG)
+ return \"jarl %1, r31\";
+ else
+ /* Reload can generate this pattern.... */
+ return \"movhi hi(%1), r0, r11 ; movea lo(%1), r11, r11 ; jarl .+4, r31 ; add 4, r31 ; jmp r11\";
+ }
+ else
+ return \"jarl .+4, r31 ; add 4, r31 ; jmp %1\";
+ }"
+ [(set_attr "length" "16,8")
+ (set_attr "cc" "clobber,clobber")]
+)
+
+;; No-operation; 2 bytes, leaves the flags alone.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; ----------------------------------------------------------------------
+;; EXTEND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; HI->SI zero extension on V850E parts: in place with "zxh", reg-to-reg
+;; with "andi 65535", or directly from memory with sld.hu/ld.hu.  Only
+;; the andi alternative changes the Z/N flags.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "0,r,T,m")))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "@
+ zxh %0
+ andi 65535,%1,%0
+ sld.hu %1,%0
+ ld.hu %1,%0"
+ [(set_attr "length" "2,4,2,4")
+ (set_attr "cc" "none_0hit,set_zn,none_0hit,none_0hit")])
+
+;; Generic (all targets) HI->SI zero extension via andi.
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "andi 65535,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_zn")])
+
+;; QI->SI zero extension on V850E parts; same scheme as the HI version.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "0,r,T,m")))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "@
+ zxb %0
+ andi 255,%1,%0
+ sld.bu %1,%0
+ ld.bu %1,%0"
+ [(set_attr "length" "2,4,2,4")
+ (set_attr "cc" "none_0hit,set_zn,none_0hit,none_0hit")])
+
+;; Generic QI->SI zero extension via andi.
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (match_operand:QI 1 "register_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "andi 255,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_zn")])
+
+;;- sign extension instructions
+
+;; ??? The extendhisi2 pattern should not emit shifts for v850e?
+
+;; HI->SI sign extension on V850E parts: in place with "sxh" or straight
+;; from memory with sld.h/ld.h.
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,Q,m")))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "@
+ sxh %0
+ sld.h %1,%0
+ ld.h %1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit")])
+
+;; ??? This is missing a sign extend from memory pattern to match the ld.h
+;; instruction.
+
+;; Generic HI->SI sign extension: shift left 16 then arithmetic shift
+;; right 16 through a fresh SImode temporary.
+(define_expand "extendhisi2"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "register_operand" "")
+ (const_int 16)))
+ (clobber (reg:CC CC_REGNUM))])
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+ "
+{
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; ??? The extendqisi2 pattern should not emit shifts for v850e?
+
+;; QI->SI sign extension on V850E parts: "sxb" or sld.b/ld.b from memory.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,Q,m")))
+ (clobber (reg:CC CC_REGNUM))]
+ "(TARGET_V850E || TARGET_V850E2_ALL)"
+ "@
+ sxb %0
+ sld.b %1,%0
+ ld.b %1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit")])
+
+;; ??? This is missing a sign extend from memory pattern to match the ld.b
+;; instruction.
+
+;; Generic QI->SI sign extension via shift left/right by 24.
+(define_expand "extendqisi2"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "register_operand" "")
+ (const_int 24)))
+ (clobber (reg:CC CC_REGNUM))])
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+ "
+{
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; ----------------------------------------------------------------------
+;; SHIFTS
+;; ----------------------------------------------------------------------
+
+;; Two-operand left shift: count in a register (4 bytes) or as a 5-bit
+;; immediate (2 bytes, constraint "N"); destination must match source.
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashift:SI
+ (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,N")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ shl %2,%0
+ shl %2,%0"
+ [(set_attr "length" "4,2")
+ (set_attr "cc" "set_zn")])
+
+;; Three-operand left shift available from V850E2 on.
+(define_insn "ashlsi3_v850e2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "nonmemory_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E2_ALL"
+ "shl %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_znv")])
+
+;; Two-operand logical right shift; same alternatives as ashlsi3.
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,N")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ shr %2,%0
+ shr %2,%0"
+ [(set_attr "length" "4,2")
+ (set_attr "cc" "set_zn")])
+
+;; Three-operand logical right shift (V850E2 and later).
+(define_insn "lshrsi3_v850e2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "nonmemory_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E2_ALL"
+ "shr %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_zn")])
+
+;; Two-operand arithmetic right shift ("sar"): count in a register
+;; (4 bytes) or as a 5-bit immediate (2 bytes, constraint "N").
+;; The cc attribute uses the single value "set_zn", which applies to
+;; all alternatives -- previously written as the redundant two-element
+;; list "set_zn, set_zn", inconsistent with ashlsi3/lshrsi3 above.
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,N")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "@
+ sar %2,%0
+ sar %2,%0"
+ [(set_attr "length" "4,2")
+ (set_attr "cc" "set_zn")])
+
+;; Three-operand arithmetic right shift (V850E2 and later).
+(define_insn "ashrsi3_v850e2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "nonmemory_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E2_ALL"
+ "sar %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_zn")])
+
+;; ----------------------------------------------------------------------
+;; FIND FIRST BIT INSTRUCTION
+;; ----------------------------------------------------------------------
+
+;; ffs() via the V850E2 "sch1r" (search-one-from-right) instruction.
+(define_insn "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ffs:SI (match_operand:SI 1 "register_operand" "r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_V850E2_ALL"
+ "sch1r %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; PROLOGUE/EPILOGUE
+;; ----------------------------------------------------------------------
+;; Entry/exit code is generated entirely in C (v850.c).
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "expand_prologue (); DONE;")
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "
+{
+ expand_epilogue ();
+ DONE;
+}")
+
+;; Bare return through the link register, only after reload.
+(define_insn "return_simple"
+ [(return)]
+ "reload_completed"
+ "jmp [r31]"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Return that explicitly marks r31 as live at the return point.
+(define_insn "return_internal"
+ [(return)
+ (use (reg:SI 31))]
+ ""
+ "jmp [r31]"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; ----------------------------------------------------------------------
+;; v850e2V3 floating-point hardware support
+;; ----------------------------------------------------------------------
+;; DF operands live in even/odd register pairs, hence "even_reg_operand".
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (plus:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "addf.s %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (plus:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "addf.d %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; subf takes subtrahend first, so the operands are printed reversed.
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (minus:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "subf.s %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (minus:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "subf.d %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (mult:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "mulf.s %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (mult:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "mulf.d %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; divf takes the divisor first, so the operands are printed reversed.
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (div:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "divf.s %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; Double-precision divide; divisor is printed first.  divf.d operates on
+;; even/odd register pairs, so the destination is constrained with
+;; even_reg_operand just like the sources and every other DF pattern in
+;; this section -- the previous "register_operand" predicate would have
+;; let reload pick an odd destination register, which divf.d cannot use.
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "divf.d %2,%1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; Single-precision min; %z prints r0 for a constant-zero operand.
+(define_insn "minsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (smin:SF (match_operand:SF 1 "reg_or_0_operand" "r")
+ (match_operand:SF 2 "reg_or_0_operand" "r")))]
+ "TARGET_V850E2V3"
+ "minf.s %z1,%z2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "mindf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (smin:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "minf.d %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "maxsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (smax:SF (match_operand:SF 1 "reg_or_0_operand" "r")
+ (match_operand:SF 2 "reg_or_0_operand" "r")))]
+ "TARGET_V850E2V3"
+ "maxf.s %z1,%z2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "maxdf3"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (smax:DF (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "maxf.d %1,%2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; Absolute value and negation, single and double precision.
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (abs:SF (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "absf.s %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (abs:DF (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "absf.d %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "negf.s %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (neg:DF (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "negf.d %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; square-root
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "sqrtf.s %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (sqrt:DF (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "sqrtf.d %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; float -> int
+;; Truncating (round-toward-zero) conversions, "trncf".
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "trncf.sw %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (fix:SI (fix:DF (match_operand:DF 1 "even_reg_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "trncf.dw %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; int -> float
+;; "cvtf" conversions; %z allows a constant-zero source to use r0.
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rI")))]
+ "TARGET_V850E2V3"
+ "cvtf.ws %z1, %0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rI")))]
+ "TARGET_V850E2V3"
+ "cvtf.wd %z1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; single-float -> double-float
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "rI")))]
+ "TARGET_V850E2V3"
+ "cvtf.sd %z1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; double-float -> single-float
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float_truncate:SF
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cvtf.ds %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;;
+;; ---------------- special insns
+;;
+
+;;; reciprocal
+;; Matched only when operand 1 is the constant 1.0 (1.0 / x -> recipf).
+(define_insn "recipsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "recipf.s %2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "recipdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "const_float_1_operand" "")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "recipf.d %2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;;; reciprocal of square-root
+;; 1.0 / sqrt(x) -> rsqrtf.
+(define_insn "rsqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (sqrt:SF (match_operand:SF 2 "register_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "rsqrtf.s %2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "rsqrtdf2"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "const_float_1_operand" "")
+ (sqrt:DF (match_operand:DF 2 "even_reg_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "rsqrtf.d %2,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;;; multiply-add
+;; fma(%1,%2,%3): the hardware takes the factors in the other order, but
+;; multiplication commutes, so %2,%1 is equivalent.
+(define_insn "fmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (fma:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")
+ (match_operand:SF 3 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "maddf.s %2,%1,%3,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;;; multiply-subtract
+(define_insn "fmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (fma:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")
+ (neg:SF (match_operand:SF 3 "register_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "msubf.s %2,%1,%3,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;;; negative-multiply-add
+(define_insn "fnmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (fma:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
+ (match_operand:SF 2 "register_operand" "r")
+ (match_operand:SF 3 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "nmaddf.s %2,%1,%3,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+;; negative-multiply-subtract
+(define_insn "fnmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (fma:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
+ (match_operand:SF 2 "register_operand" "r")
+ (neg:SF (match_operand:SF 3 "register_operand" "r"))))]
+ "TARGET_V850E2V3"
+ "nmsubf.s %2,%1,%3,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+;
+; ---------------- comparison/conditionals
+;
+; Each pattern records its result in the FPU condition register (FCC)
+; under a distinct CC_FPU_* mode, so the mode encodes which cmpf
+; condition was evaluated.
+; SF
+
+(define_insn "cmpsf_le_insn"
+ [(set (reg:CC_FPU_LE FCC_REGNUM)
+ (compare:CC_FPU_LE (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s le,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpsf_lt_insn"
+ [(set (reg:CC_FPU_LT FCC_REGNUM)
+ (compare:CC_FPU_LT (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s lt,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpsf_ge_insn"
+ [(set (reg:CC_FPU_GE FCC_REGNUM)
+ (compare:CC_FPU_GE (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s ge,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpsf_gt_insn"
+ [(set (reg:CC_FPU_GT FCC_REGNUM)
+ (compare:CC_FPU_GT (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s gt,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpsf_eq_insn"
+ [(set (reg:CC_FPU_EQ FCC_REGNUM)
+ (compare:CC_FPU_EQ (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s eq,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpsf_ne_insn"
+ [(set (reg:CC_FPU_NE FCC_REGNUM)
+ (compare:CC_FPU_NE (match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.s neq,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+; DF
+; Double-precision variants of the FCC compare patterns above; operands
+; are even/odd register pairs.
+
+(define_insn "cmpdf_le_insn"
+ [(set (reg:CC_FPU_LE FCC_REGNUM)
+ (compare:CC_FPU_LE (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d le,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpdf_lt_insn"
+ [(set (reg:CC_FPU_LT FCC_REGNUM)
+ (compare:CC_FPU_LT (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d lt,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpdf_ge_insn"
+ [(set (reg:CC_FPU_GE FCC_REGNUM)
+ (compare:CC_FPU_GE (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d ge,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpdf_gt_insn"
+ [(set (reg:CC_FPU_GT FCC_REGNUM)
+ (compare:CC_FPU_GT (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d gt,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpdf_eq_insn"
+ [(set (reg:CC_FPU_EQ FCC_REGNUM)
+ (compare:CC_FPU_EQ (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d eq,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+(define_insn "cmpdf_ne_insn"
+ [(set (reg:CC_FPU_NE FCC_REGNUM)
+ (compare:CC_FPU_NE (match_operand:DF 0 "even_reg_operand" "r")
+ (match_operand:DF 1 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmpf.d neq,%z0,%z1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "fpu")])
+
+
+;;
+;; Transfer a v850e2v3 fcc to the Z bit of CC0 (this is necessary to do a
+;; conditional branch based on a floating-point compare)
+;;
+
+;; The operands are deliberately left generic (empty predicates); the insn
+;; condition below does all of the checking instead: both operands must be
+;; registers in the same CC_FPU_* mode, operand 0 the CC register and
+;; operand 1 the FCC register.
+(define_insn "trfsr"
+ [(set (match_operand 0 "" "") (match_operand 1 "" ""))]
+ "TARGET_V850E2V3
+ && GET_MODE(operands[0]) == GET_MODE(operands[1])
+ && GET_CODE(operands[0]) == REG && REGNO (operands[0]) == CC_REGNUM
+ && GET_CODE(operands[1]) == REG && REGNO (operands[1]) == FCC_REGNUM
+ && (GET_MODE(operands[0]) == CC_FPU_LEmode
+ || GET_MODE(operands[0]) == CC_FPU_GEmode
+ || GET_MODE(operands[0]) == CC_FPU_LTmode
+ || GET_MODE(operands[0]) == CC_FPU_GTmode
+ || GET_MODE(operands[0]) == CC_FPU_EQmode
+ || GET_MODE(operands[0]) == CC_FPU_NEmode)"
+ "trfsr"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_z")
+ (set_attr "type" "fpu")])
+
+;;
+;; Floating-point conditional moves for the v850e2v3.
+;;
+
+;; The actual v850e2v3 conditional move instructions
+;;
+;; The "_z" patterns test FCC condition 0 directly; the "_nz" patterns
+;; implement the inverted condition by swapping the two source operands in
+;; the output template (%z2 before %z1).
+(define_insn "movsfcc_z_insn"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (if_then_else:SF
+ (match_operand 3 "v850_float_z_comparison_operator" "")
+ (match_operand:SF 1 "reg_or_0_operand" "rIG")
+ (match_operand:SF 2 "reg_or_0_operand" "rIG")))]
+ "TARGET_V850E2V3"
+ "cmovf.s 0,%z1,%z2,%0"
+ [(set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+(define_insn "movsfcc_nz_insn"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (if_then_else:SF
+ (match_operand 3 "v850_float_nz_comparison_operator" "")
+ (match_operand:SF 1 "reg_or_0_operand" "rIG")
+ (match_operand:SF 2 "reg_or_0_operand" "rIG")))]
+ "TARGET_V850E2V3"
+ "cmovf.s 0,%z2,%z1,%0"
+ [(set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+(define_insn "movdfcc_z_insn"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (if_then_else:DF
+ (match_operand 3 "v850_float_z_comparison_operator" "")
+ (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmovf.d 0,%z1,%z2,%0"
+ [(set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+(define_insn "movdfcc_nz_insn"
+ [(set (match_operand:DF 0 "even_reg_operand" "=r")
+ (if_then_else:DF
+ (match_operand 3 "v850_float_nz_comparison_operator" "")
+ (match_operand:DF 1 "even_reg_operand" "r")
+ (match_operand:DF 2 "even_reg_operand" "r")))]
+ "TARGET_V850E2V3"
+ "cmovf.d 0,%z2,%z1,%0"
+ [(set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+;; DF conditional move built from two cmovf.s instructions (length 8):
+;; one for the low word (%z1/%z2/%0) and one for the high word
+;; (%Z1/%Z2/%R0).  Used when the sources may be the constant zero.
+(define_insn "movedfcc_z_zero"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (if_then_else:DF
+ (match_operand 3 "v850_float_z_comparison_operator" "")
+ (match_operand:DF 1 "reg_or_0_operand" "rIG")
+ (match_operand:DF 2 "reg_or_0_operand" "rIG")))]
+ "TARGET_V850E2V3"
+ "cmovf.s 0,%z1,%z2,%0 ; cmovf.s 0,%Z1,%Z2,%R0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+(define_insn "movedfcc_nz_zero"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (if_then_else:DF
+ (match_operand 3 "v850_float_nz_comparison_operator" "")
+ (match_operand:DF 1 "reg_or_0_operand" "rIG")
+ (match_operand:DF 2 "reg_or_0_operand" "rIG")))]
+ "TARGET_V850E2V3"
+ "cmovf.s 0,%z2,%z1,%0 ; cmovf.s 0,%Z2,%Z1,%R0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")]) ;; ??? or none_0hit
+
+
+;; ----------------------------------------------------------------------
+;; HELPER INSTRUCTIONS for saving the prologue and epilogue registers
+;; ----------------------------------------------------------------------
+
+;; This pattern will match a stack adjust RTX followed by any number of push
+;; RTXs. These RTXs will then be turned into a suitable call to a worker
+;; function.
+
+;;
+;; Actually, convert the RTXs into a PREPARE instruction.
+;;
+
+;; (reg:SI 3) is the stack pointer.  match_parallel lets the predicate
+;; accept additional store elements beyond the two spelled out here;
+;; construct_prepare_instruction emits the equivalent PREPARE insn.
+(define_insn ""
+ [(match_parallel 0 "pattern_is_ok_for_prepare"
+ [(set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 2 "immediate_operand" "i")))
+ (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))])]
+ "TARGET_PROLOG_FUNCTION && (TARGET_V850E || TARGET_V850E2_ALL)"
+ "* return construct_prepare_instruction (operands[0]);
+ "
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; Fallback for cores without PREPARE: turn the same stack-adjust-plus-
+;; pushes parallel into a "jarl" to a shared save worker function
+;; (see construct_save_jarl).  Length depends on -mlong-calls.
+(define_insn ""
+ [(match_parallel 0 "pattern_is_ok_for_prologue"
+ [(set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 2 "immediate_operand" "i")))
+ (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))])]
+ "TARGET_PROLOG_FUNCTION"
+ "* return construct_save_jarl (operands[0]);
+ "
+ [(set (attr "length") (if_then_else (eq_attr "long_calls" "yes")
+ (const_string "16")
+ (const_string "4")))
+ (set_attr "cc" "clobber")])
+
+;;
+;; Actually, turn the RTXs into a DISPOSE instruction.
+;;
+;; Epilogue counterpart of PREPARE: a (return) plus stack adjust plus any
+;; number of pops, emitted as a single DISPOSE instruction.
+(define_insn ""
+ [(match_parallel 0 "pattern_is_ok_for_dispose"
+ [(return)
+ (set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:SI 2 "register_is_ok_for_epilogue" "=r")
+ (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 3 "immediate_operand" "i"))))])]
+ "TARGET_PROLOG_FUNCTION && (TARGET_V850E || TARGET_V850E2_ALL)"
+ "* return construct_dispose_instruction (operands[0]);
+ "
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; This pattern will match a return RTX followed by any number of pop RTXs
+;; and possible a stack adjustment as well. These RTXs will be turned into
+;; a suitable call to a worker function.
+
+;; Fallback for cores without DISPOSE: jump ("jr") to a shared restore
+;; worker function (see construct_restore_jr).  Length depends on
+;; -mlong-calls.
+(define_insn ""
+[(match_parallel 0 "pattern_is_ok_for_epilogue"
+ [(return)
+ (set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:SI 2 "register_is_ok_for_epilogue" "=r")
+ (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 3 "immediate_operand" "i"))))])]
+ "TARGET_PROLOG_FUNCTION"
+ "* return construct_restore_jr (operands[0]);
+ "
+ [(set (attr "length") (if_then_else (eq_attr "long_calls" "yes")
+ (const_string "12")
+ (const_string "4")))
+ (set_attr "cc" "clobber")])
+
+;; Initialize an interrupt function. Do not depend on TARGET_PROLOG_FUNCTION.
+;; Manually saves CTPC/CTPSW (via r10 as scratch) before using CALLT,
+;; since CALLT itself overwrites CTPC/CTPSW.  unspec_volatile 2 keeps the
+;; compiler from moving insns across this point.
+(define_insn "callt_save_interrupt"
+ [(unspec_volatile [(const_int 0)] 2)]
+ "(TARGET_V850E || TARGET_V850E2_ALL) && !TARGET_DISABLE_CALLT"
+ ;; The CALLT instruction stores the next address of CALLT to CTPC register
+ ;; without saving its previous value. So if the interrupt handler
+ ;; or its caller could possibly execute the CALLT insn, save_interrupt
+ ;; MUST NOT be called via CALLT.
+ "*
+{
+ output_asm_insn (\"addi -28, sp, sp\", operands);
+ output_asm_insn (\"st.w r1, 24[sp]\", operands);
+ output_asm_insn (\"st.w r10, 12[sp]\", operands);
+ output_asm_insn (\"st.w r11, 16[sp]\", operands);
+ output_asm_insn (\"stsr ctpc, r10\", operands);
+ output_asm_insn (\"st.w r10, 20[sp]\", operands);
+ output_asm_insn (\"stsr ctpsw, r10\", operands);
+ output_asm_insn (\"st.w r10, 24[sp]\", operands);
+ output_asm_insn (\"callt ctoff(__callt_save_interrupt)\", operands);
+ return \"\";
+}"
+ [(set_attr "length" "26")
+ (set_attr "cc" "clobber")])
+
+;; Tear down an interrupt frame via the CALLT table entry
+;; __callt_return_interrupt (the library routine performs the restores).
+(define_insn "callt_return_interrupt"
+ [(unspec_volatile [(const_int 0)] 3)]
+ "(TARGET_V850E || TARGET_V850E2_ALL) && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_return_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+;; Save the interrupt-entry scratch registers (ep/r30, gp/r4, r1, r10,
+;; r11) in a 20-byte stack frame.  With prologue functions and short
+;; calls, only r10/r11 are stored inline and the rest is delegated to the
+;; library routine __save_interrupt; otherwise everything is emitted
+;; inline, including reloading ep and gp from __ep/__gp.
+(define_insn "save_interrupt"
+ [(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int -20)))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -20))) (reg:SI 30))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -16))) (reg:SI 4))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -12))) (reg:SI 1))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -8))) (reg:SI 10))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -4))) (reg:SI 11))]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"addi -20,sp,sp \; st.w r11,16[sp] \; st.w r10,12[sp] \; jarl __save_interrupt,r10\";
+ else
+ {
+ output_asm_insn (\"addi -20, sp, sp\", operands);
+ output_asm_insn (\"st.w r11, 16[sp]\", operands);
+ output_asm_insn (\"st.w r10, 12[sp]\", operands);
+ output_asm_insn (\"st.w ep, 0[sp]\", operands);
+ output_asm_insn (\"st.w gp, 4[sp]\", operands);
+ output_asm_insn (\"st.w r1, 8[sp]\", operands);
+ output_asm_insn (\"movhi hi(__ep), r0, ep\", operands);
+ output_asm_insn (\"movea lo(__ep), ep, ep\", operands);
+ output_asm_insn (\"movhi hi(__gp), r0, gp\", operands);
+ output_asm_insn (\"movea lo(__gp), gp, gp\", operands);
+ return \"\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 10)
+ (const_int 34)))
+ (set_attr "cc" "clobber")])
+
+;; Restore r1, r4, r10, and return from the interrupt
+;; (also ep/r30 and r11 — the exact inverse of save_interrupt's frame).
+;; With prologue functions and short calls this is a single "jr" to the
+;; library routine __return_interrupt, which performs the restores and
+;; the reti.
+(define_insn "return_interrupt"
+ [(return)
+ (set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 20)))
+ (set (reg:SI 11) (mem:SI (plus:SI (reg:SI 3) (const_int 16))))
+ (set (reg:SI 10) (mem:SI (plus:SI (reg:SI 3) (const_int 12))))
+ (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 3) (const_int 8))))
+ (set (reg:SI 4) (mem:SI (plus:SI (reg:SI 3) (const_int 4))))
+ (set (reg:SI 30) (mem:SI (reg:SI 3)))]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jr __return_interrupt\";
+ else
+ {
+ output_asm_insn (\"ld.w 0[sp], ep\", operands);
+ output_asm_insn (\"ld.w 4[sp], gp\", operands);
+ output_asm_insn (\"ld.w 8[sp], r1\", operands);
+ output_asm_insn (\"ld.w 12[sp], r10\", operands);
+ output_asm_insn (\"ld.w 16[sp], r11\", operands);
+ output_asm_insn (\"addi 20, sp, sp\", operands);
+ output_asm_insn (\"reti\", operands);
+ return \"\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 4)
+ (const_int 24)))
+ (set_attr "cc" "clobber")])
+
+;; Save all registers except for the registers saved in save_interrupt when
+;; an interrupt function makes a call.
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+;; This is needed because the rest of the compiler is not ready to handle
+;; insns this complicated.
+
+;; CALLT-table variant: delegate the whole save to the library routine.
+(define_insn "callt_save_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 0)]
+ "(TARGET_V850E || TARGET_V850E2_ALL) && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_save_all_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Inline variant: allocate a 120-byte frame and store r31, r2, gp,
+;; r6-r9, r11-r29.  With -mep, sp is copied into ep (preserving the old
+;; ep in r1) so the shorter sst.w encodings can be used; otherwise plain
+;; st.w stores relative to sp are emitted.
+(define_insn "save_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jarl __save_all_interrupt,r10\";
+
+ output_asm_insn (\"addi -120, sp, sp\", operands);
+
+ if (TARGET_EP)
+ {
+ output_asm_insn (\"mov ep, r1\", operands);
+ output_asm_insn (\"mov sp, ep\", operands);
+ output_asm_insn (\"sst.w r31, 116[ep]\", operands);
+ output_asm_insn (\"sst.w r2, 112[ep]\", operands);
+ output_asm_insn (\"sst.w gp, 108[ep]\", operands);
+ output_asm_insn (\"sst.w r6, 104[ep]\", operands);
+ output_asm_insn (\"sst.w r7, 100[ep]\", operands);
+ output_asm_insn (\"sst.w r8, 96[ep]\", operands);
+ output_asm_insn (\"sst.w r9, 92[ep]\", operands);
+ output_asm_insn (\"sst.w r11, 88[ep]\", operands);
+ output_asm_insn (\"sst.w r12, 84[ep]\", operands);
+ output_asm_insn (\"sst.w r13, 80[ep]\", operands);
+ output_asm_insn (\"sst.w r14, 76[ep]\", operands);
+ output_asm_insn (\"sst.w r15, 72[ep]\", operands);
+ output_asm_insn (\"sst.w r16, 68[ep]\", operands);
+ output_asm_insn (\"sst.w r17, 64[ep]\", operands);
+ output_asm_insn (\"sst.w r18, 60[ep]\", operands);
+ output_asm_insn (\"sst.w r19, 56[ep]\", operands);
+ output_asm_insn (\"sst.w r20, 52[ep]\", operands);
+ output_asm_insn (\"sst.w r21, 48[ep]\", operands);
+ output_asm_insn (\"sst.w r22, 44[ep]\", operands);
+ output_asm_insn (\"sst.w r23, 40[ep]\", operands);
+ output_asm_insn (\"sst.w r24, 36[ep]\", operands);
+ output_asm_insn (\"sst.w r25, 32[ep]\", operands);
+ output_asm_insn (\"sst.w r26, 28[ep]\", operands);
+ output_asm_insn (\"sst.w r27, 24[ep]\", operands);
+ output_asm_insn (\"sst.w r28, 20[ep]\", operands);
+ output_asm_insn (\"sst.w r29, 16[ep]\", operands);
+ output_asm_insn (\"mov r1, ep\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"st.w r31, 116[sp]\", operands);
+ output_asm_insn (\"st.w r2, 112[sp]\", operands);
+ output_asm_insn (\"st.w gp, 108[sp]\", operands);
+ output_asm_insn (\"st.w r6, 104[sp]\", operands);
+ output_asm_insn (\"st.w r7, 100[sp]\", operands);
+ output_asm_insn (\"st.w r8, 96[sp]\", operands);
+ output_asm_insn (\"st.w r9, 92[sp]\", operands);
+ output_asm_insn (\"st.w r11, 88[sp]\", operands);
+ output_asm_insn (\"st.w r12, 84[sp]\", operands);
+ output_asm_insn (\"st.w r13, 80[sp]\", operands);
+ output_asm_insn (\"st.w r14, 76[sp]\", operands);
+ output_asm_insn (\"st.w r15, 72[sp]\", operands);
+ output_asm_insn (\"st.w r16, 68[sp]\", operands);
+ output_asm_insn (\"st.w r17, 64[sp]\", operands);
+ output_asm_insn (\"st.w r18, 60[sp]\", operands);
+ output_asm_insn (\"st.w r19, 56[sp]\", operands);
+ output_asm_insn (\"st.w r20, 52[sp]\", operands);
+ output_asm_insn (\"st.w r21, 48[sp]\", operands);
+ output_asm_insn (\"st.w r22, 44[sp]\", operands);
+ output_asm_insn (\"st.w r23, 40[sp]\", operands);
+ output_asm_insn (\"st.w r24, 36[sp]\", operands);
+ output_asm_insn (\"st.w r25, 32[sp]\", operands);
+ output_asm_insn (\"st.w r26, 28[sp]\", operands);
+ output_asm_insn (\"st.w r27, 24[sp]\", operands);
+ output_asm_insn (\"st.w r28, 20[sp]\", operands);
+ output_asm_insn (\"st.w r29, 16[sp]\", operands);
+ }
+
+ return \"\";
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 4)
+ (const_int 62)
+ ))
+ (set_attr "cc" "clobber")])
+
+;; Plain-v850 short-call form: a single "jarl" to __save_all_interrupt.
+(define_insn "_save_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 0)]
+ "TARGET_V850 && ! TARGET_LONG_CALLS"
+ "jarl __save_all_interrupt,r10"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; Restore all registers saved when an interrupt function makes a call.
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+;; This is needed because the rest of the compiler is not ready to handle
+;; insns this complicated.
+
+;; CALLT-table variant: delegate the whole restore to the library routine.
+(define_insn "callt_restore_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "(TARGET_V850E || TARGET_V850E2_ALL) && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_restore_all_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Inline variant: exact inverse of save_all_interrupt — reload r31, r2,
+;; gp, r6-r9, r11-r29 from the 120-byte frame (short sld.w forms via ep
+;; when -mep, plain ld.w otherwise), then pop the frame.
+(define_insn "restore_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jarl __restore_all_interrupt,r10\";
+
+ if (TARGET_EP)
+ {
+ output_asm_insn (\"mov ep, r1\", operands);
+ output_asm_insn (\"mov sp, ep\", operands);
+ output_asm_insn (\"sld.w 116[ep], r31\", operands);
+ output_asm_insn (\"sld.w 112[ep], r2\", operands);
+ output_asm_insn (\"sld.w 108[ep], gp\", operands);
+ output_asm_insn (\"sld.w 104[ep], r6\", operands);
+ output_asm_insn (\"sld.w 100[ep], r7\", operands);
+ output_asm_insn (\"sld.w 96[ep], r8\", operands);
+ output_asm_insn (\"sld.w 92[ep], r9\", operands);
+ output_asm_insn (\"sld.w 88[ep], r11\", operands);
+ output_asm_insn (\"sld.w 84[ep], r12\", operands);
+ output_asm_insn (\"sld.w 80[ep], r13\", operands);
+ output_asm_insn (\"sld.w 76[ep], r14\", operands);
+ output_asm_insn (\"sld.w 72[ep], r15\", operands);
+ output_asm_insn (\"sld.w 68[ep], r16\", operands);
+ output_asm_insn (\"sld.w 64[ep], r17\", operands);
+ output_asm_insn (\"sld.w 60[ep], r18\", operands);
+ output_asm_insn (\"sld.w 56[ep], r19\", operands);
+ output_asm_insn (\"sld.w 52[ep], r20\", operands);
+ output_asm_insn (\"sld.w 48[ep], r21\", operands);
+ output_asm_insn (\"sld.w 44[ep], r22\", operands);
+ output_asm_insn (\"sld.w 40[ep], r23\", operands);
+ output_asm_insn (\"sld.w 36[ep], r24\", operands);
+ output_asm_insn (\"sld.w 32[ep], r25\", operands);
+ output_asm_insn (\"sld.w 28[ep], r26\", operands);
+ output_asm_insn (\"sld.w 24[ep], r27\", operands);
+ output_asm_insn (\"sld.w 20[ep], r28\", operands);
+ output_asm_insn (\"sld.w 16[ep], r29\", operands);
+ output_asm_insn (\"mov r1, ep\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"ld.w 116[sp], r31\", operands);
+ output_asm_insn (\"ld.w 112[sp], r2\", operands);
+ output_asm_insn (\"ld.w 108[sp], gp\", operands);
+ output_asm_insn (\"ld.w 104[sp], r6\", operands);
+ output_asm_insn (\"ld.w 100[sp], r7\", operands);
+ output_asm_insn (\"ld.w 96[sp], r8\", operands);
+ output_asm_insn (\"ld.w 92[sp], r9\", operands);
+ output_asm_insn (\"ld.w 88[sp], r11\", operands);
+ output_asm_insn (\"ld.w 84[sp], r12\", operands);
+ output_asm_insn (\"ld.w 80[sp], r13\", operands);
+ output_asm_insn (\"ld.w 76[sp], r14\", operands);
+ output_asm_insn (\"ld.w 72[sp], r15\", operands);
+ output_asm_insn (\"ld.w 68[sp], r16\", operands);
+ output_asm_insn (\"ld.w 64[sp], r17\", operands);
+ output_asm_insn (\"ld.w 60[sp], r18\", operands);
+ output_asm_insn (\"ld.w 56[sp], r19\", operands);
+ output_asm_insn (\"ld.w 52[sp], r20\", operands);
+ output_asm_insn (\"ld.w 48[sp], r21\", operands);
+ output_asm_insn (\"ld.w 44[sp], r22\", operands);
+ output_asm_insn (\"ld.w 40[sp], r23\", operands);
+ output_asm_insn (\"ld.w 36[sp], r24\", operands);
+ output_asm_insn (\"ld.w 32[sp], r25\", operands);
+ output_asm_insn (\"ld.w 28[sp], r26\", operands);
+ output_asm_insn (\"ld.w 24[sp], r27\", operands);
+ output_asm_insn (\"ld.w 20[sp], r28\", operands);
+ output_asm_insn (\"ld.w 16[sp], r29\", operands);
+ }
+ output_asm_insn (\"addi 120, sp, sp\", operands);
+ return \"\";
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 4)
+ (const_int 62)
+ ))
+ (set_attr "cc" "clobber")])
+
+;; Plain-v850 short-call form: a single "jarl" to __restore_all_interrupt.
+(define_insn "_restore_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "TARGET_V850 && ! TARGET_LONG_CALLS"
+ "jarl __restore_all_interrupt,r10"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+
diff --git a/gcc/config/v850/v850.opt b/gcc/config/v850/v850.opt
new file mode 100644
index 000000000..0ae14313f
--- /dev/null
+++ b/gcc/config/v850/v850.opt
@@ -0,0 +1,106 @@
+; Options for the NEC V850 port of the compiler.
+
+; Copyright (C) 2005, 2007, 2010 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; Each record is: option name, option properties (Target/Report/Mask/...),
+; then the --help string.  Mask(X) defines the TARGET_X flag tested in
+; v850.h/v850.md.
+
+mapp-regs
+Target Report Mask(APP_REGS)
+Use registers r2 and r5
+
+mbig-switch
+Target Report Mask(BIG_SWITCH)
+Use 4 byte entries in switch tables
+
+mdebug
+Target Report Mask(DEBUG)
+Enable backend debugging
+
+mdisable-callt
+Target Report Mask(DISABLE_CALLT)
+Do not use the callt instruction
+
+mep
+Target Report Mask(EP)
+Reuse r30 on a per function basis
+
+mghs
+Target Report Mask(GHS)
+Support Green Hills ABI
+
+mlong-calls
+Target Report Mask(LONG_CALLS)
+Prohibit PC relative function calls
+
+mprolog-function
+Target Report Mask(PROLOG_FUNCTION)
+Use stubs for function prologues
+
+msda
+Target RejectNegative Joined
+Set the max size of data eligible for the SDA area
+
+msmall-sld
+Target Report Mask(SMALL_SLD)
+Enable the use of the short load instructions
+
+mspace
+Target RejectNegative
+Same as: -mep -mprolog-function
+
+mtda
+Target RejectNegative Joined
+Set the max size of data eligible for the TDA area
+
+mno-strict-align
+Target Report Mask(NO_STRICT_ALIGN)
+Do not enforce strict alignment
+
+mjump-tables-in-data-section
+Target Report Mask(JUMP_TABLES_IN_DATA_SECTION)
+Put jump tables for switch statements into the .data section rather than the .code section
+
+; NOTE(review): this record has no --help string, so the option is
+; undocumented in --target-help — confirm whether that is intentional.
+mUS-bit-set
+Target Report Mask(US_BIT_SET)
+
+; CPU selection options are RejectNegative: there is no -mno-v850e etc.
+mv850
+Target Report RejectNegative Mask(V850)
+Compile for the v850 processor
+
+mv850e
+Target Report RejectNegative Mask(V850E)
+Compile for the v850e processor
+
+mv850e1
+Target RejectNegative Mask(V850E1)
+Compile for the v850e1 processor
+
+; MaskExists: reuses the V850E1 mask defined by the record above.
+mv850es
+Target RejectNegative Mask(V850E1) MaskExists
+Compile for the v850es variant of the v850e1
+
+mv850e2
+Target Report RejectNegative Mask(V850E2)
+Compile for the v850e2 processor
+
+mv850e2v3
+Target Report RejectNegative Mask(V850E2V3)
+Compile for the v850e2v3 processor
+
+mzda
+Target RejectNegative Joined
+Set the max size of data eligible for the ZDA area