author     upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
committer  upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
commit     554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree       976dc5ab7fddf506dadce60ae936f43f58787092 /gcc/config/avr
obtained gcc-4.6.4.tar.bz2 from upstream website; verified
gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified
upstream tarball. downloading a git-generated archive based on the
'upstream' tag should provide you with a source tree that is binary
identical to the one extracted from the above tarball. if you have
obtained the source via the command 'git clone', however, do note that
line-endings of files in your working directory might differ from
line-endings of the respective files in the upstream repository.
Diffstat (limited to 'gcc/config/avr')
-rw-r--r--  gcc/config/avr/avr-c.c         |   85
-rwxr-xr-x  gcc/config/avr/avr-devices.c   |  229
-rw-r--r--  gcc/config/avr/avr-protos.h    |  121
-rw-r--r--  gcc/config/avr/avr-stdint.h    |   66
-rw-r--r--  gcc/config/avr/avr.c           | 6416
-rw-r--r--  gcc/config/avr/avr.h           |  835
-rw-r--r--  gcc/config/avr/avr.md          | 3248
-rw-r--r--  gcc/config/avr/avr.opt         |   60
-rw-r--r--  gcc/config/avr/constraints.md  |  109
-rwxr-xr-x  gcc/config/avr/driver-avr.c    |  114
-rw-r--r--  gcc/config/avr/libgcc.S        |  901
-rwxr-xr-x  gcc/config/avr/predicates.md   |  140
-rw-r--r--  gcc/config/avr/rtems.h         |   28
-rw-r--r--  gcc/config/avr/t-avr           |  225
-rw-r--r--  gcc/config/avr/t-rtems         |    3
15 files changed, 12580 insertions, 0 deletions
diff --git a/gcc/config/avr/avr-c.c b/gcc/config/avr/avr-c.c
new file mode 100644
index 000000000..05e8e8b30
--- /dev/null
+++ b/gcc/config/avr/avr-c.c
@@ -0,0 +1,85 @@
+/* Copyright (C) 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov (aesok@post.ru)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tm_p.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "c-family/c-common.h"
+
+/* Not included in avr.c since this requires C front end. */
+
+/* Worker function for TARGET_CPU_CPP_BUILTINS. */
+
+void
+avr_cpu_cpp_builtins (struct cpp_reader *pfile)
+{
+ builtin_define_std ("AVR");
+
+ if (avr_current_arch->macro)
+ cpp_define (pfile, avr_current_arch->macro);
+ if (avr_extra_arch_macro)
+ cpp_define (pfile, avr_extra_arch_macro);
+ if (avr_current_arch->have_elpm)
+ cpp_define (pfile, "__AVR_HAVE_RAMPZ__");
+ if (avr_current_arch->have_elpm)
+ cpp_define (pfile, "__AVR_HAVE_ELPM__");
+ if (avr_current_arch->have_elpmx)
+ cpp_define (pfile, "__AVR_HAVE_ELPMX__");
+ if (avr_current_arch->have_movw_lpmx)
+ {
+ cpp_define (pfile, "__AVR_HAVE_MOVW__");
+ cpp_define (pfile, "__AVR_HAVE_LPMX__");
+ }
+ if (avr_current_arch->asm_only)
+ cpp_define (pfile, "__AVR_ASM_ONLY__");
+ if (avr_current_arch->have_mul)
+ {
+ cpp_define (pfile, "__AVR_ENHANCED__");
+ cpp_define (pfile, "__AVR_HAVE_MUL__");
+ }
+ if (avr_current_arch->have_jmp_call)
+ {
+ cpp_define (pfile, "__AVR_MEGA__");
+ cpp_define (pfile, "__AVR_HAVE_JMP_CALL__");
+ }
+ if (avr_current_arch->have_eijmp_eicall)
+ {
+ cpp_define (pfile, "__AVR_HAVE_EIJMP_EICALL__");
+ cpp_define (pfile, "__AVR_3_BYTE_PC__");
+ }
+ else
+ {
+ cpp_define (pfile, "__AVR_2_BYTE_PC__");
+ }
+
+ if (avr_current_device->short_sp)
+ cpp_define (pfile, "__AVR_HAVE_8BIT_SP__");
+ else
+ cpp_define (pfile, "__AVR_HAVE_16BIT_SP__");
+
+ if (TARGET_NO_INTERRUPTS)
+ cpp_define (pfile, "__NO_INTERRUPTS__");
+}
+
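The conditional defines above give library and application code a stable way to test ISA features at preprocessing time. A minimal consumer-side sketch (hypothetical user code, not part of this patch; only the macro names defined above are assumed):

  /* Select a multiply strategy based on what avr_cpu_cpp_builtins
     defined for the -mmcu in effect.  */
  #if defined (__AVR_HAVE_MUL__)
  #  define USE_HW_MUL 1  /* enhanced cores: hardware MUL available */
  #else
  #  define USE_HW_MUL 0  /* classic cores: fall back to shift-and-add */
  #endif

  #if defined (__AVR_3_BYTE_PC__)
  typedef unsigned long return_pc_t;  /* 3-byte program counter (avr6) */
  #else
  typedef unsigned int return_pc_t;   /* 2-byte program counter */
  #endif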
diff --git a/gcc/config/avr/avr-devices.c b/gcc/config/avr/avr-devices.c
new file mode 100755
index 000000000..91ca95e0f
--- /dev/null
+++ b/gcc/config/avr/avr-devices.c
@@ -0,0 +1,229 @@
+/* Copyright (C) 2009, 2010, 2011
+ Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov (aesok@post.ru)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
+/* List of all known AVR MCU architectures. */
+
+const struct base_arch_s avr_arch_types[] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, NULL, "avr2" }, /* unknown device specified */
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=1", "avr1" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=2", "avr2" },
+ { 0, 0, 0, 1, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=25", "avr25" },
+ { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=3", "avr3" },
+ { 0, 0, 1, 0, 1, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=31", "avr31" },
+ { 0, 0, 1, 1, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=35", "avr35" },
+ { 0, 1, 0, 1, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=4", "avr4" },
+ { 0, 1, 1, 1, 0, 0, 0, 0, 0, 0x0060, "__AVR_ARCH__=5", "avr5" },
+ { 0, 1, 1, 1, 1, 1, 0, 0, 0, 0x0060, "__AVR_ARCH__=51", "avr51" },
+ { 0, 1, 1, 1, 1, 1, 1, 0, 0, 0x0060, "__AVR_ARCH__=6", "avr6" }
+};
+
+/* List of all known AVR MCU types - if updated, it has to be kept
+ in sync in several places (FIXME: is there a better way?):
+ - here;
+ - t-avr (MULTILIB_MATCHES);
+ - gas/config/tc-avr.c;
+ - avr-libc. */
+
+const struct mcu_type_s avr_mcu_types[] = {
+ /* Classic, <= 8K. */
+ { "avr2", ARCH_AVR2, NULL, 0, 0x0060, "s8515" },
+ { "at90s2313", ARCH_AVR2, "__AVR_AT90S2313__", 1, 0x0060, "s2313" },
+ { "at90s2323", ARCH_AVR2, "__AVR_AT90S2323__", 1, 0x0060, "s2323" },
+ { "at90s2333", ARCH_AVR2, "__AVR_AT90S2333__", 1, 0x0060, "s2333" },
+ { "at90s2343", ARCH_AVR2, "__AVR_AT90S2343__", 1, 0x0060, "s2343" },
+ { "attiny22", ARCH_AVR2, "__AVR_ATtiny22__", 1, 0x0060, "tn22" },
+ { "attiny26", ARCH_AVR2, "__AVR_ATtiny26__", 1, 0x0060, "tn26" },
+ { "at90s4414", ARCH_AVR2, "__AVR_AT90S4414__", 0, 0x0060, "s4414" },
+ { "at90s4433", ARCH_AVR2, "__AVR_AT90S4433__", 1, 0x0060, "s4433" },
+ { "at90s4434", ARCH_AVR2, "__AVR_AT90S4434__", 0, 0x0060, "s4434" },
+ { "at90s8515", ARCH_AVR2, "__AVR_AT90S8515__", 0, 0x0060, "s8515" },
+ { "at90c8534", ARCH_AVR2, "__AVR_AT90C8534__", 0, 0x0060, "c8534" },
+ { "at90s8535", ARCH_AVR2, "__AVR_AT90S8535__", 0, 0x0060, "s8535" },
+ /* Classic + MOVW, <= 8K. */
+ { "avr25", ARCH_AVR25, NULL, 0, 0x0060, "tn85" },
+ { "ata6289", ARCH_AVR25, "__AVR_ATA6289__", 0, 0x0100, "a6289" },
+ { "attiny13", ARCH_AVR25, "__AVR_ATtiny13__", 1, 0x0060, "tn13" },
+ { "attiny13a", ARCH_AVR25, "__AVR_ATtiny13A__", 1, 0x0060, "tn13a" },
+ { "attiny2313", ARCH_AVR25, "__AVR_ATtiny2313__", 1, 0x0060, "tn2313" },
+ { "attiny2313a", ARCH_AVR25, "__AVR_ATtiny2313A__", 1, 0x0060, "tn2313a" },
+ { "attiny24", ARCH_AVR25, "__AVR_ATtiny24__", 1, 0x0060, "tn24" },
+ { "attiny24a", ARCH_AVR25, "__AVR_ATtiny24A__", 1, 0x0060, "tn24a" },
+ { "attiny4313", ARCH_AVR25, "__AVR_ATtiny4313__", 0, 0x0060, "tn4313" },
+ { "attiny44", ARCH_AVR25, "__AVR_ATtiny44__", 0, 0x0060, "tn44" },
+ { "attiny44a", ARCH_AVR25, "__AVR_ATtiny44A__", 0, 0x0060, "tn44a" },
+ { "attiny84", ARCH_AVR25, "__AVR_ATtiny84__", 0, 0x0060, "tn84" },
+ { "attiny84a", ARCH_AVR25, "__AVR_ATtiny84A__", 0, 0x0060, "tn84" },
+ { "attiny25", ARCH_AVR25, "__AVR_ATtiny25__", 1, 0x0060, "tn25" },
+ { "attiny45", ARCH_AVR25, "__AVR_ATtiny45__", 0, 0x0060, "tn45" },
+ { "attiny85", ARCH_AVR25, "__AVR_ATtiny85__", 0, 0x0060, "tn85" },
+ { "attiny261", ARCH_AVR25, "__AVR_ATtiny261__", 1, 0x0060, "tn261" },
+ { "attiny261a", ARCH_AVR25, "__AVR_ATtiny261A__", 1, 0x0060, "tn261a" },
+ { "attiny461", ARCH_AVR25, "__AVR_ATtiny461__", 0, 0x0060, "tn461" },
+ { "attiny461a", ARCH_AVR25, "__AVR_ATtiny461A__", 0, 0x0060, "tn461a" },
+ { "attiny861", ARCH_AVR25, "__AVR_ATtiny861__", 0, 0x0060, "tn861" },
+ { "attiny861a", ARCH_AVR25, "__AVR_ATtiny861A__", 0, 0x0060, "tn861a" },
+ { "attiny43u", ARCH_AVR25, "__AVR_ATtiny43U__", 0, 0x0060, "tn43u" },
+ { "attiny87", ARCH_AVR25, "__AVR_ATtiny87__", 0, 0x0100, "tn87" },
+ { "attiny48", ARCH_AVR25, "__AVR_ATtiny48__", 0, 0x0100, "tn48" },
+ { "attiny88", ARCH_AVR25, "__AVR_ATtiny88__", 0, 0x0100, "tn88" },
+ { "at86rf401", ARCH_AVR25, "__AVR_AT86RF401__", 0, 0x0060, "86401" },
+ /* Classic, > 8K, <= 64K. */
+ { "avr3", ARCH_AVR3, NULL, 0, 0x0060, "43355" },
+ { "at43usb355", ARCH_AVR3, "__AVR_AT43USB355__", 0, 0x0060, "43355" },
+ { "at76c711", ARCH_AVR3, "__AVR_AT76C711__", 0, 0x0060, "76711" },
+ /* Classic, == 128K. */
+ { "avr31", ARCH_AVR31, NULL, 0, 0x0060, "m103" },
+ { "atmega103", ARCH_AVR31, "__AVR_ATmega103__", 0, 0x0060, "m103" },
+ { "at43usb320", ARCH_AVR31, "__AVR_AT43USB320__", 0, 0x0060, "43320" },
+ /* Classic + MOVW + JMP/CALL. */
+ { "avr35", ARCH_AVR35, NULL, 0, 0x0100, "usb162" },
+ { "at90usb82", ARCH_AVR35, "__AVR_AT90USB82__", 0, 0x0100, "usb82" },
+ { "at90usb162", ARCH_AVR35, "__AVR_AT90USB162__", 0, 0x0100, "usb162" },
+ { "atmega8u2", ARCH_AVR35, "__AVR_ATmega8U2__", 0, 0x0100, "m8u2" },
+ { "atmega16u2", ARCH_AVR35, "__AVR_ATmega16U2__", 0, 0x0100, "m16u2" },
+ { "atmega32u2", ARCH_AVR35, "__AVR_ATmega32U2__", 0, 0x0100, "m32u2" },
+ { "attiny167", ARCH_AVR35, "__AVR_ATtiny167__", 0, 0x0100, "tn167" },
+ /* Enhanced, <= 8K. */
+ { "avr4", ARCH_AVR4, NULL, 0, 0x0060, "m8" },
+ { "atmega8", ARCH_AVR4, "__AVR_ATmega8__", 0, 0x0060, "m8" },
+ { "atmega48", ARCH_AVR4, "__AVR_ATmega48__", 0, 0x0100, "m48" },
+ { "atmega48a", ARCH_AVR4, "__AVR_ATmega48A__", 0, 0x0100, "m48a" },
+ { "atmega48p", ARCH_AVR4, "__AVR_ATmega48P__", 0, 0x0100, "m48p" },
+ { "atmega88", ARCH_AVR4, "__AVR_ATmega88__", 0, 0x0100, "m88" },
+ { "atmega88a", ARCH_AVR4, "__AVR_ATmega88A__", 0, 0x0100, "m88a" },
+ { "atmega88p", ARCH_AVR4, "__AVR_ATmega88P__", 0, 0x0100, "m88p" },
+ { "atmega88pa", ARCH_AVR4, "__AVR_ATmega88PA__", 0, 0x0100, "m88pa" },
+ { "atmega8515", ARCH_AVR4, "__AVR_ATmega8515__", 0, 0x0060, "m8515" },
+ { "atmega8535", ARCH_AVR4, "__AVR_ATmega8535__", 0, 0x0060, "m8535" },
+ { "atmega8hva", ARCH_AVR4, "__AVR_ATmega8HVA__", 0, 0x0100, "m8hva" },
+ { "at90pwm1", ARCH_AVR4, "__AVR_AT90PWM1__", 0, 0x0100, "90pwm1" },
+ { "at90pwm2", ARCH_AVR4, "__AVR_AT90PWM2__", 0, 0x0100, "90pwm2" },
+ { "at90pwm2b", ARCH_AVR4, "__AVR_AT90PWM2B__", 0, 0x0100, "90pwm2b" },
+ { "at90pwm3", ARCH_AVR4, "__AVR_AT90PWM3__", 0, 0x0100, "90pwm3" },
+ { "at90pwm3b", ARCH_AVR4, "__AVR_AT90PWM3B__", 0, 0x0100, "90pwm3b" },
+ { "at90pwm81", ARCH_AVR4, "__AVR_AT90PWM81__", 0, 0x0100, "90pwm81" },
+ /* Enhanced, > 8K, <= 64K. */
+ { "avr5", ARCH_AVR5, NULL, 0, 0x0060, "m16" },
+ { "atmega16", ARCH_AVR5, "__AVR_ATmega16__", 0, 0x0060, "m16" },
+ { "atmega16a", ARCH_AVR5, "__AVR_ATmega16A__", 0, 0x0060, "m16a" },
+ { "atmega161", ARCH_AVR5, "__AVR_ATmega161__", 0, 0x0060, "m161" },
+ { "atmega162", ARCH_AVR5, "__AVR_ATmega162__", 0, 0x0100, "m162" },
+ { "atmega163", ARCH_AVR5, "__AVR_ATmega163__", 0, 0x0060, "m163" },
+ { "atmega164a", ARCH_AVR5, "__AVR_ATmega164A__", 0, 0x0100, "m164a" },
+ { "atmega164p", ARCH_AVR5, "__AVR_ATmega164P__", 0, 0x0100, "m164p" },
+ { "atmega165", ARCH_AVR5, "__AVR_ATmega165__", 0, 0x0100, "m165" },
+ { "atmega165a", ARCH_AVR5, "__AVR_ATmega165A__", 0, 0x0100, "m165a" },
+ { "atmega165p", ARCH_AVR5, "__AVR_ATmega165P__", 0, 0x0100, "m165p" },
+ { "atmega168", ARCH_AVR5, "__AVR_ATmega168__", 0, 0x0100, "m168" },
+ { "atmega168a", ARCH_AVR5, "__AVR_ATmega168A__", 0, 0x0100, "m168a" },
+ { "atmega168p", ARCH_AVR5, "__AVR_ATmega168P__", 0, 0x0100, "m168p" },
+ { "atmega169", ARCH_AVR5, "__AVR_ATmega169__", 0, 0x0100, "m169" },
+ { "atmega169a", ARCH_AVR5, "__AVR_ATmega169A__", 0, 0x0100, "m169a" },
+ { "atmega169p", ARCH_AVR5, "__AVR_ATmega169P__", 0, 0x0100, "m169p" },
+ { "atmega169pa", ARCH_AVR5, "__AVR_ATmega169PA__", 0, 0x0100, "m169pa" },
+ { "atmega32", ARCH_AVR5, "__AVR_ATmega32__", 0, 0x0060, "m32" },
+ { "atmega323", ARCH_AVR5, "__AVR_ATmega323__", 0, 0x0060, "m323" },
+ { "atmega324a", ARCH_AVR5, "__AVR_ATmega324A__", 0, 0x0100, "m324a" },
+ { "atmega324p", ARCH_AVR5, "__AVR_ATmega324P__", 0, 0x0100, "m324p" },
+ { "atmega324pa", ARCH_AVR5, "__AVR_ATmega324PA__", 0, 0x0100, "m324pa" },
+ { "atmega325", ARCH_AVR5, "__AVR_ATmega325__", 0, 0x0100, "m325" },
+ { "atmega325a", ARCH_AVR5, "__AVR_ATmega325A__", 0, 0x0100, "m325a" },
+ { "atmega325p", ARCH_AVR5, "__AVR_ATmega325P__", 0, 0x0100, "m325p" },
+ { "atmega3250", ARCH_AVR5, "__AVR_ATmega3250__", 0, 0x0100, "m3250" },
+ { "atmega3250a", ARCH_AVR5, "__AVR_ATmega3250A__", 0, 0x0100, "m3250a" },
+ { "atmega3250p", ARCH_AVR5, "__AVR_ATmega3250P__", 0, 0x0100, "m3250p" },
+ { "atmega328", ARCH_AVR5, "__AVR_ATmega328__", 0, 0x0100, "m328" },
+ { "atmega328p", ARCH_AVR5, "__AVR_ATmega328P__", 0, 0x0100, "m328p" },
+ { "atmega329", ARCH_AVR5, "__AVR_ATmega329__", 0, 0x0100, "m329" },
+ { "atmega329a", ARCH_AVR5, "__AVR_ATmega329A__", 0, 0x0100, "m329a" },
+ { "atmega329p", ARCH_AVR5, "__AVR_ATmega329P__", 0, 0x0100, "m329p" },
+ { "atmega329pa", ARCH_AVR5, "__AVR_ATmega329PA__", 0, 0x0100, "m329pa" },
+ { "atmega3290", ARCH_AVR5, "__AVR_ATmega3290__", 0, 0x0100, "m3290" },
+ { "atmega3290a", ARCH_AVR5, "__AVR_ATmega3290A__", 0, 0x0100, "m3290a" },
+ { "atmega3290p", ARCH_AVR5, "__AVR_ATmega3290P__", 0, 0x0100, "m3290p" },
+ { "atmega406", ARCH_AVR5, "__AVR_ATmega406__", 0, 0x0100, "m406" },
+ { "atmega64", ARCH_AVR5, "__AVR_ATmega64__", 0, 0x0100, "m64" },
+ { "atmega640", ARCH_AVR5, "__AVR_ATmega640__", 0, 0x0200, "m640" },
+ { "atmega644", ARCH_AVR5, "__AVR_ATmega644__", 0, 0x0100, "m644" },
+ { "atmega644a", ARCH_AVR5, "__AVR_ATmega644A__", 0, 0x0100, "m644a" },
+ { "atmega644p", ARCH_AVR5, "__AVR_ATmega644P__", 0, 0x0100, "m644p" },
+ { "atmega644pa", ARCH_AVR5, "__AVR_ATmega644PA__", 0, 0x0100, "m644pa" },
+ { "atmega645", ARCH_AVR5, "__AVR_ATmega645__", 0, 0x0100, "m645" },
+ { "atmega645a", ARCH_AVR5, "__AVR_ATmega645A__", 0, 0x0100, "m645a" },
+ { "atmega645p", ARCH_AVR5, "__AVR_ATmega645P__", 0, 0x0100, "m645p" },
+ { "atmega6450", ARCH_AVR5, "__AVR_ATmega6450__", 0, 0x0100, "m6450" },
+ { "atmega6450a", ARCH_AVR5, "__AVR_ATmega6450A__", 0, 0x0100, "m6450a" },
+ { "atmega6450p", ARCH_AVR5, "__AVR_ATmega6450P__", 0, 0x0100, "m6450p" },
+ { "atmega649", ARCH_AVR5, "__AVR_ATmega649__", 0, 0x0100, "m649" },
+ { "atmega649a", ARCH_AVR5, "__AVR_ATmega649A__", 0, 0x0100, "m649a" },
+ { "atmega649p", ARCH_AVR5, "__AVR_ATmega649P__", 0, 0x0100, "m649p" },
+ { "atmega6490", ARCH_AVR5, "__AVR_ATmega6490__", 0, 0x0100, "m6490" },
+ { "atmega16hva", ARCH_AVR5, "__AVR_ATmega16HVA__", 0, 0x0100, "m16hva" },
+ { "atmega16hva2", ARCH_AVR5, "__AVR_ATmega16HVA2__", 0, 0x0100, "m16hva2" },
+ { "atmega16hvb", ARCH_AVR5, "__AVR_ATmega16HVB__", 0, 0x0100, "m16hvb" },
+ { "atmega32hvb", ARCH_AVR5, "__AVR_ATmega32HVB__", 0, 0x0100, "m32hvb" },
+ { "atmega64hve", ARCH_AVR5, "__AVR_ATmega64HVE__", 0, 0x0100, "m64hve" },
+ { "at90can32", ARCH_AVR5, "__AVR_AT90CAN32__", 0, 0x0100, "can32" },
+ { "at90can64", ARCH_AVR5, "__AVR_AT90CAN64__", 0, 0x0100, "can64" },
+ { "at90pwm216", ARCH_AVR5, "__AVR_AT90PWM216__", 0, 0x0100, "90pwm216" },
+ { "at90pwm316", ARCH_AVR5, "__AVR_AT90PWM316__", 0, 0x0100, "90pwm316" },
+ { "atmega32c1", ARCH_AVR5, "__AVR_ATmega32C1__", 0, 0x0100, "m32c1" },
+ { "atmega64c1", ARCH_AVR5, "__AVR_ATmega64C1__", 0, 0x0100, "m64c1" },
+ { "atmega16m1", ARCH_AVR5, "__AVR_ATmega16M1__", 0, 0x0100, "m16m1" },
+ { "atmega32m1", ARCH_AVR5, "__AVR_ATmega32M1__", 0, 0x0100, "m32m1" },
+ { "atmega64m1", ARCH_AVR5, "__AVR_ATmega64M1__", 0, 0x0100, "m64m1" },
+ { "atmega16u4", ARCH_AVR5, "__AVR_ATmega16U4__", 0, 0x0100, "m16u4" },
+ { "atmega32u4", ARCH_AVR5, "__AVR_ATmega32U4__", 0, 0x0100, "m32u4" },
+ { "atmega32u6", ARCH_AVR5, "__AVR_ATmega32U6__", 0, 0x0100, "m32u6" },
+ { "at90scr100", ARCH_AVR5, "__AVR_AT90SCR100__", 0, 0x0100, "90scr100" },
+ { "at90usb646", ARCH_AVR5, "__AVR_AT90USB646__", 0, 0x0100, "usb646" },
+ { "at90usb647", ARCH_AVR5, "__AVR_AT90USB647__", 0, 0x0100, "usb647" },
+ { "at94k", ARCH_AVR5, "__AVR_AT94K__", 0, 0x0060, "at94k" },
+ { "m3000", ARCH_AVR5, "__AVR_M3000__", 0, 0x1000, "m3000" },
+ /* Enhanced, == 128K. */
+ { "avr51", ARCH_AVR51, NULL, 0, 0x0100, "m128" },
+ { "atmega128", ARCH_AVR51, "__AVR_ATmega128__", 0, 0x0100, "m128" },
+ { "atmega1280", ARCH_AVR51, "__AVR_ATmega1280__", 0, 0x0200, "m1280" },
+ { "atmega1281", ARCH_AVR51, "__AVR_ATmega1281__", 0, 0x0200, "m1281" },
+ { "atmega1284p", ARCH_AVR51, "__AVR_ATmega1284P__", 0, 0x0100, "m1284p" },
+ { "atmega128rfa1", ARCH_AVR51, "__AVR_ATmega128RFA1__", 0, 0x0200, "m128rfa1" },
+ { "at90can128", ARCH_AVR51, "__AVR_AT90CAN128__", 0, 0x0100, "can128" },
+ { "at90usb1286", ARCH_AVR51, "__AVR_AT90USB1286__", 0, 0x0100, "usb1286" },
+ { "at90usb1287", ARCH_AVR51, "__AVR_AT90USB1287__", 0, 0x0100, "usb1287" },
+ /* 3-Byte PC. */
+ { "avr6", ARCH_AVR6, NULL, 0, 0x0200, "m2561" },
+ { "atmega2560", ARCH_AVR6, "__AVR_ATmega2560__", 0, 0x0200, "m2560" },
+ { "atmega2561", ARCH_AVR6, "__AVR_ATmega2561__", 0, 0x0200, "m2561" },
+ /* Assembler only. */
+ { "avr1", ARCH_AVR1, NULL, 0, 0x0060, "s1200" },
+ { "at90s1200", ARCH_AVR1, "__AVR_AT90S1200__", 0, 0x0060, "s1200" },
+ { "attiny11", ARCH_AVR1, "__AVR_ATtiny11__", 0, 0x0060, "tn11" },
+ { "attiny12", ARCH_AVR1, "__AVR_ATtiny12__", 0, 0x0060, "tn12" },
+ { "attiny15", ARCH_AVR1, "__AVR_ATtiny15__", 0, 0x0060, "tn15" },
+ { "attiny28", ARCH_AVR1, "__AVR_ATtiny28__", 0, 0x0060, "tn28" },
+ /* End of list. */
+ { NULL, ARCH_UNKNOWN, NULL, 0, 0, NULL }
+};
+
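Both tables end in a sentinel entry, so lookups are plain linear scans that stop at the NULL name. A condensed sketch of the idiom (it mirrors the -mmcu= scan in avr_option_override in avr.c below; mcu_name stands in for the option's argument):

  #include <string.h>

  const struct mcu_type_s *t;

  for (t = avr_mcu_types; t->name; t++)
    if (strcmp (t->name, mcu_name) == 0)  /* e.g. "atmega328p" */
      break;

  if (!t->name)
    {
      /* Fell off the end of the table: unknown MCU.  */
    }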
diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
new file mode 100644
index 000000000..06c9254fd
--- /dev/null
+++ b/gcc/config/avr/avr-protos.h
@@ -0,0 +1,121 @@
+/* Prototypes for exported functions defined in avr.c
+
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+extern int function_arg_regno_p (int r);
+extern void avr_cpu_cpp_builtins (struct cpp_reader * pfile);
+extern int avr_ret_register (void);
+extern enum reg_class avr_regno_reg_class (int r);
+extern void asm_globalize_label (FILE *file, const char *name);
+extern void avr_asm_declare_function_name (FILE *, const char *, tree);
+extern void order_regs_for_local_alloc (void);
+extern int avr_initial_elimination_offset (int from, int to);
+extern int avr_simple_epilogue (void);
+extern void gas_output_limited_string (FILE *file, const char *str);
+extern void gas_output_ascii (FILE *file, const char *str, size_t length);
+extern int avr_hard_regno_rename_ok (unsigned int, unsigned int);
+extern rtx avr_return_addr_rtx (int count, rtx tem);
+
+#ifdef TREE_CODE
+extern void asm_output_external (FILE *file, tree decl, char *name);
+extern int avr_progmem_p (tree decl, tree attributes);
+
+#ifdef RTX_CODE /* inside TREE_CODE */
+extern void init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+ rtx libname, tree fndecl);
+#endif /* RTX_CODE inside TREE_CODE */
+
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+extern void asm_output_external_libcall (FILE *file, rtx symref);
+extern int compare_diff_p (rtx insn);
+extern const char *output_movqi (rtx insn, rtx operands[], int *l);
+extern const char *output_movhi (rtx insn, rtx operands[], int *l);
+extern const char *out_movqi_r_mr (rtx insn, rtx op[], int *l);
+extern const char *out_movqi_mr_r (rtx insn, rtx op[], int *l);
+extern const char *out_movhi_r_mr (rtx insn, rtx op[], int *l);
+extern const char *out_movhi_mr_r (rtx insn, rtx op[], int *l);
+extern const char *out_movsi_r_mr (rtx insn, rtx op[], int *l);
+extern const char *out_movsi_mr_r (rtx insn, rtx op[], int *l);
+extern const char *output_movsisf (rtx insn, rtx operands[], int *l);
+extern const char *out_tstsi (rtx insn, rtx src, int *l);
+extern const char *out_tsthi (rtx insn, rtx src, int *l);
+extern const char *ret_cond_branch (rtx x, int len, int reverse);
+
+extern const char *ashlqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashlhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashlsi3_out (rtx insn, rtx operands[], int *len);
+
+extern const char *ashrqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashrhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashrsi3_out (rtx insn, rtx operands[], int *len);
+
+extern const char *lshrqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *lshrhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
+extern bool avr_rotate_bytes (rtx operands[]);
+
+extern void expand_prologue (void);
+extern void expand_epilogue (void);
+extern int avr_epilogue_uses (int regno);
+
+extern void avr_output_bld (rtx operands[], int bit_nr);
+extern void avr_output_addr_vec_elt (FILE *stream, int value);
+extern const char *avr_out_sbxx_branch (rtx insn, rtx operands[]);
+
+extern int extra_constraint_Q (rtx x);
+extern int adjust_insn_length (rtx insn, int len);
+extern rtx avr_libcall_value (enum machine_mode mode);
+extern const char *output_reload_inhi (rtx insn, rtx *operands, int *len);
+extern const char *output_reload_insisf (rtx insn, rtx *operands, int *len);
+extern enum reg_class secondary_input_reload_class (enum reg_class,
+ enum machine_mode,
+ rtx);
+extern void notice_update_cc (rtx body, rtx insn);
+extern void print_operand (FILE *file, rtx x, int code);
+extern void print_operand_address (FILE *file, rtx addr);
+extern int reg_unused_after (rtx insn, rtx reg);
+extern int _reg_unused_after (rtx insn, rtx reg);
+extern int avr_jump_mode (rtx x, rtx insn);
+extern int byte_immediate_operand (rtx op, enum machine_mode mode);
+extern int test_hard_reg_class (enum reg_class rclass, rtx x);
+extern int jump_over_one_insn_p (rtx insn, rtx dest);
+
+extern int avr_hard_regno_mode_ok (int regno, enum machine_mode mode);
+extern void final_prescan_insn (rtx insn, rtx *operand, int num_operands);
+extern int avr_simplify_comparison_p (enum machine_mode mode,
+ RTX_CODE op, rtx x);
+extern RTX_CODE avr_normalize_condition (RTX_CODE condition);
+extern int compare_eq_p (rtx insn);
+extern void out_shift_with_cnt (const char *templ, rtx insn,
+ rtx operands[], int *len, int t_len);
+extern rtx avr_incoming_return_addr_rtx (void);
+#endif /* RTX_CODE */
+
+#ifdef HAVE_MACHINE_MODES
+extern int class_max_nregs (enum reg_class rclass, enum machine_mode mode);
+#endif /* HAVE_MACHINE_MODES */
+
+#ifdef REAL_VALUE_TYPE
+extern void asm_output_float (FILE *file, REAL_VALUE_TYPE n);
+#endif
diff --git a/gcc/config/avr/avr-stdint.h b/gcc/config/avr/avr-stdint.h
new file mode 100644
index 000000000..c3ec3ce9f
--- /dev/null
+++ b/gcc/config/avr/avr-stdint.h
@@ -0,0 +1,66 @@
+/* Definitions for <stdint.h> types on systems using newlib.
+ Copyright (C) 2012 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ The intention of this file is to supply definitions that work with
+ avr-gcc's -mint8 that sets int to an 8-bit type.
+
+ This file is intended to yield the same results as newlib-stdint.h,
+ but there are some differences to newlib-stdint.h:
+
+ - AVR is an 8-bit architecture that cannot access 16-bit values
+ atomically; thus SIG_ATOMIC_TYPE is "char".
+
+ - For the same reason, [u]int_fast8_t is defined as 8-bit type.
+
+*/
+
+#define SIG_ATOMIC_TYPE "char"
+
+#define INT8_TYPE "signed char"
+#define INT16_TYPE (INT_TYPE_SIZE == 16 ? "short int" : "long int")
+#define INT32_TYPE (INT_TYPE_SIZE == 16 ? "long int" : "long long int")
+#define INT64_TYPE (INT_TYPE_SIZE == 16 ? "long long int" : 0)
+#define UINT8_TYPE "unsigned char"
+#define UINT16_TYPE (INT_TYPE_SIZE == 16 ? "short unsigned int" : "long unsigned int")
+#define UINT32_TYPE (INT_TYPE_SIZE == 16 ? "long unsigned int" : "long long unsigned int")
+#define UINT64_TYPE (INT_TYPE_SIZE == 16 ? "long long unsigned int" : 0)
+
+#define INT_LEAST8_TYPE INT8_TYPE
+#define INT_LEAST16_TYPE INT16_TYPE
+#define INT_LEAST32_TYPE INT32_TYPE
+#define INT_LEAST64_TYPE INT64_TYPE
+#define UINT_LEAST8_TYPE UINT8_TYPE
+#define UINT_LEAST16_TYPE UINT16_TYPE
+#define UINT_LEAST32_TYPE UINT32_TYPE
+#define UINT_LEAST64_TYPE UINT64_TYPE
+
+#define INT_FAST8_TYPE INT8_TYPE
+#define INT_FAST16_TYPE (INT_TYPE_SIZE == 16 ? "int" : INT16_TYPE)
+#define INT_FAST32_TYPE INT32_TYPE
+#define INT_FAST64_TYPE INT64_TYPE
+#define UINT_FAST8_TYPE UINT8_TYPE
+#define UINT_FAST16_TYPE (INT_TYPE_SIZE == 16 ? "unsigned int" : UINT16_TYPE)
+#define UINT_FAST32_TYPE UINT32_TYPE
+#define UINT_FAST64_TYPE UINT64_TYPE
+
+#define INTPTR_TYPE PTRDIFF_TYPE
+#ifndef UINTPTR_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+#endif
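Concretely, the macros above resolve to different underlying types depending on INT_TYPE_SIZE. A worked expansion for the two configurations (default vs. -mint8; an illustrative summary of the definitions above):

  /* INT_TYPE_SIZE == 16 (default)      INT_TYPE_SIZE == 8 (-mint8)    */
  /* int16_t -> "short int"             int16_t -> "long int"          */
  /* int32_t -> "long int"              int32_t -> "long long int"     */
  /* int64_t -> "long long int"         int64_t -> 0 (unavailable)     */
  /* int_fast16_t -> "int"              int_fast16_t -> "long int"     */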
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
new file mode 100644
index 000000000..e60857980
--- /dev/null
+++ b/gcc/config/avr/avr.c
@@ -0,0 +1,6416 @@
+/* Subroutines for insn-output.c for ATMEL AVR microcontrollers
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008,
+ 2009, 2010 Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "output.h"
+#include "expr.h"
+#include "diagnostic-core.h"
+#include "obstack.h"
+#include "function.h"
+#include "recog.h"
+#include "ggc.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "params.h"
+#include "df.h"
+
+/* Maximal allowed offset for an address in the LD command */
+#define MAX_LD_OFFSET(MODE) (64 - (signed)GET_MODE_SIZE (MODE))
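+/* For example, QImode gives 64 - 1 = 63 and HImode gives 64 - 2 = 62:
+ the displacement must be small enough that the last byte of the
+ access still fits the 6-bit (0..63) LDD/STD displacement field. */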
+
+static void avr_option_override (void);
+static int avr_naked_function_p (tree);
+static int interrupt_function_p (tree);
+static int signal_function_p (tree);
+static int avr_OS_task_function_p (tree);
+static int avr_OS_main_function_p (tree);
+static int avr_regs_to_save (HARD_REG_SET *);
+static int get_sequence_length (rtx insns);
+static int sequent_regs_live (void);
+static const char *ptrreg_to_str (int);
+static const char *cond_string (enum rtx_code);
+static int avr_num_arg_regs (enum machine_mode, const_tree);
+
+static RTX_CODE compare_condition (rtx insn);
+static rtx avr_legitimize_address (rtx, rtx, enum machine_mode);
+static int compare_sign_p (rtx insn);
+static tree avr_handle_progmem_attribute (tree *, tree, tree, int, bool *);
+static tree avr_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
+static tree avr_handle_fntype_attribute (tree *, tree, tree, int, bool *);
+static bool avr_assemble_integer (rtx, unsigned int, int);
+static void avr_file_start (void);
+static void avr_file_end (void);
+static bool avr_legitimate_address_p (enum machine_mode, rtx, bool);
+static void avr_asm_function_end_prologue (FILE *);
+static void avr_asm_function_begin_epilogue (FILE *);
+static bool avr_cannot_modify_jumps_p (void);
+static rtx avr_function_value (const_tree, const_tree, bool);
+static void avr_insert_attributes (tree, tree *);
+static void avr_asm_init_sections (void);
+static unsigned int avr_section_type_flags (tree, const char *, int);
+static void avr_encode_section_info (tree, rtx, int);
+static void avr_reorg (void);
+static void avr_asm_out_ctor (rtx, int);
+static void avr_asm_out_dtor (rtx, int);
+static int avr_register_move_cost (enum machine_mode, reg_class_t, reg_class_t);
+static int avr_memory_move_cost (enum machine_mode, reg_class_t, bool);
+static int avr_operand_rtx_cost (rtx, enum machine_mode, enum rtx_code, bool);
+static bool avr_rtx_costs (rtx, int, int, int *, bool);
+static int avr_address_cost (rtx, bool);
+static bool avr_return_in_memory (const_tree, const_tree);
+static struct machine_function * avr_init_machine_status (void);
+static rtx avr_builtin_setjmp_frame_value (void);
+static bool avr_hard_regno_scratch_ok (unsigned int);
+static unsigned int avr_case_values_threshold (void);
+static bool avr_frame_pointer_required_p (void);
+static bool avr_can_eliminate (const int, const int);
+static bool avr_allocate_stack_slots_for_args (void);
+static bool avr_class_likely_spilled_p (reg_class_t c);
+static rtx avr_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
+ const_tree, bool);
+static void avr_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ const_tree, bool);
+static void avr_help (void);
+
+/* Allocate registers from r25 to r8 for parameters for function calls. */
+#define FIRST_CUM_REG 26
+
+/* Temporary register RTX (gen_rtx_REG (QImode, TMP_REGNO)) */
+static GTY(()) rtx tmp_reg_rtx;
+
+/* Zeroed register RTX (gen_rtx_REG (QImode, ZERO_REGNO)) */
+static GTY(()) rtx zero_reg_rtx;
+
+/* AVR register names {"r0", "r1", ..., "r31"} */
+static const char *const avr_regnames[] = REGISTER_NAMES;
+
+/* Preprocessor macros to define depending on MCU type. */
+const char *avr_extra_arch_macro;
+
+/* Current architecture. */
+const struct base_arch_s *avr_current_arch;
+
+/* Current device. */
+const struct mcu_type_s *avr_current_device;
+
+section *progmem_section;
+
+/* AVR attributes. */
+static const struct attribute_spec avr_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "progmem", 0, 0, false, false, false, avr_handle_progmem_attribute },
+ { "signal", 0, 0, true, false, false, avr_handle_fndecl_attribute },
+ { "interrupt", 0, 0, true, false, false, avr_handle_fndecl_attribute },
+ { "naked", 0, 0, false, true, true, avr_handle_fntype_attribute },
+ { "OS_task", 0, 0, false, true, true, avr_handle_fntype_attribute },
+ { "OS_main", 0, 0, false, true, true, avr_handle_fntype_attribute },
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
+static const struct default_options avr_option_optimization_table[] =
+ {
+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER avr_assemble_integer
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START avr_file_start
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END avr_file_end
+
+#undef TARGET_ASM_FUNCTION_END_PROLOGUE
+#define TARGET_ASM_FUNCTION_END_PROLOGUE avr_asm_function_end_prologue
+#undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+#define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE avr_asm_function_begin_epilogue
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE avr_function_value
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE avr_attribute_table
+#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES avr_insert_attributes
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS avr_section_type_flags
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO avr_encode_section_info
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST avr_register_move_cost
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST avr_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS avr_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST avr_address_cost
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG avr_reorg
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG avr_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE avr_function_arg_advance
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS avr_legitimize_address
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY avr_return_in_memory
+
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+
+#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
+#define TARGET_BUILTIN_SETJMP_FRAME_VALUE avr_builtin_setjmp_frame_value
+
+#undef TARGET_HARD_REGNO_SCRATCH_OK
+#define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+#undef TARGET_CASE_VALUES_THRESHOLD
+#define TARGET_CASE_VALUES_THRESHOLD avr_case_values_threshold
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P avr_legitimate_address_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED avr_frame_pointer_required_p
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE avr_can_eliminate
+
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS avr_allocate_stack_slots_for_args
+
+#undef TARGET_CLASS_LIKELY_SPILLED_P
+#define TARGET_CLASS_LIKELY_SPILLED_P avr_class_likely_spilled_p
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE avr_option_override
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE avr_option_optimization_table
+
+#undef TARGET_CANNOT_MODIFY_JUMPS_P
+#define TARGET_CANNOT_MODIFY_JUMPS_P avr_cannot_modify_jumps_p
+
+#undef TARGET_HELP
+#define TARGET_HELP avr_help
+
+#undef TARGET_EXCEPT_UNWIND_INFO
+#define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+static void
+avr_option_override (void)
+{
+ const struct mcu_type_s *t;
+
+ flag_delete_null_pointer_checks = 0;
+
+ for (t = avr_mcu_types; t->name; t++)
+ if (strcmp (t->name, avr_mcu_name) == 0)
+ break;
+
+ if (!t->name)
+ {
+ error ("unrecognized argument to -mmcu= option: %qs", avr_mcu_name);
+ inform (input_location, "See --target-help for supported MCUs");
+ }
+
+ avr_current_device = t;
+ avr_current_arch = &avr_arch_types[avr_current_device->arch];
+ avr_extra_arch_macro = avr_current_device->macro;
+
+ tmp_reg_rtx = gen_rtx_REG (QImode, TMP_REGNO);
+ zero_reg_rtx = gen_rtx_REG (QImode, ZERO_REGNO);
+
+ init_machine_status = avr_init_machine_status;
+}
+
+/* Implement TARGET_HELP */
+/* Report extra information for --target-help */
+
+static void
+avr_help (void)
+{
+ const struct mcu_type_s *t;
+ const char * const indent = " ";
+ int len;
+
+ /* Give a list of MCUs that are accepted by -mmcu=* .
+ Note that MCUs supported by the compiler might differ from
+ MCUs supported by binutils. */
+
+ len = strlen (indent);
+ printf ("Known MCU names:\n%s", indent);
+
+ /* Print a blank-separated list of all supported MCUs */
+
+ for (t = avr_mcu_types; t->name; t++)
+ {
+ printf ("%s ", t->name);
+ len += 1 + strlen (t->name);
+
+ /* Break long lines */
+
+ if (len > 66 && (t+1)->name)
+ {
+ printf ("\n%s", indent);
+ len = strlen (indent);
+ }
+ }
+
+ printf ("\n\n");
+}
+
+/* Return register class from register number. */
+
+static const enum reg_class reg_class_tab[]={
+ GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,
+ GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,
+ GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,GENERAL_REGS,
+ GENERAL_REGS, /* r0 - r15 */
+ LD_REGS,LD_REGS,LD_REGS,LD_REGS,LD_REGS,LD_REGS,LD_REGS,
+ LD_REGS, /* r16 - 23 */
+ ADDW_REGS,ADDW_REGS, /* r24,r25 */
+ POINTER_X_REGS,POINTER_X_REGS, /* r26,27 */
+ POINTER_Y_REGS,POINTER_Y_REGS, /* r28,r29 */
+ POINTER_Z_REGS,POINTER_Z_REGS, /* r30,r31 */
+ STACK_REG,STACK_REG /* SPL,SPH */
+};
+
+/* Function to set up the backend function structure. */
+
+static struct machine_function *
+avr_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+/* Return register class for register R. */
+
+enum reg_class
+avr_regno_reg_class (int r)
+{
+ if (r <= 33)
+ return reg_class_tab[r];
+ return ALL_REGS;
+}
+
+/* Return nonzero if FUNC is a naked function. */
+
+static int
+avr_naked_function_p (tree func)
+{
+ tree a;
+
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
+
+ a = lookup_attribute ("naked", TYPE_ATTRIBUTES (TREE_TYPE (func)));
+ return a != NULL_TREE;
+}
+
+/* Return nonzero if FUNC is an interrupt function as specified
+ by the "interrupt" attribute. */
+
+static int
+interrupt_function_p (tree func)
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ return 0;
+
+ a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Return nonzero if FUNC is a signal function as specified
+ by the "signal" attribute. */
+
+static int
+signal_function_p (tree func)
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ return 0;
+
+ a = lookup_attribute ("signal", DECL_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Return nonzero if FUNC is an OS_task function. */
+
+static int
+avr_OS_task_function_p (tree func)
+{
+ tree a;
+
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
+
+ a = lookup_attribute ("OS_task", TYPE_ATTRIBUTES (TREE_TYPE (func)));
+ return a != NULL_TREE;
+}
+
+/* Return nonzero if FUNC is an OS_main function. */
+
+static int
+avr_OS_main_function_p (tree func)
+{
+ tree a;
+
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
+
+ a = lookup_attribute ("OS_main", TYPE_ATTRIBUTES (TREE_TYPE (func)));
+ return a != NULL_TREE;
+}
+
+/* Return the number of hard registers to push/pop in the prologue/epilogue
+ of the current function, and optionally store these registers in SET. */
+
+static int
+avr_regs_to_save (HARD_REG_SET *set)
+{
+ int reg, count;
+ int int_or_sig_p = (interrupt_function_p (current_function_decl)
+ || signal_function_p (current_function_decl));
+
+ if (set)
+ CLEAR_HARD_REG_SET (*set);
+ count = 0;
+
+ /* No need to save any registers if the function never returns or
+ has the "OS_task" or "OS_main" attribute. */
+ if (TREE_THIS_VOLATILE (current_function_decl)
+ || cfun->machine->is_OS_task
+ || cfun->machine->is_OS_main)
+ return 0;
+
+ for (reg = 0; reg < 32; reg++)
+ {
+ /* Do not push/pop __tmp_reg__, __zero_reg__, as well as
+ any global register variables. */
+ if (fixed_regs[reg])
+ continue;
+
+ if ((int_or_sig_p && !current_function_is_leaf && call_used_regs[reg])
+ || (df_regs_ever_live_p (reg)
+ && (int_or_sig_p || !call_used_regs[reg])
+ && !(frame_pointer_needed
+ && (reg == REG_Y || reg == (REG_Y+1)))))
+ {
+ if (set)
+ SET_HARD_REG_BIT (*set, reg);
+ count++;
+ }
+ }
+ return count;
+}
+
+
+/* Implement `TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS' */
+
+static bool
+avr_allocate_stack_slots_for_args (void)
+{
+ return !cfun->machine->is_naked;
+}
+
+
+/* Return true if register FROM can be eliminated via register TO. */
+
+bool
+avr_can_eliminate (const int from, const int to)
+{
+ return ((from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+ || ((from == FRAME_POINTER_REGNUM
+ || from == FRAME_POINTER_REGNUM + 1)
+ && !frame_pointer_needed));
+}
+
+/* Compute offset between arg_pointer and frame_pointer. */
+
+int
+avr_initial_elimination_offset (int from, int to)
+{
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return 0;
+ else
+ {
+ int offset = frame_pointer_needed ? 2 : 0;
+ int avr_pc_size = AVR_HAVE_EIJMP_EICALL ? 3 : 2;
+
+ offset += avr_regs_to_save (NULL);
+ return get_frame_size () + (avr_pc_size) + 1 + offset;
+ }
+}
+
+/* The actual start of the frame is virtual_stack_vars_rtx; this is offset
+ from the frame pointer by +STARTING_FRAME_OFFSET.
+ Using saved frame = virtual_stack_vars_rtx - STARTING_FRAME_OFFSET
+ avoids creating add/sub of offset in nonlocal goto and setjmp. */
+
+rtx avr_builtin_setjmp_frame_value (void)
+{
+ return gen_rtx_MINUS (Pmode, virtual_stack_vars_rtx,
+ gen_int_mode (STARTING_FRAME_OFFSET, Pmode));
+}
+
+/* Return contents of MEM at frame pointer + stack size + 1 (+2 if 3-byte PC).
+ This is the return address of the function. */
+rtx
+avr_return_addr_rtx (int count, rtx tem)
+{
+ rtx r;
+
+ /* Can only return this function's return address. Others not supported. */
+ if (count)
+ return NULL;
+
+ if (AVR_3_BYTE_PC)
+ {
+ r = gen_rtx_SYMBOL_REF (Pmode, ".L__stack_usage+2");
+ warning (0, "'builtin_return_address' contains only 2 bytes of address");
+ }
+ else
+ r = gen_rtx_SYMBOL_REF (Pmode, ".L__stack_usage+1");
+
+ r = gen_rtx_PLUS (Pmode, tem, r);
+ r = gen_frame_mem (Pmode, memory_address (Pmode, r));
+ r = gen_rtx_ROTATE (HImode, r, GEN_INT (8));
+ return r;
+}
+
+/* Return 1 if the function epilogue is just a single "ret". */
+
+int
+avr_simple_epilogue (void)
+{
+ return (! frame_pointer_needed
+ && get_frame_size () == 0
+ && avr_regs_to_save (NULL) == 0
+ && ! interrupt_function_p (current_function_decl)
+ && ! signal_function_p (current_function_decl)
+ && ! avr_naked_function_p (current_function_decl)
+ && ! TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* This function checks the sequence of live registers. */
+
+static int
+sequent_regs_live (void)
+{
+ int reg;
+ int live_seq=0;
+ int cur_seq=0;
+
+ for (reg = 0; reg < 18; ++reg)
+ {
+ if (fixed_regs[reg])
+ {
+ /* Don't recognize sequences that contain global register
+ variables. */
+
+ if (live_seq != 0)
+ return 0;
+ else
+ continue;
+ }
+
+ if (!call_used_regs[reg])
+ {
+ if (df_regs_ever_live_p (reg))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+ }
+ }
+
+ if (!frame_pointer_needed)
+ {
+ if (df_regs_ever_live_p (REG_Y))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+
+ if (df_regs_ever_live_p (REG_Y+1))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+ }
+ else
+ {
+ cur_seq += 2;
+ live_seq += 2;
+ }
+ return (cur_seq == live_seq) ? live_seq : 0;
+}
+
+/* Obtain the length of a sequence of insns. */
+
+int
+get_sequence_length (rtx insns)
+{
+ rtx insn;
+ int length;
+
+ for (insn = insns, length = 0; insn; insn = NEXT_INSN (insn))
+ length += get_attr_length (insn);
+
+ return length;
+}
+
+/* Implement INCOMING_RETURN_ADDR_RTX. */
+
+rtx
+avr_incoming_return_addr_rtx (void)
+{
+ /* The return address is at the top of the stack. Note that the push
+ was via post-decrement, which means the actual address is off by one. */
+ return gen_frame_mem (HImode, plus_constant (stack_pointer_rtx, 1));
+}
+
+/* Helper for expand_prologue. Emit a push of a byte register. */
+
+static void
+emit_push_byte (unsigned regno, bool frame_related_p)
+{
+ rtx mem, reg, insn;
+
+ mem = gen_rtx_POST_DEC (HImode, stack_pointer_rtx);
+ mem = gen_frame_mem (QImode, mem);
+ reg = gen_rtx_REG (QImode, regno);
+
+ insn = emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ cfun->machine->stack_usage++;
+}
+
+
+/* Output function prologue. */
+
+void
+expand_prologue (void)
+{
+ int live_seq;
+ HARD_REG_SET set;
+ int minimize;
+ HOST_WIDE_INT size = get_frame_size();
+ rtx insn;
+
+ /* Init cfun->machine. */
+ cfun->machine->is_naked = avr_naked_function_p (current_function_decl);
+ cfun->machine->is_interrupt = interrupt_function_p (current_function_decl);
+ cfun->machine->is_signal = signal_function_p (current_function_decl);
+ cfun->machine->is_OS_task = avr_OS_task_function_p (current_function_decl);
+ cfun->machine->is_OS_main = avr_OS_main_function_p (current_function_decl);
+ cfun->machine->stack_usage = 0;
+
+ /* Prologue: naked. */
+ if (cfun->machine->is_naked)
+ {
+ return;
+ }
+
+ avr_regs_to_save (&set);
+ live_seq = sequent_regs_live ();
+ minimize = (TARGET_CALL_PROLOGUES
+ && !cfun->machine->is_interrupt
+ && !cfun->machine->is_signal
+ && !cfun->machine->is_OS_task
+ && !cfun->machine->is_OS_main
+ && live_seq);
+
+ if (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ {
+ /* Enable interrupts. */
+ if (cfun->machine->is_interrupt)
+ emit_insn (gen_enable_interrupt ());
+
+ /* Push zero reg. */
+ emit_push_byte (ZERO_REGNO, true);
+
+ /* Push tmp reg. */
+ emit_push_byte (TMP_REGNO, true);
+
+ /* Push SREG. */
+ /* ??? There's no dwarf2 column reserved for SREG. */
+ emit_move_insn (tmp_reg_rtx, gen_rtx_MEM (QImode, GEN_INT (SREG_ADDR)));
+ emit_push_byte (TMP_REGNO, false);
+
+ /* Push RAMPZ. */
+ /* ??? There's no dwarf2 column reserved for RAMPZ. */
+ if (AVR_HAVE_RAMPZ
+ && TEST_HARD_REG_BIT (set, REG_Z)
+ && TEST_HARD_REG_BIT (set, REG_Z + 1))
+ {
+ emit_move_insn (tmp_reg_rtx,
+ gen_rtx_MEM (QImode, GEN_INT (RAMPZ_ADDR)));
+ emit_push_byte (TMP_REGNO, false);
+ }
+
+ /* Clear zero reg. */
+ emit_move_insn (zero_reg_rtx, const0_rtx);
+
+ /* Prevent any attempt to delete the setting of ZERO_REG! */
+ emit_use (zero_reg_rtx);
+ }
+ if (minimize && (frame_pointer_needed
+ || (AVR_2_BYTE_PC && live_seq > 6)
+ || live_seq > 7))
+ {
+ int first_reg, reg, offset;
+
+ emit_move_insn (gen_rtx_REG (HImode, REG_X),
+ gen_int_mode (size, HImode));
+
+ insn = emit_insn (gen_call_prologue_saves
+ (gen_int_mode (live_seq, HImode),
+ gen_int_mode (size + live_seq, HImode)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Describe the effect of the unspec_volatile call to prologue_saves.
+ Note that this formulation assumes that add_reg_note pushes the
+ notes to the front. Thus we build them in the reverse order of
+ how we want dwarf2out to process them. */
+
+ /* The function always sets frame_pointer_rtx, but whether that
+ setting is permanent for the function depends on frame_pointer_needed. */
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode,
+ (frame_pointer_needed
+ ? frame_pointer_rtx : stack_pointer_rtx),
+ plus_constant (stack_pointer_rtx,
+ -(size + live_seq))));
+
+ /* Note that live_seq always contains r28+r29, but the other
+ registers to be saved are all below 18. */
+ first_reg = 18 - (live_seq - 2);
+
+ for (reg = 29, offset = -live_seq + 1;
+ reg >= first_reg;
+ reg = (reg == 28 ? 17 : reg - 1), ++offset)
+ {
+ rtx m, r;
+
+ m = gen_rtx_MEM (QImode, plus_constant (stack_pointer_rtx, offset));
+ r = gen_rtx_REG (QImode, reg);
+ add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, m, r));
+ }
+
+ cfun->machine->stack_usage += size + live_seq;
+ }
+ else
+ {
+ int reg;
+ for (reg = 0; reg < 32; ++reg)
+ if (TEST_HARD_REG_BIT (set, reg))
+ emit_push_byte (reg, true);
+
+ if (frame_pointer_needed)
+ {
+ if (!(cfun->machine->is_OS_task || cfun->machine->is_OS_main))
+ {
+ /* Push frame pointer. Always be consistent about the
+ ordering of pushes -- epilogue_restores expects the
+ register pair to be pushed low byte first. */
+ emit_push_byte (REG_Y, true);
+ emit_push_byte (REG_Y + 1, true);
+ }
+
+ if (!size)
+ {
+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ {
+ /* Creating a frame can be done by direct manipulation of the
+ stack or via the frame pointer. These two methods are:
+ fp=sp
+ fp-=size
+ sp=fp
+ OR
+ sp-=size
+ fp=sp
+ the optimum method depends on function type, stack and frame size.
+ To avoid a complex logic, both methods are tested and shortest
+ is selected. */
+ rtx myfp;
+ rtx fp_plus_insns;
+
+ if (AVR_HAVE_8BIT_SP)
+ {
+ /* The high byte (r29) doesn't change. Prefer 'subi'
+ (1 cycle) over 'sbiw' (2 cycles, same size). */
+ myfp = gen_rtx_REG (QImode, FRAME_POINTER_REGNUM);
+ }
+ else
+ {
+ /* Normal sized addition. */
+ myfp = frame_pointer_rtx;
+ }
+
+ /* Method 1-Adjust frame pointer. */
+ start_sequence ();
+
+ /* Normally the dwarf2out frame-related-expr interpreter does
+ not expect to have the CFA change once the frame pointer is
+ set up. Thus we avoid marking the move insn below and
+ instead indicate that the entire operation is complete after
+ the frame pointer subtraction is done. */
+
+ emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+
+ insn = emit_move_insn (myfp, plus_constant (myfp, -size));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, frame_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
+ -size)));
+
+ /* Copy to stack pointer. Note that since we've already
+ changed the CFA to the frame pointer this operation
+ need not be annotated at all. */
+ if (AVR_HAVE_8BIT_SP)
+ {
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+ else if (TARGET_NO_INTERRUPTS
+ || cfun->machine->is_signal
+ || cfun->machine->is_OS_main)
+ {
+ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+ else if (cfun->machine->is_interrupt)
+ {
+ emit_insn (gen_movhi_sp_r_irq_on (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+ else
+ {
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+
+ fp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /* Method 2-Adjust Stack pointer. */
+ if (size <= 6)
+ {
+ rtx sp_plus_insns;
+
+ start_sequence ();
+
+ insn = plus_constant (stack_pointer_rtx, -size);
+ insn = emit_move_insn (stack_pointer_rtx, insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ sp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /* Use shortest method. */
+ if (get_sequence_length (sp_plus_insns)
+ < get_sequence_length (fp_plus_insns))
+ emit_insn (sp_plus_insns);
+ else
+ emit_insn (fp_plus_insns);
+ }
+ else
+ emit_insn (fp_plus_insns);
+
+ cfun->machine->stack_usage += size;
+ }
+ }
+ }
+
+ if (flag_stack_usage)
+ current_function_static_stack_size = cfun->machine->stack_usage;
+}
+
+/* Output summary at end of function prologue. */
+
+static void
+avr_asm_function_end_prologue (FILE *file)
+{
+ if (cfun->machine->is_naked)
+ {
+ fputs ("/* prologue: naked */\n", file);
+ }
+ else
+ {
+ if (cfun->machine->is_interrupt)
+ {
+ fputs ("/* prologue: Interrupt */\n", file);
+ }
+ else if (cfun->machine->is_signal)
+ {
+ fputs ("/* prologue: Signal */\n", file);
+ }
+ else
+ fputs ("/* prologue: function */\n", file);
+ }
+ fprintf (file, "/* frame size = " HOST_WIDE_INT_PRINT_DEC " */\n",
+ get_frame_size());
+ fprintf (file, "/* stack size = %d */\n",
+ cfun->machine->stack_usage);
+ /* Create symbol stack offset here so all functions have it. Add 1 to stack
+ usage for offset so that SP + .L__stack_usage = return address. */
+ fprintf (file, ".L__stack_usage = %d\n", cfun->machine->stack_usage);
+}
+
+
+/* Implement EPILOGUE_USES. */
+
+int
+avr_epilogue_uses (int regno ATTRIBUTE_UNUSED)
+{
+ if (reload_completed
+ && cfun->machine
+ && (cfun->machine->is_interrupt || cfun->machine->is_signal))
+ return 1;
+ return 0;
+}
+
+/* Helper for expand_epilogue. Emit a pop of a byte register. */
+
+static void
+emit_pop_byte (unsigned regno)
+{
+ rtx mem, reg;
+
+ mem = gen_rtx_PRE_INC (HImode, stack_pointer_rtx);
+ mem = gen_frame_mem (QImode, mem);
+ reg = gen_rtx_REG (QImode, regno);
+
+ emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+}
+
+/* Output RTL epilogue. */
+
+void
+expand_epilogue (void)
+{
+ int reg;
+ int live_seq;
+ HARD_REG_SET set;
+ int minimize;
+ HOST_WIDE_INT size = get_frame_size();
+
+ /* epilogue: naked */
+ if (cfun->machine->is_naked)
+ {
+ emit_jump_insn (gen_return ());
+ return;
+ }
+
+ avr_regs_to_save (&set);
+ live_seq = sequent_regs_live ();
+ minimize = (TARGET_CALL_PROLOGUES
+ && !cfun->machine->is_interrupt
+ && !cfun->machine->is_signal
+ && !cfun->machine->is_OS_task
+ && !cfun->machine->is_OS_main
+ && live_seq);
+
+ if (minimize && (frame_pointer_needed || live_seq > 4))
+ {
+ if (frame_pointer_needed)
+ {
+ /* Get rid of frame. */
+ emit_move_insn(frame_pointer_rtx,
+ gen_rtx_PLUS (HImode, frame_pointer_rtx,
+ gen_int_mode (size, HImode)));
+ }
+ else
+ {
+ emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ }
+
+ emit_insn (gen_epilogue_restores (gen_int_mode (live_seq, HImode)));
+ }
+ else
+ {
+ if (frame_pointer_needed)
+ {
+ if (size)
+ {
+ /* Try two methods to adjust stack and select shortest. */
+ rtx myfp;
+ rtx fp_plus_insns;
+
+ if (AVR_HAVE_8BIT_SP)
+ {
+ /* The high byte (r29) doesn't change - prefer 'subi'
+ (1 cycle) over 'sbiw' (2 cycles, same size). */
+ myfp = gen_rtx_REG (QImode, FRAME_POINTER_REGNUM);
+ }
+ else
+ {
+ /* Normal sized addition. */
+ myfp = frame_pointer_rtx;
+ }
+
+ /* Method 1-Adjust frame pointer. */
+ start_sequence ();
+
+ emit_move_insn (myfp, plus_constant (myfp, size));
+
+ /* Copy to stack pointer. */
+ if (AVR_HAVE_8BIT_SP)
+ {
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+ else if (TARGET_NO_INTERRUPTS
+ || cfun->machine->is_signal)
+ {
+ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+ else if (cfun->machine->is_interrupt)
+ {
+ emit_insn (gen_movhi_sp_r_irq_on (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+ else
+ {
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+
+ fp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /* Method 2-Adjust Stack pointer. */
+ if (size <= 5)
+ {
+ rtx sp_plus_insns;
+
+ start_sequence ();
+
+ emit_move_insn (stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx, size));
+
+ sp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /* Use shortest method. */
+ if (get_sequence_length (sp_plus_insns)
+ < get_sequence_length (fp_plus_insns))
+ emit_insn (sp_plus_insns);
+ else
+ emit_insn (fp_plus_insns);
+ }
+ else
+ emit_insn (fp_plus_insns);
+ }
+ if (!(cfun->machine->is_OS_task || cfun->machine->is_OS_main))
+ {
+ /* Restore previous frame_pointer. See expand_prologue for
+ rationale for not using pophi. */
+ emit_pop_byte (REG_Y + 1);
+ emit_pop_byte (REG_Y);
+ }
+ }
+
+ /* Restore used registers. */
+ for (reg = 31; reg >= 0; --reg)
+ if (TEST_HARD_REG_BIT (set, reg))
+ emit_pop_byte (reg);
+
+ if (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ {
+ /* Restore RAMPZ using tmp reg as scratch. */
+ if (AVR_HAVE_RAMPZ
+ && TEST_HARD_REG_BIT (set, REG_Z)
+ && TEST_HARD_REG_BIT (set, REG_Z + 1))
+ {
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (RAMPZ_ADDR)),
+ tmp_reg_rtx);
+ }
+
+ /* Restore SREG using tmp reg as scratch. */
+ emit_pop_byte (TMP_REGNO);
+
+ emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (SREG_ADDR)),
+ tmp_reg_rtx);
+
+ /* Restore tmp REG. */
+ emit_pop_byte (TMP_REGNO);
+
+ /* Restore zero REG. */
+ emit_pop_byte (ZERO_REGNO);
+ }
+
+ emit_jump_insn (gen_return ());
+ }
+}
+
+/* Output summary messages at beginning of function epilogue. */
+
+static void
+avr_asm_function_begin_epilogue (FILE *file)
+{
+ fprintf (file, "/* epilogue start */\n");
+}
+
+
+/* Implement TARGET_CANNOT_MODIFY_JUMPS_P.  */
+
+static bool
+avr_cannot_modify_jumps_p (void)
+{
+
+  /* Naked functions must not have any instructions after
+     their epilogue; see PR42240.  */
+
+ if (reload_completed
+ && cfun->machine
+ && cfun->machine->is_naked)
+ {
+ return true;
+ }
+
+ return false;
+}
+
+
+/* Return nonzero if X (an RTX) is a legitimate memory address on the target
+ machine for a memory operand of mode MODE. */
+
+bool
+avr_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ enum reg_class r = NO_REGS;
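+
+  /* Valid AVR addressing modes: register indirect through X, Y or Z;
+     base + 0..63 displacement through Y or Z only (ldd/std);
+     pre-decrement and post-increment; and plain constant addresses.
+     X + displacement is accepted here as well and fixed up with
+     adiw/sbiw when the insn is output.  */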
+
+ if (TARGET_ALL_DEBUG)
+ {
+ fprintf (stderr, "mode: (%s) %s %s %s %s:",
+ GET_MODE_NAME(mode),
+ strict ? "(strict)": "",
+ reload_completed ? "(reload_completed)": "",
+ reload_in_progress ? "(reload_in_progress)": "",
+ reg_renumber ? "(reg_renumber)" : "");
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) <= MAX_LD_OFFSET (mode)
+ && reg_renumber
+ )
+ fprintf (stderr, "(r%d ---> r%d)", REGNO (XEXP (x, 0)),
+ true_regnum (XEXP (x, 0)));
+ debug_rtx (x);
+ }
+
+ if (REG_P (x) && (strict ? REG_OK_FOR_BASE_STRICT_P (x)
+ : REG_OK_FOR_BASE_NOSTRICT_P (x)))
+ r = POINTER_REGS;
+ else if (CONSTANT_ADDRESS_P (x))
+ r = ALL_REGS;
+ else if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0)
+ {
+ int fit = INTVAL (XEXP (x, 1)) <= MAX_LD_OFFSET (mode);
+ if (fit)
+ {
+ if (! strict
+ || REGNO (XEXP (x,0)) == REG_X
+ || REGNO (XEXP (x,0)) == REG_Y
+ || REGNO (XEXP (x,0)) == REG_Z)
+ r = BASE_POINTER_REGS;
+ if (XEXP (x,0) == frame_pointer_rtx
+ || XEXP (x,0) == arg_pointer_rtx)
+ r = BASE_POINTER_REGS;
+ }
+ else if (frame_pointer_needed && XEXP (x,0) == frame_pointer_rtx)
+ r = POINTER_Y_REGS;
+ }
+ else if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
+ && REG_P (XEXP (x, 0))
+ && (strict ? REG_OK_FOR_BASE_STRICT_P (XEXP (x, 0))
+ : REG_OK_FOR_BASE_NOSTRICT_P (XEXP (x, 0))))
+ {
+ r = POINTER_REGS;
+ }
+ if (TARGET_ALL_DEBUG)
+ {
+ fprintf (stderr, " ret = %c\n", r + '0');
+ }
+ return r == NO_REGS ? 0 : (int)r;
+}
+
+/* Attempt to replace X with a valid memory
+   address for an operand of mode MODE.  */
+
+rtx
+avr_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
+{
+ x = oldx;
+ if (TARGET_ALL_DEBUG)
+ {
+ fprintf (stderr, "legitimize_address mode: %s", GET_MODE_NAME(mode));
+ debug_rtx (oldx);
+ }
+
+ if (GET_CODE (oldx) == PLUS
+ && REG_P (XEXP (oldx,0)))
+ {
+ if (REG_P (XEXP (oldx,1)))
+ x = force_reg (GET_MODE (oldx), oldx);
+ else if (GET_CODE (XEXP (oldx, 1)) == CONST_INT)
+ {
+ int offs = INTVAL (XEXP (oldx,1));
+ if (frame_pointer_rtx != XEXP (oldx,0))
+ if (offs > MAX_LD_OFFSET (mode))
+ {
+ if (TARGET_ALL_DEBUG)
+ fprintf (stderr, "force_reg (big offset)\n");
+ x = force_reg (GET_MODE (oldx), oldx);
+ }
+ }
+ }
+ return x;
+}
+
+
+/* Return a pointer register name as a string. */
+
+static const char *
+ptrreg_to_str (int regno)
+{
+ switch (regno)
+ {
+ case REG_X: return "X";
+ case REG_Y: return "Y";
+ case REG_Z: return "Z";
+ default:
+ output_operand_lossage ("address operand requires constraint for X, Y, or Z register");
+ }
+ return NULL;
+}
+
+/* Return the condition name as a string.
+   Used when constructing conditional jumps.  */
+
+static const char *
+cond_string (enum rtx_code code)
+{
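+  /* When CC_OVERFLOW_UNUSABLE is set the V flag is not valid, so the
+     signed conditions GE/LT degrade to tests of the N flag alone
+     (brpl/brmi).  */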
+ switch (code)
+ {
+ case NE:
+ return "ne";
+ case EQ:
+ return "eq";
+ case GE:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return "pl";
+ else
+ return "ge";
+ case LT:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return "mi";
+ else
+ return "lt";
+ case GEU:
+ return "sh";
+ case LTU:
+ return "lo";
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Output ADDR to FILE as address. */
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+      fprintf (file, "%s", ptrreg_to_str (REGNO (addr)));
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "-%s", ptrreg_to_str (REGNO (XEXP (addr, 0))));
+ break;
+
+ case POST_INC:
+ fprintf (file, "%s+", ptrreg_to_str (REGNO (XEXP (addr, 0))));
+ break;
+
+ default:
+ if (CONSTANT_ADDRESS_P (addr)
+ && text_segment_operand (addr, VOIDmode))
+ {
+ rtx x = XEXP (addr,0);
+ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x,1)) == CONST_INT)
+ {
+	      /* The assembler's gs() emits a word address.  Make the
+	         offset a byte offset inside gs() for the assembler,
+	         because the more logical (constant + gs(sym)) is not
+	         accepted by gas.  For devices with 128K or less this is
+	         fine; for larger devices it creates a trampoline to
+	         offset from the symbol, which may not be what the user
+	         really wanted.  */
+ fprintf (file, "gs(");
+ output_addr_const (file, XEXP (x,0));
+ fprintf (file,"+" HOST_WIDE_INT_PRINT_DEC ")", 2 * INTVAL (XEXP (x,1)));
+ if (AVR_3_BYTE_PC)
+	    if (warning (0, "pointer offset from symbol may be incorrect"))
+ {
+ output_addr_const (stderr, addr);
+ fprintf(stderr,"\n");
+ }
+ }
+ else
+ {
+ fprintf (file, "gs(");
+ output_addr_const (file, addr);
+ fprintf (file, ")");
+ }
+ }
+ else
+ output_addr_const (file, addr);
+ }
+}
+
+
+/* Output X as assembler operand to file FILE. */
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ int abcd = 0;
+
+ if (code >= 'A' && code <= 'D')
+ abcd = code - 'A';
+
+ if (code == '~')
+ {
+ if (!AVR_HAVE_JMP_CALL)
+ fputc ('r', file);
+ }
+ else if (code == '!')
+ {
+ if (AVR_HAVE_EIJMP_EICALL)
+ fputc ('e', file);
+ }
+ else if (REG_P (x))
+ {
+ if (x == zero_reg_rtx)
+ fprintf (file, "__zero_reg__");
+ else
+	fprintf (file, "%s", reg_names[true_regnum (x) + abcd]);
+ }
+ else if (GET_CODE (x) == CONST_INT)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) + abcd);
+ else if (GET_CODE (x) == MEM)
+ {
+ rtx addr = XEXP (x,0);
+ if (code == 'm')
+ {
+ if (!CONSTANT_P (addr))
+	    fatal_insn ("bad address, not a constant:", addr);
+	  /* An assembler template with the 'm' code refers to data,
+	     not the progmem section.  */
+ if (text_segment_operand (addr, VOIDmode))
+	    if (warning (0, "accessing data memory with program memory address"))
+ {
+ output_addr_const (stderr, addr);
+ fprintf(stderr,"\n");
+ }
+ output_addr_const (file, addr);
+ }
+ else if (code == 'o')
+ {
+ if (GET_CODE (addr) != PLUS)
+ fatal_insn ("bad address, not (reg+disp):", addr);
+
+ print_operand (file, XEXP (addr, 1), 0);
+ }
+ else if (code == 'p' || code == 'r')
+ {
+ if (GET_CODE (addr) != POST_INC && GET_CODE (addr) != PRE_DEC)
+ fatal_insn ("bad address, not post_inc or pre_dec:", addr);
+
+ if (code == 'p')
+ print_operand_address (file, XEXP (addr, 0)); /* X, Y, Z */
+ else
+ print_operand (file, XEXP (addr, 0), 0); /* r26, r28, r30 */
+ }
+ else if (GET_CODE (addr) == PLUS)
+ {
+ print_operand_address (file, XEXP (addr,0));
+ if (REGNO (XEXP (addr, 0)) == REG_X)
+	    fatal_insn ("internal compiler error.  Bad address:", addr);
+ fputc ('+', file);
+ print_operand (file, XEXP (addr,1), code);
+ }
+ else
+ print_operand_address (file, addr);
+ }
+ else if (code == 'x')
+ {
+      /* Constant progmem address, as used in jmp or call.  */
+ if (0 == text_segment_operand (x, VOIDmode))
+	if (warning (0, "accessing program memory with data memory address"))
+ {
+ output_addr_const (stderr, x);
+ fprintf(stderr,"\n");
+ }
+      /* Use a normal symbol for the direct address; no linker
+         trampoline is needed.  */
+ output_addr_const (file, x);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+ if (GET_MODE (x) != SFmode)
+ fatal_insn ("internal compiler error. Unknown mode:", x);
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ fprintf (file, "0x%lx", val);
+ }
+ else if (code == 'j')
+ fputs (cond_string (GET_CODE (x)), file);
+ else if (code == 'k')
+ fputs (cond_string (reverse_condition (GET_CODE (x))), file);
+ else
+ print_operand_address (file, x);
+}
+
+/* Update the condition code in the INSN. */
+
+void
+notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
+{
+ rtx set;
+
+ switch (get_attr_cc (insn))
+ {
+ case CC_NONE:
+ /* Insn does not affect CC at all. */
+ break;
+
+ case CC_SET_N:
+ CC_STATUS_INIT;
+ break;
+
+ case CC_SET_ZN:
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ {
+ cc_status.flags |= CC_NO_OVERFLOW;
+ cc_status.value1 = SET_DEST (set);
+ }
+ break;
+
+ case CC_SET_CZN:
+ /* Insn sets the Z,N,C flags of CC to recog_operand[0].
+ The V flag may or may not be known but that's ok because
+ alter_cond will change tests to use EQ/NE. */
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ {
+ cc_status.value1 = SET_DEST (set);
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE;
+ }
+ break;
+
+ case CC_COMPARE:
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ cc_status.value1 = SET_SRC (set);
+ break;
+
+ case CC_CLOBBER:
+ /* Insn doesn't leave CC in a usable state. */
+ CC_STATUS_INIT;
+
+      /* Correct CC for ashrqi3 with a CONST_INT shift count in 1..5.  */
+ set = single_set (insn);
+ if (set)
+ {
+ rtx src = SET_SRC (set);
+
+ if (GET_CODE (src) == ASHIFTRT
+ && GET_MODE (src) == QImode)
+ {
+ rtx x = XEXP (src, 1);
+
+ if (CONST_INT_P (x)
+ && IN_RANGE (INTVAL (x), 1, 5))
+ {
+ cc_status.value1 = SET_DEST (set);
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE;
+ }
+ }
+ }
+ break;
+ }
+}
+
+/* Return maximum number of consecutive registers of
+ class CLASS needed to hold a value of mode MODE. */
+
+int
+class_max_nregs (enum reg_class rclass ATTRIBUTE_UNUSED, enum machine_mode mode)
+{
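+  /* An AVR hard register holds one byte, so this is simply the size
+     of MODE in bytes; e.g. SImode occupies 4 consecutive registers.  */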
+ return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
+}
+
+/* Choose mode for jump insn:
+   1 - relative jump in range -63 <= x <= 62;
+   2 - relative jump in range -2046 <= x <= 2045;
+   3 - absolute jump (only for ATmega[16]03).  */
+
+int
+avr_jump_mode (rtx x, rtx insn)
+{
+ int dest_addr = INSN_ADDRESSES (INSN_UID (GET_CODE (x) == LABEL_REF
+ ? XEXP (x, 0) : x));
+ int cur_addr = INSN_ADDRESSES (INSN_UID (insn));
+ int jump_distance = cur_addr - dest_addr;
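+
+  /* Insn addresses, and hence the distance, are counted in words,
+     matching the ranges of conditional branches (mode 1) and rjmp
+     (mode 2); mode 3 needs the JMP/CALL instructions.  */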
+
+ if (-63 <= jump_distance && jump_distance <= 62)
+ return 1;
+ else if (-2046 <= jump_distance && jump_distance <= 2045)
+ return 2;
+ else if (AVR_HAVE_JMP_CALL)
+ return 3;
+
+ return 2;
+}
+
+/* Return the AVR conditional jump commands.
+   X is a comparison RTX.
+   LEN is a number returned by the avr_jump_mode function.
+   If REVERSE is nonzero, the condition code in X must be reversed.  */
+
+const char *
+ret_cond_branch (rtx x, int len, int reverse)
+{
+ RTX_CODE cond = reverse ? reverse_condition (GET_CODE (x)) : GET_CODE (x);
+
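+  /* AVR has no single 'branch if greater' instruction; GT, GTU, LE
+     and LEU are synthesized below from breq plus a signed or unsigned
+     >= / < branch.  */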
+ switch (cond)
+ {
+ case GT:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return (len == 1 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brpl,%0)) :
+ len == 2 ? (AS1 (breq,.+4) CR_TAB
+ AS1 (brmi,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+6) CR_TAB
+ AS1 (brmi,.+4) CR_TAB
+ AS1 (jmp,%0)));
+
+ else
+ return (len == 1 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brge,%0)) :
+ len == 2 ? (AS1 (breq,.+4) CR_TAB
+ AS1 (brlt,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+6) CR_TAB
+ AS1 (brlt,.+4) CR_TAB
+ AS1 (jmp,%0)));
+ case GTU:
+ return (len == 1 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brsh,%0)) :
+ len == 2 ? (AS1 (breq,.+4) CR_TAB
+ AS1 (brlo,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+6) CR_TAB
+ AS1 (brlo,.+4) CR_TAB
+ AS1 (jmp,%0)));
+ case LE:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return (len == 1 ? (AS1 (breq,%0) CR_TAB
+ AS1 (brmi,%0)) :
+ len == 2 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brpl,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+2) CR_TAB
+ AS1 (brpl,.+4) CR_TAB
+ AS1 (jmp,%0)));
+ else
+ return (len == 1 ? (AS1 (breq,%0) CR_TAB
+ AS1 (brlt,%0)) :
+ len == 2 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brge,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+2) CR_TAB
+ AS1 (brge,.+4) CR_TAB
+ AS1 (jmp,%0)));
+ case LEU:
+ return (len == 1 ? (AS1 (breq,%0) CR_TAB
+ AS1 (brlo,%0)) :
+ len == 2 ? (AS1 (breq,.+2) CR_TAB
+ AS1 (brsh,.+2) CR_TAB
+ AS1 (rjmp,%0)) :
+ (AS1 (breq,.+2) CR_TAB
+ AS1 (brsh,.+4) CR_TAB
+ AS1 (jmp,%0)));
+ default:
+ if (reverse)
+ {
+ switch (len)
+ {
+ case 1:
+ return AS1 (br%k1,%0);
+ case 2:
+ return (AS1 (br%j1,.+2) CR_TAB
+ AS1 (rjmp,%0));
+ default:
+ return (AS1 (br%j1,.+4) CR_TAB
+ AS1 (jmp,%0));
+ }
+ }
+ else
+ {
+ switch (len)
+ {
+ case 1:
+ return AS1 (br%j1,%0);
+ case 2:
+ return (AS1 (br%k1,.+2) CR_TAB
+ AS1 (rjmp,%0));
+ default:
+ return (AS1 (br%k1,.+4) CR_TAB
+ AS1 (jmp,%0));
+ }
+ }
+ }
+ return "";
+}
+
+/* Predicate function for an immediate operand that fits into a byte (8 bits).  */
+
+int
+byte_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return (GET_CODE (op) == CONST_INT
+ && INTVAL (op) <= 0xff && INTVAL (op) >= 0);
+}
+
+/* Output insn cost for next insn. */
+
+void
+final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
+ int num_operands ATTRIBUTE_UNUSED)
+{
+ if (TARGET_ALL_DEBUG)
+ {
+ fprintf (asm_out_file, "/* DEBUG: cost = %d. */\n",
+ rtx_cost (PATTERN (insn), INSN, !optimize_size));
+ }
+}
+
+/* Return 0 if undefined, 1 if always true or always false. */
+
+int
+avr_simplify_comparison_p (enum machine_mode mode, RTX_CODE op, rtx x)
+{
+ unsigned int max = (mode == QImode ? 0xff :
+ mode == HImode ? 0xffff :
+ mode == SImode ? 0xffffffff : 0);
+ if (max && op && GET_CODE (x) == CONST_INT)
+ {
+ if (unsigned_condition (op) != op)
+ max >>= 1;
+
+ if (max != (INTVAL (x) & max)
+ && INTVAL (x) != 0xff)
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Returns nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed. */
+
+int
+function_arg_regno_p (int r)
+{
+ return (r >= 8 && r <= 25);
+}
+
+/* Initialize the variable CUM to the state at the beginning
+   of the argument list.  */
+
+void
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, rtx libname,
+ tree fndecl ATTRIBUTE_UNUSED)
+{
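+  /* Up to 18 bytes of arguments are passed in registers r8..r25
+     (cf. function_arg_regno_p); variadic functions take all their
+     anonymous arguments on the stack.  */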
+ cum->nregs = 18;
+ cum->regno = FIRST_CUM_REG;
+ if (!libname && stdarg_p (fntype))
+ cum->nregs = 0;
+}
+
+/* Returns the number of registers to allocate for a function argument. */
+
+static int
+avr_num_arg_regs (enum machine_mode mode, const_tree type)
+{
+ int size;
+
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ /* Align all function arguments to start in even-numbered registers.
+ Odd-sized arguments leave holes above them. */
+
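+  /* E.g. a 1-byte argument still consumes two register slots, so the
+     following argument again starts in an even-numbered register.  */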
+ return (size + 1) & ~1;
+}
+
+/* Controls whether a function argument is passed
+ in a register, and which register. */
+
+static rtx
+avr_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int bytes = avr_num_arg_regs (mode, type);
+
+ if (cum->nregs && bytes <= cum->nregs)
+ return gen_rtx_REG (mode, cum->regno - bytes);
+
+ return NULL_RTX;
+}
+
+/* Update the summarizer variable CUM to advance past an argument
+ in the argument list. */
+
+static void
+avr_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int bytes = avr_num_arg_regs (mode, type);
+
+ cum->nregs -= bytes;
+ cum->regno -= bytes;
+
+ if (cum->nregs <= 0)
+ {
+ cum->nregs = 0;
+ cum->regno = FIRST_CUM_REG;
+ }
+}
+
+/***********************************************************************
+ Functions for outputting various moves for various modes
+************************************************************************/
+const char *
+output_movqi (rtx insn, rtx operands[], int *l)
+{
+ int dummy;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int *real_l = l;
+
+ if (!l)
+ l = &dummy;
+
+ *l = 1;
+
+ if (register_operand (dest, QImode))
+ {
+ if (register_operand (src, QImode)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ return AS2 (out,%0,%1);
+ else if (test_hard_reg_class (STACK_REG, src))
+ return AS2 (in,%0,%1);
+
+ return AS2 (mov,%0,%1);
+ }
+ else if (CONSTANT_P (src))
+ {
+ if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
+ return AS2 (ldi,%0,lo8(%1));
+
+ if (GET_CODE (src) == CONST_INT)
+ {
+ if (src == const0_rtx) /* mov r,L */
+ return AS1 (clr,%0);
+ else if (src == const1_rtx)
+ {
+ *l = 2;
+ return (AS1 (clr,%0) CR_TAB
+ AS1 (inc,%0));
+ }
+ else if (src == constm1_rtx)
+ {
+		  /* Load the immediate constant -1 into any register.  */
+ *l = 2;
+ return (AS1 (clr,%0) CR_TAB
+ AS1 (dec,%0));
+ }
+ else
+ {
+ int bit_nr = exact_log2 (INTVAL (src));
+
+ if (bit_nr >= 0)
+ {
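+		  /* A power of 2 needs just 3 insns: clr, set (the T
+		     flag), then bld to copy T into the one set bit;
+		     e.g. for 0x20:  clr %0 ; set ; bld %0,5  */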
+ *l = 3;
+ if (!real_l)
+ output_asm_insn ((AS1 (clr,%0) CR_TAB
+ "set"), operands);
+ if (!real_l)
+ avr_output_bld (operands, bit_nr);
+
+ return "";
+ }
+ }
+ }
+
+ /* Last resort, larger than loading from memory. */
+ *l = 4;
+ return (AS2 (mov,__tmp_reg__,r31) CR_TAB
+ AS2 (ldi,r31,lo8(%1)) CR_TAB
+ AS2 (mov,%0,r31) CR_TAB
+ AS2 (mov,r31,__tmp_reg__));
+ }
+ else if (GET_CODE (src) == MEM)
+ return out_movqi_r_mr (insn, operands, real_l); /* mov r,m */
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ const char *templ;
+
+ if (src == const0_rtx)
+ operands[1] = zero_reg_rtx;
+
+ templ = out_movqi_mr_r (insn, operands, real_l);
+
+ if (!real_l)
+ output_asm_insn (templ, operands);
+
+ operands[1] = src;
+ }
+ return "";
+}
+
+
+const char *
+output_movhi (rtx insn, rtx operands[], int *l)
+{
+ int dummy;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int *real_l = l;
+
+ if (!l)
+ l = &dummy;
+
+ if (register_operand (dest, HImode))
+ {
+ if (register_operand (src, HImode)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ {
+ if (AVR_HAVE_8BIT_SP)
+ return *l = 1, AS2 (out,__SP_L__,%A1);
+ /* Use simple load of stack pointer if no interrupts are
+ used. */
+ else if (TARGET_NO_INTERRUPTS)
+ return *l = 2, (AS2 (out,__SP_H__,%B1) CR_TAB
+ AS2 (out,__SP_L__,%A1));
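+	    /* Otherwise interrupts must be disabled around the write to
+	       SP_H: save SREG, cli, write the high byte, restore SREG
+	       (re-enabling interrupts if they were on), then write the
+	       low byte.  */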
+ *l = 5;
+ return (AS2 (in,__tmp_reg__,__SREG__) CR_TAB
+ "cli" CR_TAB
+ AS2 (out,__SP_H__,%B1) CR_TAB
+ AS2 (out,__SREG__,__tmp_reg__) CR_TAB
+ AS2 (out,__SP_L__,%A1));
+ }
+ else if (test_hard_reg_class (STACK_REG, src))
+ {
+ *l = 2;
+ return (AS2 (in,%A0,__SP_L__) CR_TAB
+ AS2 (in,%B0,__SP_H__));
+ }
+
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 1;
+ return (AS2 (movw,%0,%1));
+ }
+ else
+ {
+ *l = 2;
+ return (AS2 (mov,%A0,%A1) CR_TAB
+ AS2 (mov,%B0,%B1));
+ }
+ }
+ else if (CONSTANT_P (src))
+ {
+ if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
+ {
+ *l = 2;
+ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
+ AS2 (ldi,%B0,hi8(%1)));
+ }
+
+ if (GET_CODE (src) == CONST_INT)
+ {
+ if (src == const0_rtx) /* mov r,L */
+ {
+ *l = 2;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (clr,%B0));
+ }
+ else if (src == const1_rtx)
+ {
+ *l = 3;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (inc,%A0));
+ }
+ else if (src == constm1_rtx)
+ {
+	      /* Load the immediate constant -1 into any register.  */
+ *l = 3;
+ return (AS1 (clr,%0) CR_TAB
+ AS1 (dec,%A0) CR_TAB
+ AS2 (mov,%B0,%A0));
+ }
+ else
+ {
+ int bit_nr = exact_log2 (INTVAL (src));
+
+ if (bit_nr >= 0)
+ {
+ *l = 4;
+ if (!real_l)
+ output_asm_insn ((AS1 (clr,%A0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ "set"), operands);
+ if (!real_l)
+ avr_output_bld (operands, bit_nr);
+
+ return "";
+ }
+ }
+
+ if ((INTVAL (src) & 0xff) == 0)
+ {
+ *l = 5;
+ return (AS2 (mov,__tmp_reg__,r31) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS2 (ldi,r31,hi8(%1)) CR_TAB
+ AS2 (mov,%B0,r31) CR_TAB
+ AS2 (mov,r31,__tmp_reg__));
+ }
+ else if ((INTVAL (src) & 0xff00) == 0)
+ {
+ *l = 5;
+ return (AS2 (mov,__tmp_reg__,r31) CR_TAB
+ AS2 (ldi,r31,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,r31) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS2 (mov,r31,__tmp_reg__));
+ }
+ }
+
+ /* Last resort, equal to loading from memory. */
+ *l = 6;
+ return (AS2 (mov,__tmp_reg__,r31) CR_TAB
+ AS2 (ldi,r31,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,r31) CR_TAB
+ AS2 (ldi,r31,hi8(%1)) CR_TAB
+ AS2 (mov,%B0,r31) CR_TAB
+ AS2 (mov,r31,__tmp_reg__));
+ }
+ else if (GET_CODE (src) == MEM)
+ return out_movhi_r_mr (insn, operands, real_l); /* mov r,m */
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ const char *templ;
+
+ if (src == const0_rtx)
+ operands[1] = zero_reg_rtx;
+
+ templ = out_movhi_mr_r (insn, operands, real_l);
+
+ if (!real_l)
+ output_asm_insn (templ, operands);
+
+ operands[1] = src;
+ return "";
+ }
+ fatal_insn ("invalid insn:", insn);
+ return "";
+}
+
+const char *
+out_movqi_r_mr (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx x = XEXP (src, 0);
+ int dummy;
+
+ if (!l)
+ l = &dummy;
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+ if (CONST_INT_P (x) && INTVAL (x) == SREG_ADDR)
+ {
+ *l = 1;
+ return AS2 (in,%0,__SREG__);
+ }
+ if (optimize > 0 && io_address_operand (x, QImode))
+ {
+ *l = 1;
+ return AS2 (in,%0,%m1-0x20);
+ }
+ *l = 2;
+ return AS2 (lds,%0,%m1);
+ }
+  /* Memory access by reg+disp.  */
+ else if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x,0))
+ && GET_CODE (XEXP (x,1)) == CONST_INT)
+ {
+ if ((INTVAL (XEXP (x,1)) - GET_MODE_SIZE (GET_MODE (src))) >= 63)
+ {
+ int disp = INTVAL (XEXP (x,1));
+ if (REGNO (XEXP (x,0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return *l = 3, (AS2 (adiw,r28,%o1-63) CR_TAB
+ AS2 (ldd,%0,Y+63) CR_TAB
+ AS2 (sbiw,r28,%o1-63));
+
+ return *l = 5, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+ AS2 (ld,%0,Y) CR_TAB
+ AS2 (subi,r28,lo8(%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(%o1)));
+ }
+ else if (REGNO (XEXP (x,0)) == REG_X)
+ {
+	  /* This is a paranoid case; LEGITIMIZE_RELOAD_ADDRESS must
+	     exclude it, but it can still occur with extreme
+	     optimization options.  */
+ if (reg_overlap_mentioned_p (dest, XEXP (x,0))
+ || reg_unused_after (insn, XEXP (x,0)))
+ return *l = 2, (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,%0,X));
+
+ return *l = 3, (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,%0,X) CR_TAB
+ AS2 (sbiw,r26,%o1));
+ }
+ *l = 1;
+ return AS2 (ldd,%0,%1);
+ }
+ *l = 1;
+ return AS2 (ld,%0,%1);
+}
+
+const char *
+out_movhi_r_mr (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (src, 0);
+ int reg_dest = true_regnum (dest);
+ int reg_base = true_regnum (base);
+ /* "volatile" forces reading low byte first, even if less efficient,
+ for correct operation with 16-bit I/O registers. */
+ int mem_volatile_p = MEM_VOLATILE_P (src);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+
+ if (reg_base > 0)
+ {
+ if (reg_dest == reg_base) /* R = (R) */
+ {
+ *l = 3;
+ return (AS2 (ld,__tmp_reg__,%1+) CR_TAB
+ AS2 (ld,%B0,%1) CR_TAB
+ AS2 (mov,%A0,__tmp_reg__));
+ }
+ else if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_unused_after (insn, base))
+ {
+ *l = 2;
+ return (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X));
+ }
+ *l = 3;
+ return (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X) CR_TAB
+ AS2 (sbiw,r26,1));
+ }
+ else /* (R) */
+ {
+ *l = 2;
+ return (AS2 (ld,%A0,%1) CR_TAB
+ AS2 (ldd,%B0,%1+1));
+ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ int reg_base = true_regnum (XEXP (base, 0));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (src)))
+ {
+ if (REGNO (XEXP (base, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return *l = 4, (AS2 (adiw,r28,%o1-62) CR_TAB
+ AS2 (ldd,%A0,Y+62) CR_TAB
+ AS2 (ldd,%B0,Y+63) CR_TAB
+ AS2 (sbiw,r28,%o1-62));
+
+ return *l = 6, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+ AS2 (ld,%A0,Y) CR_TAB
+ AS2 (ldd,%B0,Y+1) CR_TAB
+ AS2 (subi,r28,lo8(%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(%o1)));
+ }
+ if (reg_base == REG_X)
+ {
+	  /* This is a paranoid case.  LEGITIMIZE_RELOAD_ADDRESS must
+	     exclude it, but it can still occur with extreme
+	     optimization options.  */
+
+ *l = 4;
+ if (reg_base == reg_dest)
+ return (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,__tmp_reg__,X+) CR_TAB
+ AS2 (ld,%B0,X) CR_TAB
+ AS2 (mov,%A0,__tmp_reg__));
+
+ return (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X) CR_TAB
+ AS2 (sbiw,r26,%o1+1));
+ }
+
+ if (reg_base == reg_dest)
+ {
+ *l = 3;
+ return (AS2 (ldd,__tmp_reg__,%A1) CR_TAB
+ AS2 (ldd,%B0,%B1) CR_TAB
+ AS2 (mov,%A0,__tmp_reg__));
+ }
+
+ *l = 2;
+ return (AS2 (ldd,%A0,%A1) CR_TAB
+ AS2 (ldd,%B0,%B1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+ if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
+ fatal_insn ("incorrect insn:", insn);
+
+ if (mem_volatile_p)
+ {
+ if (REGNO (XEXP (base, 0)) == REG_X)
+ {
+ *l = 4;
+ return (AS2 (sbiw,r26,2) CR_TAB
+ AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X) CR_TAB
+ AS2 (sbiw,r26,1));
+ }
+ else
+ {
+ *l = 3;
+ return (AS2 (sbiw,%r1,2) CR_TAB
+ AS2 (ld,%A0,%p1) CR_TAB
+ AS2 (ldd,%B0,%p1+1));
+ }
+ }
+
+ *l = 2;
+ return (AS2 (ld,%B0,%1) CR_TAB
+ AS2 (ld,%A0,%1));
+ }
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+ if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
+ fatal_insn ("incorrect insn:", insn);
+
+ *l = 2;
+ return (AS2 (ld,%A0,%1) CR_TAB
+ AS2 (ld,%B0,%1));
+ }
+ else if (CONSTANT_ADDRESS_P (base))
+ {
+ if (optimize > 0 && io_address_operand (base, HImode))
+ {
+ *l = 2;
+ return (AS2 (in,%A0,%m1-0x20) CR_TAB
+ AS2 (in,%B0,%m1+1-0x20));
+ }
+ *l = 4;
+ return (AS2 (lds,%A0,%m1) CR_TAB
+ AS2 (lds,%B0,%m1+1));
+ }
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+const char *
+out_movsi_r_mr (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (src, 0);
+ int reg_dest = true_regnum (dest);
+ int reg_base = true_regnum (base);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+
+ if (reg_base > 0)
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_dest == REG_X)
+ /* "ld r26,-X" is undefined */
+ return *l=7, (AS2 (adiw,r26,3) CR_TAB
+ AS2 (ld,r29,X) CR_TAB
+ AS2 (ld,r28,-X) CR_TAB
+ AS2 (ld,__tmp_reg__,-X) CR_TAB
+ AS2 (sbiw,r26,1) CR_TAB
+ AS2 (ld,r26,X) CR_TAB
+ AS2 (mov,r27,__tmp_reg__));
+ else if (reg_dest == REG_X - 2)
+ return *l=5, (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X+) CR_TAB
+ AS2 (ld,__tmp_reg__,X+) CR_TAB
+ AS2 (ld,%D0,X) CR_TAB
+ AS2 (mov,%C0,__tmp_reg__));
+ else if (reg_unused_after (insn, base))
+ return *l=4, (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X+) CR_TAB
+ AS2 (ld,%C0,X+) CR_TAB
+ AS2 (ld,%D0,X));
+ else
+ return *l=5, (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X+) CR_TAB
+ AS2 (ld,%C0,X+) CR_TAB
+ AS2 (ld,%D0,X) CR_TAB
+ AS2 (sbiw,r26,3));
+ }
+ else
+ {
+ if (reg_dest == reg_base)
+ return *l=5, (AS2 (ldd,%D0,%1+3) CR_TAB
+ AS2 (ldd,%C0,%1+2) CR_TAB
+ AS2 (ldd,__tmp_reg__,%1+1) CR_TAB
+ AS2 (ld,%A0,%1) CR_TAB
+ AS2 (mov,%B0,__tmp_reg__));
+ else if (reg_base == reg_dest + 2)
+ return *l=5, (AS2 (ld ,%A0,%1) CR_TAB
+ AS2 (ldd,%B0,%1+1) CR_TAB
+ AS2 (ldd,__tmp_reg__,%1+2) CR_TAB
+ AS2 (ldd,%D0,%1+3) CR_TAB
+ AS2 (mov,%C0,__tmp_reg__));
+ else
+ return *l=4, (AS2 (ld ,%A0,%1) CR_TAB
+ AS2 (ldd,%B0,%1+1) CR_TAB
+ AS2 (ldd,%C0,%1+2) CR_TAB
+ AS2 (ldd,%D0,%1+3));
+ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (src)))
+ {
+ if (REGNO (XEXP (base, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return *l = 6, (AS2 (adiw,r28,%o1-60) CR_TAB
+ AS2 (ldd,%A0,Y+60) CR_TAB
+ AS2 (ldd,%B0,Y+61) CR_TAB
+ AS2 (ldd,%C0,Y+62) CR_TAB
+ AS2 (ldd,%D0,Y+63) CR_TAB
+ AS2 (sbiw,r28,%o1-60));
+
+ return *l = 8, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+ AS2 (ld,%A0,Y) CR_TAB
+ AS2 (ldd,%B0,Y+1) CR_TAB
+ AS2 (ldd,%C0,Y+2) CR_TAB
+ AS2 (ldd,%D0,Y+3) CR_TAB
+ AS2 (subi,r28,lo8(%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(%o1)));
+ }
+
+ reg_base = true_regnum (XEXP (base, 0));
+ if (reg_base == REG_X)
+ {
+ /* R = (X + d) */
+ if (reg_dest == REG_X)
+ {
+ *l = 7;
+ /* "ld r26,-X" is undefined */
+ return (AS2 (adiw,r26,%o1+3) CR_TAB
+ AS2 (ld,r29,X) CR_TAB
+ AS2 (ld,r28,-X) CR_TAB
+ AS2 (ld,__tmp_reg__,-X) CR_TAB
+ AS2 (sbiw,r26,1) CR_TAB
+ AS2 (ld,r26,X) CR_TAB
+ AS2 (mov,r27,__tmp_reg__));
+ }
+ *l = 6;
+ if (reg_dest == REG_X - 2)
+ return (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,r24,X+) CR_TAB
+ AS2 (ld,r25,X+) CR_TAB
+ AS2 (ld,__tmp_reg__,X+) CR_TAB
+ AS2 (ld,r27,X) CR_TAB
+ AS2 (mov,r26,__tmp_reg__));
+
+ return (AS2 (adiw,r26,%o1) CR_TAB
+ AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X+) CR_TAB
+ AS2 (ld,%C0,X+) CR_TAB
+ AS2 (ld,%D0,X) CR_TAB
+ AS2 (sbiw,r26,%o1+3));
+ }
+ if (reg_dest == reg_base)
+ return *l=5, (AS2 (ldd,%D0,%D1) CR_TAB
+ AS2 (ldd,%C0,%C1) CR_TAB
+ AS2 (ldd,__tmp_reg__,%B1) CR_TAB
+ AS2 (ldd,%A0,%A1) CR_TAB
+ AS2 (mov,%B0,__tmp_reg__));
+ else if (reg_dest == reg_base - 2)
+ return *l=5, (AS2 (ldd,%A0,%A1) CR_TAB
+ AS2 (ldd,%B0,%B1) CR_TAB
+ AS2 (ldd,__tmp_reg__,%C1) CR_TAB
+ AS2 (ldd,%D0,%D1) CR_TAB
+ AS2 (mov,%C0,__tmp_reg__));
+ return *l=4, (AS2 (ldd,%A0,%A1) CR_TAB
+ AS2 (ldd,%B0,%B1) CR_TAB
+ AS2 (ldd,%C0,%C1) CR_TAB
+ AS2 (ldd,%D0,%D1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, (AS2 (ld,%D0,%1) CR_TAB
+ AS2 (ld,%C0,%1) CR_TAB
+ AS2 (ld,%B0,%1) CR_TAB
+ AS2 (ld,%A0,%1));
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return *l=4, (AS2 (ld,%A0,%1) CR_TAB
+ AS2 (ld,%B0,%1) CR_TAB
+ AS2 (ld,%C0,%1) CR_TAB
+ AS2 (ld,%D0,%1));
+ else if (CONSTANT_ADDRESS_P (base))
+ return *l=8, (AS2 (lds,%A0,%m1) CR_TAB
+ AS2 (lds,%B0,%m1+1) CR_TAB
+ AS2 (lds,%C0,%m1+2) CR_TAB
+ AS2 (lds,%D0,%m1+3));
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+const char *
+out_movsi_mr_r (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+ int reg_src = true_regnum (src);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+
+ if (CONSTANT_ADDRESS_P (base))
+ return *l=8,(AS2 (sts,%m0,%A1) CR_TAB
+ AS2 (sts,%m0+1,%B1) CR_TAB
+ AS2 (sts,%m0+2,%C1) CR_TAB
+ AS2 (sts,%m0+3,%D1));
+ if (reg_base > 0) /* (r) */
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_src == REG_X)
+ {
+ /* "st X+,r26" is undefined */
+ if (reg_unused_after (insn, base))
+ return *l=6, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+ AS2 (st,X,r26) CR_TAB
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X+,__tmp_reg__) CR_TAB
+ AS2 (st,X+,r28) CR_TAB
+ AS2 (st,X,r29));
+ else
+ return *l=7, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+ AS2 (st,X,r26) CR_TAB
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X+,__tmp_reg__) CR_TAB
+ AS2 (st,X+,r28) CR_TAB
+ AS2 (st,X,r29) CR_TAB
+ AS2 (sbiw,r26,3));
+ }
+ else if (reg_base == reg_src + 2)
+ {
+ if (reg_unused_after (insn, base))
+ return *l=7, (AS2 (mov,__zero_reg__,%C1) CR_TAB
+ AS2 (mov,__tmp_reg__,%D1) CR_TAB
+ AS2 (st,%0+,%A1) CR_TAB
+ AS2 (st,%0+,%B1) CR_TAB
+ AS2 (st,%0+,__zero_reg__) CR_TAB
+ AS2 (st,%0,__tmp_reg__) CR_TAB
+ AS1 (clr,__zero_reg__));
+ else
+ return *l=8, (AS2 (mov,__zero_reg__,%C1) CR_TAB
+ AS2 (mov,__tmp_reg__,%D1) CR_TAB
+ AS2 (st,%0+,%A1) CR_TAB
+ AS2 (st,%0+,%B1) CR_TAB
+ AS2 (st,%0+,__zero_reg__) CR_TAB
+ AS2 (st,%0,__tmp_reg__) CR_TAB
+ AS1 (clr,__zero_reg__) CR_TAB
+ AS2 (sbiw,r26,3));
+ }
+ return *l=5, (AS2 (st,%0+,%A1) CR_TAB
+ AS2 (st,%0+,%B1) CR_TAB
+ AS2 (st,%0+,%C1) CR_TAB
+ AS2 (st,%0,%D1) CR_TAB
+ AS2 (sbiw,r26,3));
+ }
+ else
+ return *l=4, (AS2 (st,%0,%A1) CR_TAB
+ AS2 (std,%0+1,%B1) CR_TAB
+ AS2 (std,%0+2,%C1) CR_TAB
+ AS2 (std,%0+3,%D1));
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return *l = 6, (AS2 (adiw,r28,%o0-60) CR_TAB
+ AS2 (std,Y+60,%A1) CR_TAB
+ AS2 (std,Y+61,%B1) CR_TAB
+ AS2 (std,Y+62,%C1) CR_TAB
+ AS2 (std,Y+63,%D1) CR_TAB
+ AS2 (sbiw,r28,%o0-60));
+
+ return *l = 8, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+ AS2 (st,Y,%A1) CR_TAB
+ AS2 (std,Y+1,%B1) CR_TAB
+ AS2 (std,Y+2,%C1) CR_TAB
+ AS2 (std,Y+3,%D1) CR_TAB
+ AS2 (subi,r28,lo8(%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(%o0)));
+ }
+ if (reg_base == REG_X)
+ {
+ /* (X + d) = R */
+ if (reg_src == REG_X)
+ {
+ *l = 9;
+ return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+ AS2 (mov,__zero_reg__,r27) CR_TAB
+ AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X+,__tmp_reg__) CR_TAB
+ AS2 (st,X+,__zero_reg__) CR_TAB
+ AS2 (st,X+,r28) CR_TAB
+ AS2 (st,X,r29) CR_TAB
+ AS1 (clr,__zero_reg__) CR_TAB
+ AS2 (sbiw,r26,%o0+3));
+ }
+ else if (reg_src == REG_X - 2)
+ {
+ *l = 9;
+ return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+ AS2 (mov,__zero_reg__,r27) CR_TAB
+ AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X+,r24) CR_TAB
+ AS2 (st,X+,r25) CR_TAB
+ AS2 (st,X+,__tmp_reg__) CR_TAB
+ AS2 (st,X,__zero_reg__) CR_TAB
+ AS1 (clr,__zero_reg__) CR_TAB
+ AS2 (sbiw,r26,%o0+3));
+ }
+ *l = 6;
+ return (AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X+,%A1) CR_TAB
+ AS2 (st,X+,%B1) CR_TAB
+ AS2 (st,X+,%C1) CR_TAB
+ AS2 (st,X,%D1) CR_TAB
+ AS2 (sbiw,r26,%o0+3));
+ }
+ return *l=4, (AS2 (std,%A0,%A1) CR_TAB
+ AS2 (std,%B0,%B1) CR_TAB
+ AS2 (std,%C0,%C1) CR_TAB
+ AS2 (std,%D0,%D1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, (AS2 (st,%0,%D1) CR_TAB
+ AS2 (st,%0,%C1) CR_TAB
+ AS2 (st,%0,%B1) CR_TAB
+ AS2 (st,%0,%A1));
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return *l=4, (AS2 (st,%0,%A1) CR_TAB
+ AS2 (st,%0,%B1) CR_TAB
+ AS2 (st,%0,%C1) CR_TAB
+ AS2 (st,%0,%D1));
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+const char *
+output_movsisf (rtx insn, rtx operands[], int *l)
+{
+ int dummy;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int *real_l = l;
+
+ if (!l)
+ l = &dummy;
+
+ if (register_operand (dest, VOIDmode))
+ {
+ if (register_operand (src, VOIDmode)) /* mov r,r */
+ {
+ if (true_regnum (dest) > true_regnum (src))
+ {
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 2;
+ return (AS2 (movw,%C0,%C1) CR_TAB
+ AS2 (movw,%A0,%A1));
+ }
+ *l = 4;
+ return (AS2 (mov,%D0,%D1) CR_TAB
+ AS2 (mov,%C0,%C1) CR_TAB
+ AS2 (mov,%B0,%B1) CR_TAB
+ AS2 (mov,%A0,%A1));
+ }
+ else
+ {
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 2;
+ return (AS2 (movw,%A0,%A1) CR_TAB
+ AS2 (movw,%C0,%C1));
+ }
+ *l = 4;
+ return (AS2 (mov,%A0,%A1) CR_TAB
+ AS2 (mov,%B0,%B1) CR_TAB
+ AS2 (mov,%C0,%C1) CR_TAB
+ AS2 (mov,%D0,%D1));
+ }
+ }
+ else if (CONSTANT_P (src))
+ {
+ if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
+ {
+ *l = 4;
+ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
+ AS2 (ldi,%B0,hi8(%1)) CR_TAB
+ AS2 (ldi,%C0,hlo8(%1)) CR_TAB
+ AS2 (ldi,%D0,hhi8(%1)));
+ }
+
+ if (GET_CODE (src) == CONST_INT)
+ {
+ const char *const clr_op0 =
+ AVR_HAVE_MOVW ? (AS1 (clr,%A0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS2 (movw,%C0,%A0))
+ : (AS1 (clr,%A0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+
+ if (src == const0_rtx) /* mov r,L */
+ {
+ *l = AVR_HAVE_MOVW ? 3 : 4;
+ return clr_op0;
+ }
+ else if (src == const1_rtx)
+ {
+ if (!real_l)
+ output_asm_insn (clr_op0, operands);
+ *l = AVR_HAVE_MOVW ? 4 : 5;
+ return AS1 (inc,%A0);
+ }
+ else if (src == constm1_rtx)
+ {
+	      /* Load the immediate constant -1 into any register.  */
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 4;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (dec,%A0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (movw,%C0,%A0));
+ }
+ *l = 5;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (dec,%A0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (mov,%C0,%A0) CR_TAB
+ AS2 (mov,%D0,%A0));
+ }
+ else
+ {
+ int bit_nr = exact_log2 (INTVAL (src));
+
+ if (bit_nr >= 0)
+ {
+ *l = AVR_HAVE_MOVW ? 5 : 6;
+ if (!real_l)
+ {
+ output_asm_insn (clr_op0, operands);
+ output_asm_insn ("set", operands);
+ }
+ if (!real_l)
+ avr_output_bld (operands, bit_nr);
+
+ return "";
+ }
+ }
+ }
+
+ /* Last resort, better than loading from memory. */
+ *l = 10;
+ return (AS2 (mov,__tmp_reg__,r31) CR_TAB
+ AS2 (ldi,r31,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,r31) CR_TAB
+ AS2 (ldi,r31,hi8(%1)) CR_TAB
+ AS2 (mov,%B0,r31) CR_TAB
+ AS2 (ldi,r31,hlo8(%1)) CR_TAB
+ AS2 (mov,%C0,r31) CR_TAB
+ AS2 (ldi,r31,hhi8(%1)) CR_TAB
+ AS2 (mov,%D0,r31) CR_TAB
+ AS2 (mov,r31,__tmp_reg__));
+ }
+ else if (GET_CODE (src) == MEM)
+ return out_movsi_r_mr (insn, operands, real_l); /* mov r,m */
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ const char *templ;
+
+ if (src == const0_rtx)
+ operands[1] = zero_reg_rtx;
+
+ templ = out_movsi_mr_r (insn, operands, real_l);
+
+ if (!real_l)
+ output_asm_insn (templ, operands);
+
+ operands[1] = src;
+ return "";
+ }
+ fatal_insn ("invalid insn:", insn);
+ return "";
+}
+
+const char *
+out_movqi_mr_r (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx x = XEXP (dest, 0);
+ int dummy;
+
+ if (!l)
+ l = &dummy;
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+ if (CONST_INT_P (x) && INTVAL (x) == SREG_ADDR)
+ {
+ *l = 1;
+ return AS2 (out,__SREG__,%1);
+ }
+ if (optimize > 0 && io_address_operand (x, QImode))
+ {
+ *l = 1;
+ return AS2 (out,%m0-0x20,%1);
+ }
+ *l = 2;
+ return AS2 (sts,%m0,%1);
+ }
+  /* Memory access by reg+disp.  */
+ else if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x,0))
+ && GET_CODE (XEXP (x,1)) == CONST_INT)
+ {
+ if ((INTVAL (XEXP (x,1)) - GET_MODE_SIZE (GET_MODE (dest))) >= 63)
+ {
+ int disp = INTVAL (XEXP (x,1));
+ if (REGNO (XEXP (x,0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return *l = 3, (AS2 (adiw,r28,%o0-63) CR_TAB
+ AS2 (std,Y+63,%1) CR_TAB
+ AS2 (sbiw,r28,%o0-63));
+
+ return *l = 5, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+ AS2 (st,Y,%1) CR_TAB
+ AS2 (subi,r28,lo8(%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(%o0)));
+ }
+ else if (REGNO (XEXP (x,0)) == REG_X)
+ {
+ if (reg_overlap_mentioned_p (src, XEXP (x, 0)))
+ {
+ if (reg_unused_after (insn, XEXP (x,0)))
+ return *l = 3, (AS2 (mov,__tmp_reg__,%1) CR_TAB
+ AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X,__tmp_reg__));
+
+ return *l = 4, (AS2 (mov,__tmp_reg__,%1) CR_TAB
+ AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X,__tmp_reg__) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
+ else
+ {
+ if (reg_unused_after (insn, XEXP (x,0)))
+ return *l = 2, (AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X,%1));
+
+ return *l = 3, (AS2 (adiw,r26,%o0) CR_TAB
+ AS2 (st,X,%1) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
+ }
+ *l = 1;
+ return AS2 (std,%0,%1);
+ }
+ *l = 1;
+ return AS2 (st,%0,%1);
+}
+
+const char *
+out_movhi_mr_r (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+ int reg_src = true_regnum (src);
+ /* "volatile" forces writing high byte first, even if less efficient,
+ for correct operation with 16-bit I/O registers. */
+ int mem_volatile_p = MEM_VOLATILE_P (dest);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+ if (CONSTANT_ADDRESS_P (base))
+ {
+ if (optimize > 0 && io_address_operand (base, HImode))
+ {
+ *l = 2;
+ return (AS2 (out,%m0+1-0x20,%B1) CR_TAB
+ AS2 (out,%m0-0x20,%A1));
+ }
+ return *l = 4, (AS2 (sts,%m0+1,%B1) CR_TAB
+ AS2 (sts,%m0,%A1));
+ }
+ if (reg_base > 0)
+ {
+ if (reg_base == REG_X)
+ {
+ if (reg_src == REG_X)
+ {
+ /* "st X+,r26" and "st -X,r26" are undefined. */
+ if (!mem_volatile_p && reg_unused_after (insn, src))
+ return *l=4, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+ AS2 (st,X,r26) CR_TAB
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,__tmp_reg__));
+ else
+ return *l=5, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,__tmp_reg__) CR_TAB
+ AS2 (sbiw,r26,1) CR_TAB
+ AS2 (st,X,r26));
+ }
+ else
+ {
+ if (!mem_volatile_p && reg_unused_after (insn, base))
+ return *l=2, (AS2 (st,X+,%A1) CR_TAB
+ AS2 (st,X,%B1));
+ else
+ return *l=3, (AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,%B1) CR_TAB
+ AS2 (st,-X,%A1));
+ }
+ }
+ else
+ return *l=2, (AS2 (std,%0+1,%B1) CR_TAB
+ AS2 (st,%0,%A1));
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return *l = 4, (AS2 (adiw,r28,%o0-62) CR_TAB
+ AS2 (std,Y+63,%B1) CR_TAB
+ AS2 (std,Y+62,%A1) CR_TAB
+ AS2 (sbiw,r28,%o0-62));
+
+ return *l = 6, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+ AS2 (std,Y+1,%B1) CR_TAB
+ AS2 (st,Y,%A1) CR_TAB
+ AS2 (subi,r28,lo8(%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(%o0)));
+ }
+ if (reg_base == REG_X)
+ {
+ /* (X + d) = R */
+ if (reg_src == REG_X)
+ {
+ *l = 7;
+ return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+ AS2 (mov,__zero_reg__,r27) CR_TAB
+ AS2 (adiw,r26,%o0+1) CR_TAB
+ AS2 (st,X,__zero_reg__) CR_TAB
+ AS2 (st,-X,__tmp_reg__) CR_TAB
+ AS1 (clr,__zero_reg__) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
+ *l = 4;
+ return (AS2 (adiw,r26,%o0+1) CR_TAB
+ AS2 (st,X,%B1) CR_TAB
+ AS2 (st,-X,%A1) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
+ return *l=2, (AS2 (std,%B0,%B1) CR_TAB
+ AS2 (std,%A0,%A1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=2, (AS2 (st,%0,%B1) CR_TAB
+ AS2 (st,%0,%A1));
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+ if (mem_volatile_p)
+ {
+ if (REGNO (XEXP (base, 0)) == REG_X)
+ {
+ *l = 4;
+ return (AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,%B1) CR_TAB
+ AS2 (st,-X,%A1) CR_TAB
+ AS2 (adiw,r26,2));
+ }
+ else
+ {
+ *l = 3;
+ return (AS2 (std,%p0+1,%B1) CR_TAB
+ AS2 (st,%p0,%A1) CR_TAB
+ AS2 (adiw,%r0,2));
+ }
+ }
+
+ *l = 2;
+ return (AS2 (st,%0,%A1) CR_TAB
+ AS2 (st,%0,%B1));
+ }
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+/* Return true if the current function requires a frame pointer.  */
+
+bool
+avr_frame_pointer_required_p (void)
+{
+ return (cfun->calls_alloca
+ || crtl->args.info.nregs == 0
+ || get_frame_size () > 0);
+}
+
+/* Returns the condition of compare insn INSN, or UNKNOWN. */
+
+static RTX_CODE
+compare_condition (rtx insn)
+{
+ rtx next = next_real_insn (insn);
+ RTX_CODE cond = UNKNOWN;
+ if (next && GET_CODE (next) == JUMP_INSN)
+ {
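+      /* A conditional jump has the form
+         (set (pc) (if_then_else (COND ...) ...)); extract COND.  */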
+ rtx pat = PATTERN (next);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src, 0);
+ cond = GET_CODE (t);
+ }
+ return cond;
+}
+
+/* Returns nonzero if INSN is a tst insn that only tests the sign. */
+
+static int
+compare_sign_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return (cond == GE || cond == LT);
+}
+
+/* Returns nonzero if the next insn is a JUMP_INSN with a condition
+ that needs to be swapped (GT, GTU, LE, LEU). */
+
+int
+compare_diff_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return (cond == GT || cond == GTU || cond == LE || cond == LEU) ? cond : 0;
+}
+
+/* Returns nonzero if INSN is a compare insn with the EQ or NE condition. */
+
+int
+compare_eq_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return (cond == EQ || cond == NE);
+}
+
+
+/* Output test instruction for HImode. */
+
+const char *
+out_tsthi (rtx insn, rtx op, int *l)
+{
+ if (compare_sign_p (insn))
+ {
+ if (l) *l = 1;
+ return AS1 (tst,%B0);
+ }
+ if (reg_unused_after (insn, op)
+ && compare_eq_p (insn))
+ {
+ /* Faster than sbiw if we can clobber the operand. */
+ if (l) *l = 1;
+ return "or %A0,%B0";
+ }
+ if (test_hard_reg_class (ADDW_REGS, op))
+ {
+ if (l) *l = 1;
+ return AS2 (sbiw,%0,0);
+ }
+ if (l) *l = 2;
+ return (AS2 (cp,%A0,__zero_reg__) CR_TAB
+ AS2 (cpc,%B0,__zero_reg__));
+}
+
+
+/* Output test instruction for SImode. */
+
+const char *
+out_tstsi (rtx insn, rtx op, int *l)
+{
+ if (compare_sign_p (insn))
+ {
+ if (l) *l = 1;
+ return AS1 (tst,%D0);
+ }
+ if (test_hard_reg_class (ADDW_REGS, op))
+ {
+ if (l) *l = 3;
+ return (AS2 (sbiw,%A0,0) CR_TAB
+ AS2 (cpc,%C0,__zero_reg__) CR_TAB
+ AS2 (cpc,%D0,__zero_reg__));
+ }
+ if (l) *l = 4;
+ return (AS2 (cp,%A0,__zero_reg__) CR_TAB
+ AS2 (cpc,%B0,__zero_reg__) CR_TAB
+ AS2 (cpc,%C0,__zero_reg__) CR_TAB
+ AS2 (cpc,%D0,__zero_reg__));
+}
+
+
+/* Generate asm equivalent for various shifts.
+ Shift count is a CONST_INT, MEM or REG.
+ This only handles cases that are not already
+ carefully hand-optimized in ?sh??i3_out. */
+
+void
+out_shift_with_cnt (const char *templ, rtx insn, rtx operands[],
+ int *len, int t_len)
+{
+ rtx op[10];
+ char str[500];
+ int second_label = 1;
+ int saved_in_tmp = 0;
+ int use_zero_reg = 0;
+
+ op[0] = operands[0];
+ op[1] = operands[1];
+ op[2] = operands[2];
+ op[3] = operands[3];
+ str[0] = 0;
+
+ if (len)
+ *len = 1;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int count = INTVAL (operands[2]);
+ int max_len = 10; /* If larger than this, always use a loop. */
+
+ if (count <= 0)
+ {
+ if (len)
+ *len = 0;
+ return;
+ }
+
+ if (count < 8 && !scratch)
+ use_zero_reg = 1;
+
+ if (optimize_size)
+ max_len = t_len + (scratch ? 3 : (use_zero_reg ? 4 : 5));
+
+ if (t_len * count <= max_len)
+ {
+ /* Output shifts inline with no loop - faster. */
+ if (len)
+ *len = t_len * count;
+ else
+ {
+ while (count-- > 0)
+ output_asm_insn (templ, op);
+ }
+
+ return;
+ }
+
+ if (scratch)
+ {
+ if (!len)
+ strcat (str, AS2 (ldi,%3,%2));
+ }
+ else if (use_zero_reg)
+ {
+ /* Hack to save one word: use __zero_reg__ as loop counter.
+ Set one bit, then shift in a loop until it is 0 again. */
+
+ op[3] = zero_reg_rtx;
+ if (len)
+ *len = 2;
+ else
+ strcat (str, ("set" CR_TAB
+ AS2 (bld,%3,%2-1)));
+ }
+ else
+ {
+ /* No scratch register available, use one from LD_REGS (saved in
+ __tmp_reg__) that doesn't overlap with registers to shift. */
+
+ op[3] = gen_rtx_REG (QImode,
+ ((true_regnum (operands[0]) - 1) & 15) + 16);
+ op[4] = tmp_reg_rtx;
+ saved_in_tmp = 1;
+
+ if (len)
+ *len = 3; /* Includes "mov %3,%4" after the loop. */
+ else
+ strcat (str, (AS2 (mov,%4,%3) CR_TAB
+ AS2 (ldi,%3,%2)));
+ }
+
+ second_label = 0;
+ }
+ else if (GET_CODE (operands[2]) == MEM)
+ {
+ rtx op_mov[10];
+
+ op[3] = op_mov[0] = tmp_reg_rtx;
+ op_mov[1] = op[2];
+
+ if (len)
+ out_movqi_r_mr (insn, op_mov, len);
+ else
+ output_asm_insn (out_movqi_r_mr (insn, op_mov, NULL), op_mov);
+ }
+ else if (register_operand (operands[2], QImode))
+ {
+ if (reg_unused_after (insn, operands[2])
+ && !reg_overlap_mentioned_p (operands[0], operands[2]))
+ {
+ op[3] = op[2];
+ }
+ else
+ {
+ op[3] = tmp_reg_rtx;
+ if (!len)
+ strcat (str, (AS2 (mov,%3,%2) CR_TAB));
+ }
+ }
+ else
+ fatal_insn ("bad shift insn:", insn);
+
+ if (second_label)
+ {
+ if (len)
+ ++*len;
+ else
+ strcat (str, AS1 (rjmp,2f));
+ }
+
+ if (len)
+ *len += t_len + 2; /* template + dec + brXX */
+ else
+ {
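+      /* Assemble the loop:  optional 'rjmp 2f', then '1: <template>',
+         optional '2:', then 'lsr %3' or 'dec %3' with a conditional
+         branch back to 1b, and finally 'mov %3,%4' if the loop count
+         was saved in __tmp_reg__.  */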
+ strcat (str, "\n1:\t");
+ strcat (str, templ);
+ strcat (str, second_label ? "\n2:\t" : "\n\t");
+ strcat (str, use_zero_reg ? AS1 (lsr,%3) : AS1 (dec,%3));
+ strcat (str, CR_TAB);
+ strcat (str, second_label ? AS1 (brpl,1b) : AS1 (brne,1b));
+ if (saved_in_tmp)
+ strcat (str, (CR_TAB AS2 (mov,%3,%4)));
+ output_asm_insn (str, op);
+ }
+}
+
+
+/* 8-bit shift left ((char)x << i).  */
+
+const char *
+ashlqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ *len = 1;
+ return AS1 (clr,%0);
+
+ case 1:
+ *len = 1;
+ return AS1 (lsl,%0);
+
+ case 2:
+ *len = 2;
+ return (AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0));
+
+ case 3:
+ *len = 3;
+ return (AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0));
+
+ case 4:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
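+	    /* swap exchanges the two nibbles; andi then clears the low
+	       nibble, giving x << 4 in two insns (andi requires an
+	       upper LD_REGS register).  */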
+ *len = 2;
+ return (AS1 (swap,%0) CR_TAB
+ AS2 (andi,%0,0xf0));
+ }
+ *len = 4;
+ return (AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0));
+
+ case 5:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 3;
+ return (AS1 (swap,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS2 (andi,%0,0xe0));
+ }
+ *len = 5;
+ return (AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0));
+
+ case 6:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 4;
+ return (AS1 (swap,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS2 (andi,%0,0xc0));
+ }
+ *len = 6;
+ return (AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS1 (lsl,%0));
+
+ case 7:
+ *len = 3;
+ return (AS1 (ror,%0) CR_TAB
+ AS1 (clr,%0) CR_TAB
+ AS1 (ror,%0));
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt (AS1 (lsl,%0),
+ insn, operands, len, 1);
+ return "";
+}
+
+
+/* 16-bit shift left ((short)x << i).  */
+
+const char *
+ashlhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ *len = 2;
+ return (AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 4:
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (ldi_ok)
+ {
+ *len = 6;
+ return (AS1 (swap,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (andi,%B0,0xf0) CR_TAB
+ AS2 (eor,%B0,%A0) CR_TAB
+ AS2 (andi,%A0,0xf0) CR_TAB
+ AS2 (eor,%B0,%A0));
+ }
+ if (scratch)
+ {
+ *len = 7;
+ return (AS1 (swap,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (ldi,%3,0xf0) CR_TAB
+ "and %B0,%3" CR_TAB
+ AS2 (eor,%B0,%A0) CR_TAB
+ "and %A0,%3" CR_TAB
+ AS2 (eor,%B0,%A0));
+ }
+ break; /* optimize_size ? 6 : 8 */
+
+ case 5:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ if (ldi_ok)
+ {
+ *len = 8;
+ return (AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (andi,%B0,0xf0) CR_TAB
+ AS2 (eor,%B0,%A0) CR_TAB
+ AS2 (andi,%A0,0xf0) CR_TAB
+ AS2 (eor,%B0,%A0));
+ }
+ if (scratch)
+ {
+ *len = 9;
+ return (AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (ldi,%3,0xf0) CR_TAB
+ "and %B0,%3" CR_TAB
+ AS2 (eor,%B0,%A0) CR_TAB
+ "and %A0,%3" CR_TAB
+ AS2 (eor,%B0,%A0));
+ }
+ break; /* 10 */
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 9;
+ return (AS1 (clr,__tmp_reg__) CR_TAB
+ AS1 (lsr,%B0) CR_TAB
+ AS1 (ror,%A0) CR_TAB
+ AS1 (ror,__tmp_reg__) CR_TAB
+ AS1 (lsr,%B0) CR_TAB
+ AS1 (ror,%A0) CR_TAB
+ AS1 (ror,__tmp_reg__) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (mov,%A0,__tmp_reg__));
+
+ case 7:
+ *len = 5;
+ return (AS1 (lsr,%B0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (ror,%A0));
+
+ case 8:
+ return *len = 2, (AS2 (mov,%B0,%A1) CR_TAB
+ AS1 (clr,%A0));
+
+ case 9:
+ *len = 3;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0));
+
+ case 10:
+ *len = 4;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0));
+
+ case 11:
+ *len = 5;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0));
+
+ case 12:
+ if (ldi_ok)
+ {
+ *len = 4;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (andi,%B0,0xf0));
+ }
+ if (scratch)
+ {
+ *len = 5;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS2 (ldi,%3,0xf0) CR_TAB
+ "and %B0,%3");
+ }
+ *len = 6;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0));
+
+ case 13:
+ if (ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (andi,%B0,0xe0));
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return (AS2 (ldi,%3,0x20) CR_TAB
+ AS2 (mul,%A0,%3) CR_TAB
+ AS2 (mov,%B0,r0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (scratch)
+ {
+ *len = 6;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (ldi,%3,0xe0) CR_TAB
+ "and %B0,%3");
+ }
+ if (AVR_HAVE_MUL)
+ {
+ *len = 6;
+ return ("set" CR_TAB
+ AS2 (bld,r1,5) CR_TAB
+ AS2 (mul,%A0,r1) CR_TAB
+ AS2 (mov,%B0,r0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ *len = 7;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (lsl,%B0));
+
+ case 14:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (ldi,%B0,0x40) CR_TAB
+ AS2 (mul,%A0,%B0) CR_TAB
+ AS2 (mov,%B0,r0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return (AS2 (ldi,%3,0x40) CR_TAB
+ AS2 (mul,%A0,%3) CR_TAB
+ AS2 (mov,%B0,r0) CR_TAB
+ AS1 (clr,%A0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (ldi,%A0,6) "\n1:\t"
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (dec,%A0) CR_TAB
+ AS1 (brne,1b));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return (AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 15:
+ *len = 4;
+ return (AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (clr,%A0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0)),
+ insn, operands, len, 2);
+ return "";
+}
+
+
+/* 32-bit shift left ((long)x << i).  */
+
+const char *
+ashlsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ if (AVR_HAVE_MOVW)
+ return *len = 3, (AS1 (clr,%D0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS2 (movw,%A0,%C0));
+ *len = 4;
+ return (AS1 (clr,%D0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ *len = 4;
+ if (reg0 >= reg1)
+ return (AS2 (mov,%D0,%C1) CR_TAB
+ AS2 (mov,%C0,%B1) CR_TAB
+ AS2 (mov,%B0,%A1) CR_TAB
+ AS1 (clr,%A0));
+ else
+ return (AS1 (clr,%A0) CR_TAB
+ AS2 (mov,%B0,%A1) CR_TAB
+ AS2 (mov,%C0,%B1) CR_TAB
+ AS2 (mov,%D0,%C1));
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ if (reg0 + 2 == reg1)
+ return *len = 2, (AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+ if (AVR_HAVE_MOVW)
+ return *len = 3, (AS2 (movw,%C0,%A1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+ else
+ return *len = 4, (AS2 (mov,%C0,%A1) CR_TAB
+ AS2 (mov,%D0,%B1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+ }
+
+ case 24:
+ *len = 4;
+ return (AS2 (mov,%D0,%A1) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 31:
+ *len = 6;
+ return (AS1 (clr,%D0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (ror,%D0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0) CR_TAB
+ AS1 (rol,%C0) CR_TAB
+ AS1 (rol,%D0)),
+ insn, operands, len, 4);
+ return "";
+}
+
+/* 8-bit arithmetic shift right ((signed char)x >> i).  */
+
+const char *
+ashrqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ *len = 1;
+ return AS1 (asr,%0);
+
+ case 2:
+ *len = 2;
+ return (AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0));
+
+ case 3:
+ *len = 3;
+ return (AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0));
+
+ case 4:
+ *len = 4;
+ return (AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0));
+
+ case 5:
+ *len = 5;
+ return (AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0) CR_TAB
+ AS1 (asr,%0));
+
+ case 6:
+ *len = 4;
+ return (AS2 (bst,%0,6) CR_TAB
+ AS1 (lsl,%0) CR_TAB
+ AS2 (sbc,%0,%0) CR_TAB
+ AS2 (bld,%0,0));
+
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ /* fall through */
+
+ case 7:
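+	  /* lsl moves the sign bit into carry; sbc %0,%0 then yields
+	     0x00 or 0xFF, i.e. the value arithmetically shifted right
+	     by 7.  */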
+ *len = 2;
+ return (AS1 (lsl,%0) CR_TAB
+ AS2 (sbc,%0,%0));
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt (AS1 (asr,%0),
+ insn, operands, len, 1);
+ return "";
+}
+
+
+/* 16-bit arithmetic shift right ((signed short)x >> i).  */
+
+const char *
+ashrhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 4:
+ case 5:
+ /* XXX try to optimize this too? */
+ break;
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 8;
+ return (AS2 (mov,__tmp_reg__,%A0) CR_TAB
+ AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,__tmp_reg__) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (lsl,__tmp_reg__) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS1 (rol,%B0));
+
+ case 7:
+ *len = 4;
+ return (AS1 (lsl,%A0) CR_TAB
+ AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS2 (sbc,%B0,%B0));
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1)
+ return *len = 3, (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0));
+ else
+ return *len = 4, (AS2 (mov,%A0,%B1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS2 (sbrc,%A0,7) CR_TAB
+ AS1 (dec,%B0));
+ }
+
+ case 9:
+ *len = 4;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (asr,%A0));
+
+ case 10:
+ *len = 5;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0));
+
+ case 11:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (ldi,%A0,0x20) CR_TAB
+ AS2 (muls,%B0,%A0) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0));
+
+ case 12:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (ldi,%A0,0x10) CR_TAB
+ AS2 (muls,%B0,%A0) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 7;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0));
+
+ case 13:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (ldi,%A0,0x08) CR_TAB
+ AS2 (muls,%B0,%A0) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size)
+ break; /* scratch ? 5 : 7 */
+ *len = 8;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0) CR_TAB
+ AS1 (asr,%A0));
+
+ case 14:
+ *len = 5;
+ return (AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%A0,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS1 (rol,%A0));
+
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ /* fall through */
+
+ case 15:
+ return *len = 3, (AS1 (lsl,%B0) CR_TAB
+ AS2 (sbc,%A0,%A0) CR_TAB
+ AS2 (mov,%B0,%A0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (asr,%B0) CR_TAB
+ AS1 (ror,%A0)),
+ insn, operands, len, 2);
+ return "";
+}
+
+
+/* 32bit arithmetic shift right ((signed long)x >> i) */
+
+const char *
+ashrsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+	 *len = 6;
+ if (reg0 <= reg1)
+ return (AS2 (mov,%A0,%B1) CR_TAB
+ AS2 (mov,%B0,%C1) CR_TAB
+ AS2 (mov,%C0,%D1) CR_TAB
+ AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%C0,7) CR_TAB
+ AS1 (dec,%D0));
+ else
+ return (AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%D1,7) CR_TAB
+ AS1 (dec,%D0) CR_TAB
+ AS2 (mov,%C0,%D1) CR_TAB
+ AS2 (mov,%B0,%C1) CR_TAB
+ AS2 (mov,%A0,%B1));
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1 + 2)
+ return *len = 4, (AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%B0,7) CR_TAB
+ AS1 (com,%D0) CR_TAB
+ AS2 (mov,%C0,%D0));
+ if (AVR_HAVE_MOVW)
+ return *len = 5, (AS2 (movw,%A0,%C1) CR_TAB
+ AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%B0,7) CR_TAB
+ AS1 (com,%D0) CR_TAB
+ AS2 (mov,%C0,%D0));
+ else
+ return *len = 6, (AS2 (mov,%B0,%D1) CR_TAB
+ AS2 (mov,%A0,%C1) CR_TAB
+ AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%B0,7) CR_TAB
+ AS1 (com,%D0) CR_TAB
+ AS2 (mov,%C0,%D0));
+ }
+
+ case 24:
+ return *len = 6, (AS2 (mov,%A0,%D1) CR_TAB
+ AS1 (clr,%D0) CR_TAB
+ AS2 (sbrc,%A0,7) CR_TAB
+ AS1 (com,%D0) CR_TAB
+ AS2 (mov,%B0,%D0) CR_TAB
+ AS2 (mov,%C0,%D0));
+
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ /* fall through */
+
+ case 31:
+ if (AVR_HAVE_MOVW)
+ return *len = 4, (AS1 (lsl,%D0) CR_TAB
+ AS2 (sbc,%A0,%A0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (movw,%C0,%A0));
+ else
+ return *len = 5, (AS1 (lsl,%D0) CR_TAB
+ AS2 (sbc,%A0,%A0) CR_TAB
+ AS2 (mov,%B0,%A0) CR_TAB
+ AS2 (mov,%C0,%A0) CR_TAB
+ AS2 (mov,%D0,%A0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (asr,%D0) CR_TAB
+ AS1 (ror,%C0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (ror,%A0)),
+ insn, operands, len, 4);
+ return "";
+}
+
+/* 8bit logical shift right ((unsigned char)x >> i) */
+
+const char *
+lshrqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ *len = 1;
+ return AS1 (clr,%0);
+
+ case 1:
+ *len = 1;
+ return AS1 (lsr,%0);
+
+ case 2:
+ *len = 2;
+ return (AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0));
+ case 3:
+ *len = 3;
+ return (AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0));
+
+ case 4:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+	    *len = 2;
+ return (AS1 (swap,%0) CR_TAB
+ AS2 (andi,%0,0x0f));
+ }
+ *len = 4;
+ return (AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0));
+
+ case 5:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 3;
+ return (AS1 (swap,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS2 (andi,%0,0x7));
+ }
+ *len = 5;
+ return (AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0));
+
+ case 6:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 4;
+ return (AS1 (swap,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS2 (andi,%0,0x3));
+ }
+ *len = 6;
+ return (AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0) CR_TAB
+ AS1 (lsr,%0));
+
+ case 7:
+ *len = 3;
+ return (AS1 (rol,%0) CR_TAB
+ AS1 (clr,%0) CR_TAB
+ AS1 (rol,%0));
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt (AS1 (lsr,%0),
+ insn, operands, len, 1);
+ return "";
+}
+
+/* 16bit logical shift right ((unsigned short)x >> i) */
+
+const char *
+lshrhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ *len = 2;
+ return (AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 4:
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (ldi_ok)
+ {
+ *len = 6;
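+	      /* Swap the nibbles of both bytes, then merge them with
+		 the eor/andi/eor sequence: the old low nibble of %B0
+		 ends up as the high nibble of %A0 and %B0 keeps only
+		 its old high nibble.  This is a 16-bit shift right
+		 by 4 without a loop.  */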
+ return (AS1 (swap,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (andi,%A0,0x0f) CR_TAB
+ AS2 (eor,%A0,%B0) CR_TAB
+ AS2 (andi,%B0,0x0f) CR_TAB
+ AS2 (eor,%A0,%B0));
+ }
+ if (scratch)
+ {
+ *len = 7;
+ return (AS1 (swap,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (ldi,%3,0x0f) CR_TAB
+ "and %A0,%3" CR_TAB
+ AS2 (eor,%A0,%B0) CR_TAB
+ "and %B0,%3" CR_TAB
+ AS2 (eor,%A0,%B0));
+ }
+ break; /* optimize_size ? 6 : 8 */
+
+ case 5:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ if (ldi_ok)
+ {
+ *len = 8;
+ return (AS1 (lsr,%B0) CR_TAB
+ AS1 (ror,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (andi,%A0,0x0f) CR_TAB
+ AS2 (eor,%A0,%B0) CR_TAB
+ AS2 (andi,%B0,0x0f) CR_TAB
+ AS2 (eor,%A0,%B0));
+ }
+ if (scratch)
+ {
+ *len = 9;
+ return (AS1 (lsr,%B0) CR_TAB
+ AS1 (ror,%A0) CR_TAB
+ AS1 (swap,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (ldi,%3,0x0f) CR_TAB
+ "and %A0,%3" CR_TAB
+ AS2 (eor,%A0,%B0) CR_TAB
+ "and %B0,%3" CR_TAB
+ AS2 (eor,%A0,%B0));
+ }
+ break; /* 10 */
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 9;
+ return (AS1 (clr,__tmp_reg__) CR_TAB
+ AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0) CR_TAB
+ AS1 (rol,__tmp_reg__) CR_TAB
+ AS1 (lsl,%A0) CR_TAB
+ AS1 (rol,%B0) CR_TAB
+ AS1 (rol,__tmp_reg__) CR_TAB
+ AS2 (mov,%A0,%B0) CR_TAB
+ AS2 (mov,%B0,__tmp_reg__));
+
+ case 7:
+ *len = 5;
+ return (AS1 (lsl,%A0) CR_TAB
+ AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS2 (sbc,%B0,%B0) CR_TAB
+ AS1 (neg,%B0));
+
+ case 8:
+ return *len = 2, (AS2 (mov,%A0,%B1) CR_TAB
+ AS1 (clr,%B0));
+
+ case 9:
+ *len = 3;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0));
+
+ case 10:
+ *len = 4;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0));
+
+ case 11:
+ *len = 5;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0));
+
+ case 12:
+ if (ldi_ok)
+ {
+ *len = 4;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (andi,%A0,0x0f));
+ }
+ if (scratch)
+ {
+ *len = 5;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS2 (ldi,%3,0x0f) CR_TAB
+ "and %A0,%3");
+ }
+ *len = 6;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0));
+
+ case 13:
+ if (ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS2 (andi,%A0,0x07));
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return (AS2 (ldi,%3,0x08) CR_TAB
+ AS2 (mul,%B0,%3) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (scratch)
+ {
+ *len = 6;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (swap,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS2 (ldi,%3,0x07) CR_TAB
+ "and %A0,%3");
+ }
+ if (AVR_HAVE_MUL)
+ {
+ *len = 6;
+ return ("set" CR_TAB
+ AS2 (bld,r1,3) CR_TAB
+ AS2 (mul,%B0,r1) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ *len = 7;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (lsr,%A0));
+
+ case 14:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (ldi,%A0,0x04) CR_TAB
+ AS2 (mul,%B0,%A0) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return (AS2 (ldi,%3,0x04) CR_TAB
+ AS2 (mul,%B0,%3) CR_TAB
+ AS2 (mov,%A0,r1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,__zero_reg__));
+ }
+ if (optimize_size && ldi_ok)
+ {
+ *len = 5;
+ return (AS2 (mov,%A0,%B0) CR_TAB
+ AS2 (ldi,%B0,6) "\n1:\t"
+ AS1 (lsr,%A0) CR_TAB
+ AS1 (dec,%B0) CR_TAB
+ AS1 (brne,1b));
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS1 (clr,%B0));
+
+ case 15:
+ *len = 4;
+ return (AS1 (clr,%A0) CR_TAB
+ AS1 (lsl,%B0) CR_TAB
+ AS1 (rol,%A0) CR_TAB
+ AS1 (clr,%B0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (lsr,%B0) CR_TAB
+ AS1 (ror,%A0)),
+ insn, operands, len, 2);
+ return "";
+}
+
+/* 32bit logical shift right ((unsigned long)x >> i) */
+
+const char *
+lshrsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ if (AVR_HAVE_MOVW)
+ return *len = 3, (AS1 (clr,%D0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS2 (movw,%A0,%C0));
+ *len = 4;
+ return (AS1 (clr,%D0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%A0));
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ *len = 4;
+ if (reg0 <= reg1)
+ return (AS2 (mov,%A0,%B1) CR_TAB
+ AS2 (mov,%B0,%C1) CR_TAB
+ AS2 (mov,%C0,%D1) CR_TAB
+ AS1 (clr,%D0));
+ else
+ return (AS1 (clr,%D0) CR_TAB
+ AS2 (mov,%C0,%D1) CR_TAB
+ AS2 (mov,%B0,%C1) CR_TAB
+ AS2 (mov,%A0,%B1));
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1 + 2)
+ return *len = 2, (AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+ if (AVR_HAVE_MOVW)
+ return *len = 3, (AS2 (movw,%A0,%C1) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+ else
+ return *len = 4, (AS2 (mov,%B0,%D1) CR_TAB
+ AS2 (mov,%A0,%C1) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+ }
+
+ case 24:
+ return *len = 4, (AS2 (mov,%A0,%D1) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+
+ case 31:
+ *len = 6;
+ return (AS1 (clr,%A0) CR_TAB
+ AS2 (sbrc,%D0,7) CR_TAB
+ AS1 (inc,%A0) CR_TAB
+ AS1 (clr,%B0) CR_TAB
+ AS1 (clr,%C0) CR_TAB
+ AS1 (clr,%D0));
+ }
+ len = t;
+ }
+ out_shift_with_cnt ((AS1 (lsr,%D0) CR_TAB
+ AS1 (ror,%C0) CR_TAB
+ AS1 (ror,%B0) CR_TAB
+ AS1 (ror,%A0)),
+ insn, operands, len, 4);
+ return "";
+}
+
+/* Create RTL split patterns for byte sized rotate expressions. This
+ produces a series of move instructions and considers overlap situations.
+ Overlapping non-HImode operands need a scratch register. */
+
+bool
+avr_rotate_bytes (rtx operands[])
+{
+ int i, j;
+ enum machine_mode mode = GET_MODE (operands[0]);
+ bool overlapped = reg_overlap_mentioned_p (operands[0], operands[1]);
+ bool same_reg = rtx_equal_p (operands[0], operands[1]);
+ int num = INTVAL (operands[2]);
+ rtx scratch = operands[3];
+ /* Work out if byte or word move is needed. Odd byte rotates need QImode.
+ Word move if no scratch is needed, otherwise use size of scratch. */
+ enum machine_mode move_mode = QImode;
+ int move_size, offset, size;
+
+ if (num & 0xf)
+ move_mode = QImode;
+ else if ((mode == SImode && !same_reg) || !overlapped)
+ move_mode = HImode;
+ else
+ move_mode = GET_MODE (scratch);
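+
+  /* E.g. an SImode rotate by 16 between distinct registers uses two
+     HImode (word) moves, whereas a rotate by 8 must fall back to
+     four QImode (byte) moves.  */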
+
+ /* Force DI rotate to use QI moves since other DI moves are currently split
+ into QI moves so forward propagation works better. */
+ if (mode == DImode)
+ move_mode = QImode;
+ /* Make scratch smaller if needed. */
+ if (SCRATCH != GET_CODE (scratch)
+ && HImode == GET_MODE (scratch)
+ && QImode == move_mode)
+ scratch = simplify_gen_subreg (move_mode, scratch, HImode, 0);
+
+ move_size = GET_MODE_SIZE (move_mode);
+ /* Number of bytes/words to rotate. */
+ offset = (num >> 3) / move_size;
+ /* Number of moves needed. */
+ size = GET_MODE_SIZE (mode) / move_size;
+  /* HImode byte swap is a special case to avoid a scratch register.  */
+ if (mode == HImode && same_reg)
+ {
+ /* HImode byte swap, using xor. This is as quick as using scratch. */
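+      /* The three moves below are the classic XOR exchange
+	 (dst ^= src; src ^= dst; dst ^= src), which swaps the two
+	 bytes in place without needing a temporary register.  */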
+ rtx src, dst;
+ src = simplify_gen_subreg (move_mode, operands[1], mode, 0);
+ dst = simplify_gen_subreg (move_mode, operands[0], mode, 1);
+ if (!rtx_equal_p (dst, src))
+ {
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ emit_move_insn (src, gen_rtx_XOR (QImode, src, dst));
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ }
+ }
+ else
+ {
+#define MAX_SIZE 8 /* GET_MODE_SIZE (DImode) / GET_MODE_SIZE (QImode) */
+ /* Create linked list of moves to determine move order. */
+ struct {
+ rtx src, dst;
+ int links;
+ } move[MAX_SIZE + 8];
+ int blocked, moves;
+
+ gcc_assert (size <= MAX_SIZE);
+ /* Generate list of subreg moves. */
+ for (i = 0; i < size; i++)
+ {
+ int from = i;
+ int to = (from + offset) % size;
+ move[i].src = simplify_gen_subreg (move_mode, operands[1],
+ mode, from * move_size);
+ move[i].dst = simplify_gen_subreg (move_mode, operands[0],
+ mode, to * move_size);
+ move[i].links = -1;
+ }
+      /* Mark a dependence where the dst of one move is the src of
+	 another move.  The first move is a conflict as it must wait
+	 until the second is performed.  We ignore moves to self;
+	 we catch this later.  */
+ if (overlapped)
+ for (i = 0; i < size; i++)
+ if (reg_overlap_mentioned_p (move[i].dst, operands[1]))
+ for (j = 0; j < size; j++)
+ if (j != i && rtx_equal_p (move[j].src, move[i].dst))
+ {
+ /* The dst of move i is the src of move j. */
+ move[i].links = j;
+ break;
+ }
+
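+      /* Example: rotating a register by half its width in place
+	 yields the cycle move[0]: part0 -> part1 and move[1]:
+	 part1 -> part0.  Neither move can go first, so the scratch
+	 handling below is needed to break the cycle.  */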
+ blocked = -1;
+ moves = 0;
+ /* Go through move list and perform non-conflicting moves. As each
+ non-overlapping move is made, it may remove other conflicts
+ so the process is repeated until no conflicts remain. */
+ do
+ {
+ blocked = -1;
+ moves = 0;
+	  /* Emit a move where the dst is not also a src, or where we
+	     have already used that src.  */
+ for (i = 0; i < size; i++)
+ if (move[i].src != NULL_RTX)
+ {
+ if (move[i].links == -1
+ || move[move[i].links].src == NULL_RTX)
+ {
+ moves++;
+ /* Ignore NOP moves to self. */
+ if (!rtx_equal_p (move[i].dst, move[i].src))
+ emit_move_insn (move[i].dst, move[i].src);
+
+ /* Remove conflict from list. */
+ move[i].src = NULL_RTX;
+ }
+ else
+ blocked = i;
+ }
+
+ /* Check for deadlock. This is when no moves occurred and we have
+ at least one blocked move. */
+ if (moves == 0 && blocked != -1)
+ {
+	      /* Need to use the scratch register to break the deadlock.
+		 Add a move that puts the dst of the blocked move into
+		 scratch.  When this move occurs, it breaks the chain
+		 deadlock.  The scratch register is then substituted
+		 into the real move.  */
+
+ gcc_assert (SCRATCH != GET_CODE (scratch));
+
+ move[size].src = move[blocked].dst;
+ move[size].dst = scratch;
+ /* Scratch move is never blocked. */
+ move[size].links = -1;
+ /* Make sure we have valid link. */
+ gcc_assert (move[blocked].links != -1);
+ /* Replace src of blocking move with scratch reg. */
+ move[move[blocked].links].src = scratch;
+	      /* Make dependent on the scratch move occurring.  */
+	      move[blocked].links = size;
+	      size++;
+ }
+ }
+ while (blocked != -1);
+ }
+ return true;
+}
+
+/* Modify the length assigned to instruction INSN.
+   LEN is the initially computed length of the insn.  */
+
+int
+adjust_insn_length (rtx insn, int len)
+{
+ rtx patt = PATTERN (insn);
+ rtx set;
+
+ if (GET_CODE (patt) == SET)
+ {
+ rtx op[10];
+ op[1] = SET_SRC (patt);
+ op[0] = SET_DEST (patt);
+ if (general_operand (op[1], VOIDmode)
+ && general_operand (op[0], VOIDmode))
+ {
+ switch (GET_MODE (op[0]))
+ {
+ case QImode:
+ output_movqi (insn, op, &len);
+ break;
+ case HImode:
+ output_movhi (insn, op, &len);
+ break;
+ case SImode:
+ case SFmode:
+ output_movsisf (insn, op, &len);
+ break;
+ default:
+ break;
+ }
+ }
+ else if (op[0] == cc0_rtx && REG_P (op[1]))
+ {
+ switch (GET_MODE (op[1]))
+ {
+ case HImode: out_tsthi (insn, op[1], &len); break;
+ case SImode: out_tstsi (insn, op[1], &len); break;
+ default: break;
+ }
+ }
+ else if (GET_CODE (op[1]) == AND)
+ {
+ if (GET_CODE (XEXP (op[1],1)) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = INTVAL (XEXP (op[1],1));
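+	      /* One instruction per byte of the mask that is not
+		 all-ones; 0xff bytes need no code.  E.g. an SImode
+		 AND with 0xff00ffff only clears byte 2, so len = 1.  */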
+ if (GET_MODE (op[1]) == SImode)
+ len = (((mask & 0xff) != 0xff)
+ + ((mask & 0xff00) != 0xff00)
+ + ((mask & 0xff0000L) != 0xff0000L)
+ + ((mask & 0xff000000L) != 0xff000000L));
+ else if (GET_MODE (op[1]) == HImode)
+ len = (((mask & 0xff) != 0xff)
+ + ((mask & 0xff00) != 0xff00));
+ }
+ }
+ else if (GET_CODE (op[1]) == IOR)
+ {
+ if (GET_CODE (XEXP (op[1],1)) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = INTVAL (XEXP (op[1],1));
+ if (GET_MODE (op[1]) == SImode)
+ len = (((mask & 0xff) != 0)
+ + ((mask & 0xff00) != 0)
+ + ((mask & 0xff0000L) != 0)
+ + ((mask & 0xff000000L) != 0));
+ else if (GET_MODE (op[1]) == HImode)
+ len = (((mask & 0xff) != 0)
+ + ((mask & 0xff00) != 0));
+ }
+ }
+ }
+ set = single_set (insn);
+ if (set)
+ {
+ rtx op[10];
+
+ op[1] = SET_SRC (set);
+ op[0] = SET_DEST (set);
+
+ if (GET_CODE (patt) == PARALLEL
+ && general_operand (op[1], VOIDmode)
+ && general_operand (op[0], VOIDmode))
+ {
+ if (XVECLEN (patt, 0) == 2)
+ op[2] = XVECEXP (patt, 0, 1);
+
+ switch (GET_MODE (op[0]))
+ {
+ case QImode:
+ len = 2;
+ break;
+ case HImode:
+ output_reload_inhi (insn, op, &len);
+ break;
+ case SImode:
+ case SFmode:
+ output_reload_insisf (insn, op, &len);
+ break;
+ default:
+ break;
+ }
+ }
+ else if (GET_CODE (op[1]) == ASHIFT
+ || GET_CODE (op[1]) == ASHIFTRT
+ || GET_CODE (op[1]) == LSHIFTRT)
+ {
+ rtx ops[10];
+ ops[0] = op[0];
+ ops[1] = XEXP (op[1],0);
+ ops[2] = XEXP (op[1],1);
+ switch (GET_CODE (op[1]))
+ {
+ case ASHIFT:
+ switch (GET_MODE (op[0]))
+ {
+ case QImode: ashlqi3_out (insn,ops,&len); break;
+ case HImode: ashlhi3_out (insn,ops,&len); break;
+ case SImode: ashlsi3_out (insn,ops,&len); break;
+ default: break;
+ }
+ break;
+ case ASHIFTRT:
+ switch (GET_MODE (op[0]))
+ {
+ case QImode: ashrqi3_out (insn,ops,&len); break;
+ case HImode: ashrhi3_out (insn,ops,&len); break;
+ case SImode: ashrsi3_out (insn,ops,&len); break;
+ default: break;
+ }
+ break;
+ case LSHIFTRT:
+ switch (GET_MODE (op[0]))
+ {
+ case QImode: lshrqi3_out (insn,ops,&len); break;
+ case HImode: lshrhi3_out (insn,ops,&len); break;
+ case SImode: lshrsi3_out (insn,ops,&len); break;
+ default: break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return len;
+}
+
+/* Return nonzero if register REG is dead after INSN.  */
+
+int
+reg_unused_after (rtx insn, rtx reg)
+{
+ return (dead_or_set_p (insn, reg)
+ || (REG_P(reg) && _reg_unused_after (insn, reg)));
+}
+
+/* Return nonzero if REG is not used after INSN.
+ We assume REG is a reload reg, and therefore does
+ not live past labels. It may live past calls or jumps though. */
+
+int
+_reg_unused_after (rtx insn, rtx reg)
+{
+ enum rtx_code code;
+ rtx set;
+
+ /* If the reg is set by this instruction, then it is safe for our
+ case. Disregard the case where this is a store to memory, since
+ we are checking a register used in the store address. */
+ set = single_set (insn);
+ if (set && GET_CODE (SET_DEST (set)) != MEM
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return 1;
+
+ while ((insn = NEXT_INSN (insn)))
+ {
+ rtx set;
+ code = GET_CODE (insn);
+
+#if 0
+      /* If this is a label that existed before reload, then the register
+	 is dead here.  However, if this is a label added by reorg, then
+ the register may still be live here. We can't tell the difference,
+ so we just ignore labels completely. */
+ if (code == CODE_LABEL)
+ return 1;
+ /* else */
+#endif
+
+ if (!INSN_P (insn))
+ continue;
+
+ if (code == JUMP_INSN)
+ return 0;
+
+ /* If this is a sequence, we must handle them all at once.
+ We could have for instance a call that sets the target register,
+ and an insn in a delay slot that uses the register. In this case,
+ we must return 0. */
+ else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int i;
+ int retval = 0;
+
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
+ rtx set = single_set (this_insn);
+
+ if (GET_CODE (this_insn) == CALL_INSN)
+ code = CALL_INSN;
+ else if (GET_CODE (this_insn) == JUMP_INSN)
+ {
+ if (INSN_ANNULLED_BRANCH_P (this_insn))
+ return 0;
+ code = JUMP_INSN;
+ }
+
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ {
+ if (GET_CODE (SET_DEST (set)) != MEM)
+ retval = 1;
+ else
+ return 0;
+ }
+ if (set == 0
+ && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
+ return 0;
+ }
+ if (retval == 1)
+ return 1;
+ else if (code == JUMP_INSN)
+ return 0;
+ }
+
+ if (code == CALL_INSN)
+ {
+ rtx tem;
+ for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
+ if (GET_CODE (XEXP (tem, 0)) == USE
+ && REG_P (XEXP (XEXP (tem, 0), 0))
+ && reg_overlap_mentioned_p (reg, XEXP (XEXP (tem, 0), 0)))
+ return 0;
+ if (call_used_regs[REGNO (reg)])
+ return 1;
+ }
+
+ set = single_set (insn);
+
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return GET_CODE (SET_DEST (set)) != MEM;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
+ }
+ return 1;
+}
+
+/* Target hook for assembling integer objects. The AVR version needs
+ special handling for references to certain labels. */
+
+static bool
+avr_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+ if (size == POINTER_SIZE / BITS_PER_UNIT && aligned_p
+ && text_segment_operand (x, VOIDmode) )
+ {
+ fputs ("\t.word\tgs(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputs (")\n", asm_out_file);
+ return true;
+ }
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+/* Worker function for ASM_DECLARE_FUNCTION_NAME. */
+
+void
+avr_asm_declare_function_name (FILE *file, const char *name, tree decl)
+{
+
+ /* If the function has the 'signal' or 'interrupt' attribute, test to
+ make sure that the name of the function is "__vector_NN" so as to
+ catch when the user misspells the interrupt vector name. */
+
+ if (cfun->machine->is_interrupt)
+ {
+ if (strncmp (name, "__vector", strlen ("__vector")) != 0)
+ {
+ warning_at (DECL_SOURCE_LOCATION (decl), 0,
+ "%qs appears to be a misspelled interrupt handler",
+ name);
+ }
+ }
+ else if (cfun->machine->is_signal)
+ {
+ if (strncmp (name, "__vector", strlen ("__vector")) != 0)
+ {
+ warning_at (DECL_SOURCE_LOCATION (decl), 0,
+ "%qs appears to be a misspelled signal handler",
+ name);
+ }
+ }
+
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_OUTPUT_LABEL (file, name);
+}
+
+/* The routine used to output NUL terminated strings. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable, especially for targets like the i386
+ (where the only alternative is to output character sequences as
+ comma separated lists of numbers). */
+
+void
+gas_output_limited_string(FILE *file, const char *str)
+{
+ const unsigned char *_limited_str = (const unsigned char *) str;
+ unsigned ch;
+ fprintf (file, "%s\"", STRING_ASM_OP);
+ for (; (ch = *_limited_str); _limited_str++)
+ {
+ int escape;
+ switch (escape = ESCAPES[ch])
+ {
+ case 0:
+ putc (ch, file);
+ break;
+ case 1:
+ fprintf (file, "\\%03o", ch);
+ break;
+ default:
+ putc ('\\', file);
+ putc (escape, file);
+ break;
+ }
+ }
+ fprintf (file, "\"\n");
+}
+
+/* The routine used to output sequences of byte values. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable. Note that if we find subparts of the
+ character sequence which end with NUL (and which are shorter than
+ STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */
+
+void
+gas_output_ascii(FILE *file, const char *str, size_t length)
+{
+ const unsigned char *_ascii_bytes = (const unsigned char *) str;
+ const unsigned char *limit = _ascii_bytes + length;
+ unsigned bytes_in_chunk = 0;
+ for (; _ascii_bytes < limit; _ascii_bytes++)
+ {
+ const unsigned char *p;
+ if (bytes_in_chunk >= 60)
+ {
+ fprintf (file, "\"\n");
+ bytes_in_chunk = 0;
+ }
+ for (p = _ascii_bytes; p < limit && *p != '\0'; p++)
+ continue;
+ if (p < limit && (p - _ascii_bytes) <= (signed)STRING_LIMIT)
+ {
+ if (bytes_in_chunk > 0)
+ {
+ fprintf (file, "\"\n");
+ bytes_in_chunk = 0;
+ }
+ gas_output_limited_string (file, (const char*)_ascii_bytes);
+ _ascii_bytes = p;
+ }
+ else
+ {
+ int escape;
+ unsigned ch;
+ if (bytes_in_chunk == 0)
+ fprintf (file, "\t.ascii\t\"");
+ switch (escape = ESCAPES[ch = *_ascii_bytes])
+ {
+ case 0:
+ putc (ch, file);
+ bytes_in_chunk++;
+ break;
+ case 1:
+ fprintf (file, "\\%03o", ch);
+ bytes_in_chunk += 4;
+ break;
+ default:
+ putc ('\\', file);
+ putc (escape, file);
+ bytes_in_chunk += 2;
+ break;
+ }
+ }
+ }
+ if (bytes_in_chunk > 0)
+ fprintf (file, "\"\n");
+}
+
+/* Return true if pseudos that have been assigned to registers of
+   class C would likely be spilled because registers of C are needed
+   for spill registers.  */
+
+static bool
+avr_class_likely_spilled_p (reg_class_t c)
+{
+ return (c != ALL_REGS && c != ADDW_REGS);
+}
+
+/* Valid attributes:
+   progmem - put data into program memory;
+   signal - make the function a hardware interrupt handler; interrupts
+   stay disabled after the function prologue;
+   interrupt - make the function a hardware interrupt handler; interrupts
+   are enabled after the function prologue;
+   naked - don't generate a function prologue/epilogue or `ret' command.
+
+   Only the `progmem' attribute is valid for a type.  */
+
+/* Handle a "progmem" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+avr_handle_progmem_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ {
+ /* This is really a decl attribute, not a type attribute,
+ but try to handle it for GCC 3.0 backwards compatibility. */
+
+ tree type = TREE_TYPE (*node);
+ tree attr = tree_cons (name, args, TYPE_ATTRIBUTES (type));
+ tree newtype = build_type_attribute_variant (type, attr);
+
+ TYPE_MAIN_VARIANT (newtype) = TYPE_MAIN_VARIANT (type);
+ TREE_TYPE (*node) = newtype;
+ *no_add_attrs = true;
+ }
+ else if (TREE_STATIC (*node) || DECL_EXTERNAL (*node))
+ {
+ *no_add_attrs = false;
+ }
+ else
+ {
+ warning (OPT_Wattributes, "%qE attribute ignored",
+ name);
+ *no_add_attrs = true;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+avr_handle_fndecl_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static tree
+avr_handle_fntype_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Look for the attribute `progmem' in DECL;
+   if found return 1, otherwise 0.  */
+
+int
+avr_progmem_p (tree decl, tree attributes)
+{
+ tree a;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return 0;
+
+ if (NULL_TREE
+ != lookup_attribute ("progmem", attributes))
+ return 1;
+
+  a = decl;
+ do
+ a = TREE_TYPE(a);
+ while (TREE_CODE (a) == ARRAY_TYPE);
+
+ if (a == error_mark_node)
+ return 0;
+
+ if (NULL_TREE != lookup_attribute ("progmem", TYPE_ATTRIBUTES (a)))
+ return 1;
+
+ return 0;
+}
+
+/* Add the section attribute if the variable is in progmem. */
+
+static void
+avr_insert_attributes (tree node, tree *attributes)
+{
+ if (TREE_CODE (node) == VAR_DECL
+ && (TREE_STATIC (node) || DECL_EXTERNAL (node))
+ && avr_progmem_p (node, *attributes))
+ {
+ tree node0 = node;
+
+      /* For C++, we have to peel arrays in order to determine
+	 correctly whether the type is read-only.  */
+
+ do
+ node0 = TREE_TYPE (node0);
+ while (TREE_CODE (node0) == ARRAY_TYPE);
+
+ if (error_mark_node == node0)
+ return;
+
+ if (TYPE_READONLY (node0))
+ {
+ static const char dsec[] = ".progmem.data";
+
+ *attributes = tree_cons (get_identifier ("section"),
+ build_tree_list (NULL, build_string (strlen (dsec), dsec)),
+ *attributes);
+ }
+ else
+ {
+ error ("variable %q+D must be const in order to be put into"
+ " read-only section by means of %<__attribute__((progmem))%>",
+ node);
+ }
+ }
+}
+
+/* A get_unnamed_section callback for switching to progmem_section. */
+
+static void
+avr_output_progmem_section_asm_op (const void *arg ATTRIBUTE_UNUSED)
+{
+ fprintf (asm_out_file,
+ "\t.section .progmem.gcc_sw_table, \"%s\", @progbits\n",
+ AVR_HAVE_JMP_CALL ? "a" : "ax");
+  /* Should already be aligned; this is just to be safe if it isn't.  */
+ fprintf (asm_out_file, "\t.p2align 1\n");
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+avr_asm_init_sections (void)
+{
+ progmem_section = get_unnamed_section (AVR_HAVE_JMP_CALL ? 0 : SECTION_CODE,
+ avr_output_progmem_section_asm_op,
+ NULL);
+ readonly_data_section = data_section;
+}
+
+static unsigned int
+avr_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ if (strncmp (name, ".noinit", 7) == 0)
+ {
+ if (decl && TREE_CODE (decl) == VAR_DECL
+ && DECL_INITIAL (decl) == NULL_TREE)
+ flags |= SECTION_BSS; /* @nobits */
+ else
+ warning (0, "only uninitialized variables can be placed in the "
+ ".noinit section");
+ }
+
+ if (0 == strncmp (name, ".progmem.data", strlen (".progmem.data")))
+ flags &= ~SECTION_WRITE;
+
+ return flags;
+}
+
+
+/* Implement `TARGET_ENCODE_SECTION_INFO'. */
+
+static void
+avr_encode_section_info (tree decl, rtx rtl, int new_decl_p)
+{
+ /* In avr_handle_progmem_attribute, DECL_INITIAL is not yet
+ readily available, see PR34734. So we postpone the warning
+ about uninitialized data in program memory section until here. */
+
+ if (new_decl_p
+ && decl && DECL_P (decl)
+ && NULL_TREE == DECL_INITIAL (decl)
+ && !DECL_EXTERNAL (decl)
+ && avr_progmem_p (decl, DECL_ATTRIBUTES (decl)))
+ {
+ warning (OPT_Wuninitialized,
+ "uninitialized variable %q+D put into "
+ "program memory area", decl);
+ }
+
+ default_encode_section_info (decl, rtl, new_decl_p);
+}
+
+
+/* Outputs some appropriate text to go at the start of an assembler
+ file. */
+
+static void
+avr_file_start (void)
+{
+ if (avr_current_arch->asm_only)
+ error ("MCU %qs supported for assembler only", avr_mcu_name);
+
+ default_file_start ();
+
+ fputs ("__SREG__ = 0x3f\n"
+ "__SP_H__ = 0x3e\n"
+ "__SP_L__ = 0x3d\n", asm_out_file);
+
+ fputs ("__tmp_reg__ = 0\n"
+ "__zero_reg__ = 1\n", asm_out_file);
+
+ /* FIXME: output these only if there is anything in the .data / .bss
+ sections - some code size could be saved by not linking in the
+ initialization code from libgcc if one or both sections are empty. */
+ fputs ("\t.global __do_copy_data\n", asm_out_file);
+ fputs ("\t.global __do_clear_bss\n", asm_out_file);
+}
+
+/* Outputs some appropriate text to go at the end of an
+   assembler file.  */
+
+static void
+avr_file_end (void)
+{
+}
+
+/* Choose the order in which to allocate hard registers for
+ pseudo-registers local to a basic block.
+
+ Store the desired register order in the array `reg_alloc_order'.
+ Element 0 should be the register to allocate first; element 1, the
+ next register; and so on. */
+
+void
+order_regs_for_local_alloc (void)
+{
+ unsigned int i;
+ static const int order_0[] = {
+ 24,25,
+ 18,19,
+ 20,21,
+ 22,23,
+ 30,31,
+ 26,27,
+ 28,29,
+ 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,
+ 0,1,
+ 32,33,34,35
+ };
+ static const int order_1[] = {
+ 18,19,
+ 20,21,
+ 22,23,
+ 24,25,
+ 30,31,
+ 26,27,
+ 28,29,
+ 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,
+ 0,1,
+ 32,33,34,35
+ };
+ static const int order_2[] = {
+ 25,24,
+ 23,22,
+ 21,20,
+ 19,18,
+ 30,31,
+ 26,27,
+ 28,29,
+ 17,16,
+ 15,14,13,12,11,10,9,8,7,6,5,4,3,2,
+ 1,0,
+ 32,33,34,35
+ };
+
+ const int *order = (TARGET_ORDER_1 ? order_1 :
+ TARGET_ORDER_2 ? order_2 :
+ order_0);
+  for (i = 0; i < ARRAY_SIZE (order_0); ++i)
+ reg_alloc_order[i] = order[i];
+}
+
+
+/* Implement `TARGET_REGISTER_MOVE_COST' */
+
+static int
+avr_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from, reg_class_t to)
+{
+ return (from == STACK_REG ? 6
+ : to == STACK_REG ? 12
+ : 2);
+}
+
+
+/* Implement `TARGET_MEMORY_MOVE_COST' */
+
+static int
+avr_memory_move_cost (enum machine_mode mode, reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ return (mode == QImode ? 2
+ : mode == HImode ? 4
+ : mode == SImode ? 8
+ : mode == SFmode ? 8
+ : 16);
+}
+
+
+/* Mutually recursive subroutine of avr_rtx_costs for calculating the
+ cost of an RTX operand given its context. X is the rtx of the
+ operand, MODE is its mode, and OUTER is the rtx_code of this
+ operand's parent operator. */
+
+static int
+avr_operand_rtx_cost (rtx x, enum machine_mode mode, enum rtx_code outer,
+ bool speed)
+{
+ enum rtx_code code = GET_CODE (x);
+ int total;
+
+ switch (code)
+ {
+ case REG:
+ case SUBREG:
+ return 0;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ return COSTS_N_INSNS (GET_MODE_SIZE (mode));
+
+ default:
+ break;
+ }
+
+ total = 0;
+ avr_rtx_costs (x, code, outer, &total, speed);
+ return total;
+}
+
+/* The AVR backend's rtx_cost function.  X is the rtx expression whose cost
+ is to be calculated. Return true if the complete cost has been
+ computed, and false if subexpressions should be scanned. In either
+ case, *TOTAL contains the cost result. */
+
+static bool
+avr_rtx_costs (rtx x, int codearg, int outer_code ATTRIBUTE_UNUSED, int *total,
+ bool speed)
+{
+ enum rtx_code code = (enum rtx_code) codearg;
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT val;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ /* Immediate constants are as cheap as registers. */
+ *total = 0;
+ return true;
+
+ case MEM:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ return true;
+
+ case NEG:
+ switch (mode)
+ {
+ case QImode:
+ case SFmode:
+ *total = COSTS_N_INSNS (1);
+ break;
+
+ case HImode:
+ *total = COSTS_N_INSNS (3);
+ break;
+
+ case SImode:
+ *total = COSTS_N_INSNS (7);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case ABS:
+ switch (mode)
+ {
+ case QImode:
+ case SFmode:
+ *total = COSTS_N_INSNS (1);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case NOT:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case ZERO_EXTEND:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
+ - GET_MODE_SIZE (GET_MODE (XEXP (x, 0))));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case SIGN_EXTEND:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) + 2
+ - GET_MODE_SIZE (GET_MODE (XEXP (x, 0))));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case PLUS:
+ switch (mode)
+ {
+ case QImode:
+ *total = COSTS_N_INSNS (1);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (2);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else if (INTVAL (XEXP (x, 1)) >= -63 && INTVAL (XEXP (x, 1)) <= 63)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (2);
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (4);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else if (INTVAL (XEXP (x, 1)) >= -63 && INTVAL (XEXP (x, 1)) <= 63)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (4);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case MINUS:
+ case AND:
+ case IOR:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ return true;
+
+ case XOR:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ return true;
+
+ case MULT:
+ switch (mode)
+ {
+ case QImode:
+ if (AVR_HAVE_MUL)
+ *total = COSTS_N_INSNS (!speed ? 3 : 4);
+ else if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ return false;
+ break;
+
+ case HImode:
+ if (AVR_HAVE_MUL)
+ *total = COSTS_N_INSNS (!speed ? 7 : 10);
+ else if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ return true;
+
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ return false;
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ return true;
+
+ case ROTATE:
+ switch (mode)
+ {
+ case QImode:
+ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 4)
+ *total = COSTS_N_INSNS (1);
+
+ break;
+
+ case HImode:
+ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 8)
+ *total = COSTS_N_INSNS (3);
+
+ break;
+
+ case SImode:
+ if (CONST_INT_P (XEXP (x, 1)))
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 8:
+ case 24:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 16:
+ *total = COSTS_N_INSNS (AVR_HAVE_MOVW ? 4 : 6);
+ break;
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case ASHIFT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 7)
+ *total = COSTS_N_INSNS (3);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 9:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 3:
+ case 10:
+ case 15:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 7:
+ case 11:
+ case 12:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 4:
+ *total = COSTS_N_INSNS (!speed ? 5 : 8);
+ break;
+ case 6:
+ *total = COSTS_N_INSNS (!speed ? 5 : 9);
+ break;
+ case 5:
+ *total = COSTS_N_INSNS (!speed ? 5 : 10);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 24:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 1:
+ case 8:
+ case 16:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (6);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case ASHIFTRT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 6)
+ *total = COSTS_N_INSNS (4);
+ else if (val == 7)
+ *total = COSTS_N_INSNS (2);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 15:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 7:
+ case 8:
+ case 9:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 10:
+ case 14:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 11:
+ *total = COSTS_N_INSNS (!speed ? 5 : 6);
+ break;
+ case 12:
+ *total = COSTS_N_INSNS (!speed ? 5 : 7);
+ break;
+ case 6:
+ case 13:
+ *total = COSTS_N_INSNS (!speed ? 5 : 8);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 8:
+ case 16:
+ case 24:
+ *total = COSTS_N_INSNS (6);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (AVR_HAVE_MOVW ? 4 : 5);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case LSHIFTRT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 7)
+ *total = COSTS_N_INSNS (3);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 9:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 10:
+ case 15:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 7:
+ case 11:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 3:
+ case 12:
+ case 13:
+ case 14:
+ *total = COSTS_N_INSNS (!speed ? 5 : 6);
+ break;
+ case 4:
+ *total = COSTS_N_INSNS (!speed ? 5 : 7);
+ break;
+ case 5:
+ case 6:
+ *total = COSTS_N_INSNS (!speed ? 5 : 9);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ case 8:
+ case 16:
+ case 24:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (6);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ case COMPARE:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ *total = COSTS_N_INSNS (1);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ break;
+
+ case HImode:
+ *total = COSTS_N_INSNS (2);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ else if (INTVAL (XEXP (x, 1)) != 0)
+ *total += COSTS_N_INSNS (1);
+ break;
+
+ case SImode:
+ *total = COSTS_N_INSNS (4);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, speed);
+ else if (INTVAL (XEXP (x, 1)) != 0)
+ *total += COSTS_N_INSNS (3);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, speed);
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+/* Calculate the cost of a memory address. */
+
+static int
+avr_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x,1)) == CONST_INT
+ && (REG_P (XEXP (x,0)) || GET_CODE (XEXP (x,0)) == SUBREG)
+ && INTVAL (XEXP (x,1)) >= 61)
+ return 18;
+ if (CONSTANT_ADDRESS_P (x))
+ {
+ if (optimize > 0 && io_address_operand (x, QImode))
+ return 2;
+ return 4;
+ }
+ return 4;
+}
+
+/* Test for the extra memory constraint 'Q': a memory address based
+   on the Y or Z pointer with a valid displacement.  */
+
+int
+extra_constraint_Q (rtx x)
+{
+ if (GET_CODE (XEXP (x,0)) == PLUS
+ && REG_P (XEXP (XEXP (x,0), 0))
+ && GET_CODE (XEXP (XEXP (x,0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (x,0), 1))
+ <= MAX_LD_OFFSET (GET_MODE (x))))
+ {
+ rtx xx = XEXP (XEXP (x,0), 0);
+ int regno = REGNO (xx);
+ if (TARGET_ALL_DEBUG)
+ {
+ fprintf (stderr, ("extra_constraint:\n"
+ "reload_completed: %d\n"
+ "reload_in_progress: %d\n"),
+ reload_completed, reload_in_progress);
+ debug_rtx (x);
+ }
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return 1; /* allocate pseudos */
+ else if (regno == REG_Z || regno == REG_Y)
+ return 1; /* strictly check */
+ else if (xx == frame_pointer_rtx
+ || xx == arg_pointer_rtx)
+ return 1; /* XXX frame & arg pointer checks */
+ }
+ return 0;
+}
+
+/* Convert condition code CONDITION to the valid AVR condition code. */
+
+RTX_CODE
+avr_normalize_condition (RTX_CODE condition)
+{
+ switch (condition)
+ {
+ case GT:
+ return GE;
+ case GTU:
+ return GEU;
+ case LE:
+ return LT;
+ case LEU:
+ return LTU;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* This function optimizes conditional jumps. */
+
+static void
+avr_reorg (void)
+{
+ rtx insn, pattern;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (! (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ || !single_set (insn))
+ continue;
+
+ pattern = PATTERN (insn);
+
+ if (GET_CODE (pattern) == PARALLEL)
+ pattern = XVECEXP (pattern, 0, 0);
+ if (GET_CODE (pattern) == SET
+ && SET_DEST (pattern) == cc0_rtx
+ && compare_diff_p (insn))
+ {
+ if (GET_CODE (SET_SRC (pattern)) == COMPARE)
+ {
+	      /* We are now looking at a compare insn.  */
+
+ pattern = SET_SRC (pattern);
+ if (true_regnum (XEXP (pattern,0)) >= 0
+ && true_regnum (XEXP (pattern,1)) >= 0 )
+ {
+ rtx x = XEXP (pattern,0);
+ rtx next = next_real_insn (insn);
+ rtx pat = PATTERN (next);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern,0) = XEXP (pattern,1);
+ XEXP (pattern,1) = x;
+ INSN_CODE (next) = -1;
+ }
+ else if (true_regnum (XEXP (pattern, 0)) >= 0
+ && XEXP (pattern, 1) == const0_rtx)
+ {
+ /* This is a tst insn, we can reverse it. */
+ rtx next = next_real_insn (insn);
+ rtx pat = PATTERN (next);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern, 1) = XEXP (pattern, 0);
+ XEXP (pattern, 0) = const0_rtx;
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ else if (true_regnum (XEXP (pattern,0)) >= 0
+ && GET_CODE (XEXP (pattern,1)) == CONST_INT)
+ {
+ rtx x = XEXP (pattern,1);
+ rtx next = next_real_insn (insn);
+ rtx pat = PATTERN (next);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ enum machine_mode mode = GET_MODE (XEXP (pattern, 0));
+
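+		  /* E.g. rewrite "x > 5" as the equivalent "x >= 6":
+		     AVR has branches for GE/GEU (brge/brsh) but none
+		     for strict GT/GTU.  */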
+ if (avr_simplify_comparison_p (mode, GET_CODE (t), x))
+ {
+ XEXP (pattern, 1) = gen_int_mode (INTVAL (x) + 1, mode);
+ PUT_CODE (t, avr_normalize_condition (GET_CODE (t)));
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Return the register number used for the function return value.  */
+
+int
+avr_ret_register (void)
+{
+ return 24;
+}
+
+/* Create an RTX representing the place where a
+ library function returns a value of mode MODE. */
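+/* The value starts at register 26 - max (size, 2): QImode is
+   returned in r24, HImode in r25:r24, SImode in r25..r22 and
+   DImode in r25..r18.  */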
+
+rtx
+avr_libcall_value (enum machine_mode mode)
+{
+ int offs = GET_MODE_SIZE (mode);
+ if (offs < 2)
+ offs = 2;
+ return gen_rtx_REG (mode, RET_REGISTER + 2 - offs);
+}
+
+/* Create an RTX representing the place where a
+   function returns a value of data type TYPE.  */
+
+rtx
+avr_function_value (const_tree type,
+ const_tree func ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ unsigned int offs;
+
+ if (TYPE_MODE (type) != BLKmode)
+ return avr_libcall_value (TYPE_MODE (type));
+
+ offs = int_size_in_bytes (type);
+ if (offs < 2)
+ offs = 2;
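+
+  /* Round intermediate sizes up to the next natural width: a 3-byte
+     aggregate is returned like a 4-byte one (r25..r22), and 5..7
+     bytes like 8 (r25..r18).  */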
+ if (offs > 2 && offs < GET_MODE_SIZE (SImode))
+ offs = GET_MODE_SIZE (SImode);
+ else if (offs > GET_MODE_SIZE (SImode) && offs < GET_MODE_SIZE (DImode))
+ offs = GET_MODE_SIZE (DImode);
+
+ return gen_rtx_REG (BLKmode, RET_REGISTER + 2 - offs);
+}
+
+int
+test_hard_reg_class (enum reg_class rclass, rtx x)
+{
+ int regno = true_regnum (x);
+ if (regno < 0)
+ return 0;
+
+ if (TEST_HARD_REG_CLASS (rclass, regno))
+ return 1;
+
+ return 0;
+}
+
+
+int
+jump_over_one_insn_p (rtx insn, rtx dest)
+{
+ int uid = INSN_UID (GET_CODE (dest) == LABEL_REF
+ ? XEXP (dest, 0)
+ : dest);
+ int jump_addr = INSN_ADDRESSES (INSN_UID (insn));
+ int dest_addr = INSN_ADDRESSES (uid);
+ return dest_addr - jump_addr == get_attr_length (insn) + 1;
+}
+
+/* Returns 1 if a value of mode MODE can be stored starting with hard
+ register number REGNO. On the enhanced core, anything larger than
+   1 byte must start in an even-numbered register for "movw" to work
+ (this way we don't have to check for odd registers everywhere). */
+
+int
+avr_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* NOTE: 8-bit values must not be disallowed for R28 or R29.
+ Disallowing QI et al. in these regs might lead to code like
+ (set (subreg:QI (reg:HI 28) n) ...)
+ which will result in wrong code because reload does not
+     handle SUBREGs of hard registers like this.
+ This could be fixed in reload. However, it appears
+ that fixing reload is not wanted by reload people. */
+
+ /* Any GENERAL_REGS register can hold 8-bit values. */
+
+ if (GET_MODE_SIZE (mode) == 1)
+ return 1;
+
+ /* FIXME: Ideally, the following test is not needed.
+ However, it turned out that it can reduce the number
+     of spill fails.  AVR, with its poor endowment of
+     address registers, is an extreme stress test for reload.  */
+
+ if (GET_MODE_SIZE (mode) >= 4
+ && regno >= REG_X)
+ return 0;
+
+ /* All modes larger than 8 bits should start in an even register. */
+
+ return !(regno & 1);
+}
+
+const char *
+output_reload_inhi (rtx insn ATTRIBUTE_UNUSED, rtx *operands, int *len)
+{
+ int tmp;
+ if (!len)
+ len = &tmp;
+
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ int val = INTVAL (operands[1]);
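+
+      /* Exploit special byte patterns: a zero byte can come from
+	 __zero_reg__, and if both bytes are equal a single LDI can
+	 feed both MOVs; e.g. 0x1200, 0x0034 and 0x5555 each take
+	 3 insns instead of 4.  */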
+ if ((val & 0xff) == 0)
+ {
+ *len = 3;
+ return (AS2 (mov,%A0,__zero_reg__) CR_TAB
+ AS2 (ldi,%2,hi8(%1)) CR_TAB
+ AS2 (mov,%B0,%2));
+ }
+ else if ((val & 0xff00) == 0)
+ {
+ *len = 3;
+ return (AS2 (ldi,%2,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,%2) CR_TAB
+ AS2 (mov,%B0,__zero_reg__));
+ }
+ else if ((val & 0xff) == ((val & 0xff00) >> 8))
+ {
+ *len = 3;
+ return (AS2 (ldi,%2,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,%2) CR_TAB
+ AS2 (mov,%B0,%2));
+ }
+ }
+ *len = 4;
+ return (AS2 (ldi,%2,lo8(%1)) CR_TAB
+ AS2 (mov,%A0,%2) CR_TAB
+ AS2 (ldi,%2,hi8(%1)) CR_TAB
+ AS2 (mov,%B0,%2));
+}
+
+
+const char *
+output_reload_insisf (rtx insn ATTRIBUTE_UNUSED, rtx *operands, int *len)
+{
+ rtx src = operands[1];
+ int cnst = (GET_CODE (src) == CONST_INT);
+
+ if (len)
+ {
+ if (cnst)
+ *len = 4 + ((INTVAL (src) & 0xff) != 0)
+ + ((INTVAL (src) & 0xff00) != 0)
+ + ((INTVAL (src) & 0xff0000) != 0)
+ + ((INTVAL (src) & 0xff000000) != 0);
+ else
+ *len = 8;
+
+ return "";
+ }
+
+ if (cnst && ((INTVAL (src) & 0xff) == 0))
+ output_asm_insn (AS2 (mov, %A0, __zero_reg__), operands);
+ else
+ {
+ output_asm_insn (AS2 (ldi, %2, lo8(%1)), operands);
+ output_asm_insn (AS2 (mov, %A0, %2), operands);
+ }
+ if (cnst && ((INTVAL (src) & 0xff00) == 0))
+ output_asm_insn (AS2 (mov, %B0, __zero_reg__), operands);
+ else
+ {
+ output_asm_insn (AS2 (ldi, %2, hi8(%1)), operands);
+ output_asm_insn (AS2 (mov, %B0, %2), operands);
+ }
+ if (cnst && ((INTVAL (src) & 0xff0000) == 0))
+ output_asm_insn (AS2 (mov, %C0, __zero_reg__), operands);
+ else
+ {
+ output_asm_insn (AS2 (ldi, %2, hlo8(%1)), operands);
+ output_asm_insn (AS2 (mov, %C0, %2), operands);
+ }
+ if (cnst && ((INTVAL (src) & 0xff000000) == 0))
+ output_asm_insn (AS2 (mov, %D0, __zero_reg__), operands);
+ else
+ {
+ output_asm_insn (AS2 (ldi, %2, hhi8(%1)), operands);
+ output_asm_insn (AS2 (mov, %D0, %2), operands);
+ }
+ return "";
+}
+
+void
+avr_output_bld (rtx operands[], int bit_nr)
+{
+ static char s[] = "bld %A0,0";
+
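+  /* Patch the byte letter (%A0..%D0) and the bit within that byte
+     into the template; e.g. bit_nr == 10 gives "bld %B0,2".  */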
+ s[5] = 'A' + (bit_nr >> 3);
+ s[8] = '0' + (bit_nr & 7);
+ output_asm_insn (s, operands);
+}
+
+void
+avr_output_addr_vec_elt (FILE *stream, int value)
+{
+ switch_to_section (progmem_section);
+ if (AVR_HAVE_JMP_CALL)
+ fprintf (stream, "\t.word gs(.L%d)\n", value);
+ else
+ fprintf (stream, "\trjmp .L%d\n", value);
+}
+
+/* Return true if register number REGNO is safe to be allocated as a
+   scratch register (for a define_peephole2) in the current function.  */
+
+bool
+avr_hard_regno_scratch_ok (unsigned int regno)
+{
+ /* Interrupt functions can only use registers that have already been saved
+ by the prologue, even if they would normally be call-clobbered. */
+
+ if ((cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !df_regs_ever_live_p (regno))
+ return false;
+
+ /* Don't allow hard registers that might be part of the frame pointer.
+ Some places in the compiler just test for [HARD_]FRAME_POINTER_REGNUM
+ and don't care for a frame pointer that spans more than one register. */
+
+ if ((!reload_completed || frame_pointer_needed)
+ && (regno == REG_Y || regno == REG_Y + 1))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
+
+int
+avr_hard_regno_rename_ok (unsigned int old_reg,
+ unsigned int new_reg)
+{
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+
+ if ((cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !df_regs_ever_live_p (new_reg))
+ return 0;
+
+ /* Don't allow hard registers that might be part of the frame pointer.
+ Some places in the compiler just test for [HARD_]FRAME_POINTER_REGNUM
+ and don't care for a frame pointer that spans more than one register. */
+
+ if ((!reload_completed || frame_pointer_needed)
+ && (old_reg == REG_Y || old_reg == REG_Y + 1
+ || new_reg == REG_Y || new_reg == REG_Y + 1))
+ {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Output a branch that tests a single bit of a register (QI, HI, SI or DImode)
+ or memory location in the I/O space (QImode only).
+
+ Operand 0: comparison operator (must be EQ or NE, compare bit to zero).
+ Operand 1: register operand to test, or CONST_INT memory address.
+ Operand 2: bit number.
+ Operand 3: label to jump to if the test is true. */
+
+const char *
+avr_out_sbxx_branch (rtx insn, rtx operands[])
+{
+ enum rtx_code comp = GET_CODE (operands[0]);
+ int long_jump = (get_attr_length (insn) >= 4);
+ int reverse = long_jump || jump_over_one_insn_p (insn, operands[3]);
+
+ if (comp == GE)
+ comp = EQ;
+ else if (comp == LT)
+ comp = NE;
+
+ if (reverse)
+ comp = reverse_condition (comp);
+
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) < 0x40)
+ {
+ if (comp == EQ)
+ output_asm_insn (AS2 (sbis,%m1-0x20,%2), operands);
+ else
+ output_asm_insn (AS2 (sbic,%m1-0x20,%2), operands);
+ }
+ else
+ {
+ output_asm_insn (AS2 (in,__tmp_reg__,%m1-0x20), operands);
+ if (comp == EQ)
+ output_asm_insn (AS2 (sbrs,__tmp_reg__,%2), operands);
+ else
+ output_asm_insn (AS2 (sbrc,__tmp_reg__,%2), operands);
+ }
+ }
+ else /* GET_CODE (operands[1]) == REG */
+ {
+ if (GET_MODE (operands[1]) == QImode)
+ {
+ if (comp == EQ)
+ output_asm_insn (AS2 (sbrs,%1,%2), operands);
+ else
+ output_asm_insn (AS2 (sbrc,%1,%2), operands);
+ }
+ else /* HImode or SImode */
+ {
+ static char buf[] = "sbrc %A1,0";
+ int bit_nr = INTVAL (operands[2]);
+ buf[3] = (comp == EQ) ? 's' : 'c';
+ buf[6] = 'A' + (bit_nr >> 3);
+ buf[9] = '0' + (bit_nr & 7);
+ output_asm_insn (buf, operands);
+ }
+ }
+
+ if (long_jump)
+ return (AS1 (rjmp,.+4) CR_TAB
+ AS1 (jmp,%x3));
+ if (!reverse)
+ return AS1 (rjmp,%x3);
+ return "";
+}
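+
+/* Illustrative sketch (not in the original source): testing bit 3 of
+ an I/O register at data address 0x25 for EQ with a short branch
+ emits
+
+     sbis 0x05,3    ; skip next insn if the bit is set
+     rjmp .L4       ; reached only when the bit is clear
+
+ where 0x05 is the address after subtracting the 0x20 I/O offset. */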
+
+/* Worker function for TARGET_ASM_CONSTRUCTOR. */
+
+static void
+avr_asm_out_ctor (rtx symbol, int priority)
+{
+ fputs ("\t.global __do_global_ctors\n", asm_out_file);
+ default_ctor_section_asm_out_constructor (symbol, priority);
+}
+
+/* Worker function for TARGET_ASM_DESTRUCTOR. */
+
+static void
+avr_asm_out_dtor (rtx symbol, int priority)
+{
+ fputs ("\t.global __do_global_dtors\n", asm_out_file);
+ default_dtor_section_asm_out_destructor (symbol, priority);
+}
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+
+static bool
+avr_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ if (TYPE_MODE (type) == BLKmode)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ return (size == -1 || size > 8);
+ }
+ else
+ return false;
+}
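+
+/* Example (illustrative, not in the original source): a 10-byte
+ structure is returned in memory through a hidden pointer, while a
+ structure of 8 bytes or less is returned in registers. */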
+
+/* Worker function for CASE_VALUES_THRESHOLD. */
+
+unsigned int avr_case_values_threshold (void)
+{
+ return (!AVR_HAVE_JMP_CALL || TARGET_CALL_PROLOGUES) ? 8 : 17;
+}
+
+#include "gt-avr.h"
diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
new file mode 100644
index 000000000..efe782df7
--- /dev/null
+++ b/gcc/config/avr/avr.h
@@ -0,0 +1,835 @@
+/* Definitions of target machine for GNU compiler,
+ for ATMEL AVR at90s8515, ATmega103/103L, ATmega603/603L microcontrollers.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+ 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+struct base_arch_s {
+ /* Assembler only. */
+ int asm_only;
+
+ /* Core has 'MUL*' instructions. */
+ int have_mul;
+
+ /* Core has 'CALL' and 'JMP' instructions. */
+ int have_jmp_call;
+
+ /* Core has 'MOVW' and 'LPM Rx,Z' instructions. */
+ int have_movw_lpmx;
+
+ /* Core has 'ELPM' instructions. */
+ int have_elpm;
+
+ /* Core has 'ELPM Rx,Z' instructions. */
+ int have_elpmx;
+
+ /* Core has 'EICALL' and 'EIJMP' instructions. */
+ int have_eijmp_eicall;
+
+ /* Reserved for xmega architecture. */
+ int reserved;
+
+ /* Reserved for xmega architecture. */
+ int reserved2;
+
+ /* Default start of data section address for architecture. */
+ int default_data_section_start;
+
+ const char *const macro;
+
+ /* Architecture name. */
+ const char *const arch_name;
+};
+
+/* These names are used as the index into the avr_arch_types[] table
+ above. */
+
+enum avr_arch
+{
+ ARCH_UNKNOWN,
+ ARCH_AVR1,
+ ARCH_AVR2,
+ ARCH_AVR25,
+ ARCH_AVR3,
+ ARCH_AVR31,
+ ARCH_AVR35,
+ ARCH_AVR4,
+ ARCH_AVR5,
+ ARCH_AVR51,
+ ARCH_AVR6
+};
+
+struct mcu_type_s {
+ /* Device name. */
+ const char *const name;
+
+ /* Index in avr_arch_types[]. */
+ int arch;
+
+ /* Must lie outside user's namespace. NULL == no macro. */
+ const char *const macro;
+
+ /* Stack pointer is only 8 bits wide. */
+ int short_sp;
+
+ /* Start of data section. */
+ int data_section_start;
+
+ /* Name of device library. */
+ const char *const library_name;
+};
+
+/* Preprocessor macros to define depending on MCU type. */
+extern const char *avr_extra_arch_macro;
+extern const struct base_arch_s *avr_current_arch;
+extern const struct mcu_type_s *avr_current_device;
+extern const struct mcu_type_s avr_mcu_types[];
+extern const struct base_arch_s avr_arch_types[];
+
+#define TARGET_CPU_CPP_BUILTINS() avr_cpu_cpp_builtins (pfile)
+
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS)
+extern GTY(()) section *progmem_section;
+#endif
+
+#define AVR_HAVE_JMP_CALL (avr_current_arch->have_jmp_call && !TARGET_SHORT_CALLS)
+#define AVR_HAVE_MUL (avr_current_arch->have_mul)
+#define AVR_HAVE_MOVW (avr_current_arch->have_movw_lpmx)
+#define AVR_HAVE_LPMX (avr_current_arch->have_movw_lpmx)
+#define AVR_HAVE_RAMPZ (avr_current_arch->have_elpm)
+#define AVR_HAVE_EIJMP_EICALL (avr_current_arch->have_eijmp_eicall)
+#define AVR_HAVE_8BIT_SP (avr_current_device->short_sp || TARGET_TINY_STACK)
+
+#define AVR_2_BYTE_PC (!AVR_HAVE_EIJMP_EICALL)
+#define AVR_3_BYTE_PC (AVR_HAVE_EIJMP_EICALL)
+
+#define TARGET_VERSION fprintf (stderr, " (GNU assembler syntax)");
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+#ifdef IN_LIBGCC2
+/* This is to get correct SI and DI modes in libgcc2.c (32 and 64 bits). */
+#define UNITS_PER_WORD 4
+#else
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 1
+#endif
+
+#define POINTER_SIZE 16
+
+
+/* Maximum size of a reasonable data type:
+ DImode or DFmode ... */
+#define MAX_FIXED_MODE_SIZE 32
+
+#define PARM_BOUNDARY 8
+
+#define FUNCTION_BOUNDARY 8
+
+#define EMPTY_FIELD_BOUNDARY 8
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 8
+
+#define MAX_OFILE_ALIGNMENT (32768 * 8)
+
+#define TARGET_VTABLE_ENTRY_ALIGN 8
+
+#define STRICT_ALIGNMENT 0
+
+#define INT_TYPE_SIZE (TARGET_INT8 ? 8 : 16)
+#define SHORT_TYPE_SIZE (INT_TYPE_SIZE == 8 ? INT_TYPE_SIZE : 16)
+#define LONG_TYPE_SIZE (INT_TYPE_SIZE == 8 ? 16 : 32)
+#define LONG_LONG_TYPE_SIZE (INT_TYPE_SIZE == 8 ? 32 : 64)
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 32
+#define LONG_DOUBLE_TYPE_SIZE 32
+
+#define DEFAULT_SIGNED_CHAR 1
+
+#define SIZE_TYPE (INT_TYPE_SIZE == 8 ? "long unsigned int" : "unsigned int")
+#define PTRDIFF_TYPE (INT_TYPE_SIZE == 8 ? "long int" :"int")
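+
+/* Illustrative note (not in the original source): with -mint8,
+ INT_TYPE_SIZE is 8, so 'int' is a single byte, 'long' shrinks to
+ 16 bits, and size_t/ptrdiff_t switch to the 16-bit 'long' types
+ above. */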
+
+#define WCHAR_TYPE_SIZE 16
+
+#define FIRST_PSEUDO_REGISTER 36
+
+#define FIXED_REGISTERS {\
+ 1,1,/* r0 r1 */\
+ 0,0,/* r2 r3 */\
+ 0,0,/* r4 r5 */\
+ 0,0,/* r6 r7 */\
+ 0,0,/* r8 r9 */\
+ 0,0,/* r10 r11 */\
+ 0,0,/* r12 r13 */\
+ 0,0,/* r14 r15 */\
+ 0,0,/* r16 r17 */\
+ 0,0,/* r18 r19 */\
+ 0,0,/* r20 r21 */\
+ 0,0,/* r22 r23 */\
+ 0,0,/* r24 r25 */\
+ 0,0,/* r26 r27 */\
+ 0,0,/* r28 r29 */\
+ 0,0,/* r30 r31 */\
+ 1,1,/* STACK */\
+ 1,1 /* arg pointer */ }
+
+#define CALL_USED_REGISTERS { \
+ 1,1,/* r0 r1 */ \
+ 0,0,/* r2 r3 */ \
+ 0,0,/* r4 r5 */ \
+ 0,0,/* r6 r7 */ \
+ 0,0,/* r8 r9 */ \
+ 0,0,/* r10 r11 */ \
+ 0,0,/* r12 r13 */ \
+ 0,0,/* r14 r15 */ \
+ 0,0,/* r16 r17 */ \
+ 1,1,/* r18 r19 */ \
+ 1,1,/* r20 r21 */ \
+ 1,1,/* r22 r23 */ \
+ 1,1,/* r24 r25 */ \
+ 1,1,/* r26 r27 */ \
+ 0,0,/* r28 r29 */ \
+ 1,1,/* r30 r31 */ \
+ 1,1,/* STACK */ \
+ 1,1 /* arg pointer */ }
+
+#define REG_ALLOC_ORDER { \
+ 24,25, \
+ 18,19, \
+ 20,21, \
+ 22,23, \
+ 30,31, \
+ 26,27, \
+ 28,29, \
+ 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2, \
+ 0,1, \
+ 32,33,34,35 \
+ }
+
+#define ADJUST_REG_ALLOC_ORDER order_regs_for_local_alloc ()
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) avr_hard_regno_mode_ok(REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+enum reg_class {
+ NO_REGS,
+ R0_REG, /* r0 */
+ POINTER_X_REGS, /* r26 - r27 */
+ POINTER_Y_REGS, /* r28 - r29 */
+ POINTER_Z_REGS, /* r30 - r31 */
+ STACK_REG, /* STACK */
+ BASE_POINTER_REGS, /* r28 - r31 */
+ POINTER_REGS, /* r26 - r31 */
+ ADDW_REGS, /* r24 - r31 */
+ SIMPLE_LD_REGS, /* r16 - r23 */
+ LD_REGS, /* r16 - r31 */
+ NO_LD_REGS, /* r0 - r15 */
+ GENERAL_REGS, /* r0 - r31 */
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+
+#define N_REG_CLASSES (int)LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES { \
+ "NO_REGS", \
+ "R0_REG", /* r0 */ \
+ "POINTER_X_REGS", /* r26 - r27 */ \
+ "POINTER_Y_REGS", /* r28 - r29 */ \
+ "POINTER_Z_REGS", /* r30 - r31 */ \
+ "STACK_REG", /* STACK */ \
+ "BASE_POINTER_REGS", /* r28 - r31 */ \
+ "POINTER_REGS", /* r26 - r31 */ \
+ "ADDW_REGS", /* r24 - r31 */ \
+ "SIMPLE_LD_REGS", /* r16 - r23 */ \
+ "LD_REGS", /* r16 - r31 */ \
+ "NO_LD_REGS", /* r0 - r15 */ \
+ "GENERAL_REGS", /* r0 - r31 */ \
+ "ALL_REGS" }
+
+#define REG_CLASS_CONTENTS { \
+ {0x00000000,0x00000000}, /* NO_REGS */ \
+ {0x00000001,0x00000000}, /* R0_REG */ \
+ {3 << REG_X,0x00000000}, /* POINTER_X_REGS, r26 - r27 */ \
+ {3 << REG_Y,0x00000000}, /* POINTER_Y_REGS, r28 - r29 */ \
+ {3 << REG_Z,0x00000000}, /* POINTER_Z_REGS, r30 - r31 */ \
+ {0x00000000,0x00000003}, /* STACK_REG, STACK */ \
+ {(3 << REG_Y) | (3 << REG_Z), \
+ 0x00000000}, /* BASE_POINTER_REGS, r28 - r31 */ \
+ {(3 << REG_X) | (3 << REG_Y) | (3 << REG_Z), \
+ 0x00000000}, /* POINTER_REGS, r26 - r31 */ \
+ {(3 << REG_X) | (3 << REG_Y) | (3 << REG_Z) | (3 << REG_W), \
+ 0x00000000}, /* ADDW_REGS, r24 - r31 */ \
+ {0x00ff0000,0x00000000}, /* SIMPLE_LD_REGS r16 - r23 */ \
+ {(3 << REG_X)|(3 << REG_Y)|(3 << REG_Z)|(3 << REG_W)|(0xff << 16), \
+ 0x00000000}, /* LD_REGS, r16 - r31 */ \
+ {0x0000ffff,0x00000000}, /* NO_LD_REGS r0 - r15 */ \
+ {0xffffffff,0x00000000}, /* GENERAL_REGS, r0 - r31 */ \
+ {0xffffffff,0x00000003} /* ALL_REGS */ \
+}
+
+#define REGNO_REG_CLASS(R) avr_regno_reg_class(R)
+
+/* The following macro defines cover classes for Integrated Register
+ Allocator. Cover classes is a set of non-intersected register
+ classes covering all hard registers used for register allocation
+ purpose. Any move between two registers of a cover class should be
+ cheaper than load or store of the registers. The macro value is
+ array of register classes with LIM_REG_CLASSES used as the end
+ marker. */
+
+#define IRA_COVER_CLASSES \
+{ \
+ GENERAL_REGS, LIM_REG_CLASSES \
+}
+
+#define BASE_REG_CLASS (reload_completed ? BASE_POINTER_REGS : POINTER_REGS)
+
+#define INDEX_REG_CLASS NO_REGS
+
+#define REGNO_OK_FOR_BASE_P(r) (((r) < FIRST_PSEUDO_REGISTER \
+ && ((r) == REG_X \
+ || (r) == REG_Y \
+ || (r) == REG_Z \
+ || (r) == ARG_POINTER_REGNUM)) \
+ || (reg_renumber \
+ && (reg_renumber[r] == REG_X \
+ || reg_renumber[r] == REG_Y \
+ || reg_renumber[r] == REG_Z \
+ || (reg_renumber[r] \
+ == ARG_POINTER_REGNUM))))
+
+#define REGNO_OK_FOR_INDEX_P(NUM) 0
+
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
+
+#define CLASS_MAX_NREGS(CLASS, MODE) class_max_nregs (CLASS, MODE)
+
+#define STACK_PUSH_CODE POST_DEC
+
+#define STACK_GROWS_DOWNWARD
+
+#define STARTING_FRAME_OFFSET 1
+
+#define STACK_POINTER_OFFSET 1
+
+#define FIRST_PARM_OFFSET(FUNDECL) 0
+
+#define STACK_BOUNDARY 8
+
+#define STACK_POINTER_REGNUM 32
+
+#define FRAME_POINTER_REGNUM REG_Y
+
+#define ARG_POINTER_REGNUM 34
+
+#define STATIC_CHAIN_REGNUM 2
+
+#define ELIMINABLE_REGS { \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM + 1, STACK_POINTER_REGNUM + 1}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ OFFSET = avr_initial_elimination_offset (FROM, TO)
+
+#define RETURN_ADDR_RTX(count, tem) avr_return_addr_rtx (count, tem)
+
+/* Don't use Push rounding. expr.c: emit_single_push_insn is broken
+ for POST_DEC targets (PR27386). */
+/*#define PUSH_ROUNDING(NPUSHED) (NPUSHED)*/
+
+typedef struct avr_args {
+ int nregs; /* # registers available for passing */
+ int regno; /* next available register number */
+} CUMULATIVE_ARGS;
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL)
+
+#define FUNCTION_ARG_REGNO_P(r) function_arg_regno_p(r)
+
+extern int avr_reg_order[];
+
+#define RET_REGISTER avr_ret_register ()
+
+#define LIBCALL_VALUE(MODE) avr_libcall_value (MODE)
+
+#define FUNCTION_VALUE_REGNO_P(N) ((int) (N) == RET_REGISTER)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define EPILOGUE_USES(REGNO) avr_epilogue_uses(REGNO)
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define REG_OK_FOR_BASE_NOSTRICT_P(X) \
+ (REGNO (X) >= FIRST_PSEUDO_REGISTER || REG_OK_FOR_BASE_STRICT_P(X))
+
+#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* LEGITIMIZE_RELOAD_ADDRESS will allow register R26/27 to be used where it
+ is no worse than the normal base pointers R28/29 and R30/31, for example
+ when the base offset is greater than 63 bytes or for R++ or --R addressing. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_LEVELS, WIN) \
+do { \
+ if (GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ { \
+ push_reload (XEXP (X,0), XEXP (X,0), &XEXP (X,0), &XEXP (X,0), \
+ POINTER_REGS, GET_MODE (X),GET_MODE (X) , 0, 0, \
+ OPNUM, RELOAD_OTHER); \
+ goto WIN; \
+ } \
+ if (GET_CODE (X) == PLUS \
+ && REG_P (XEXP (X, 0)) \
+ && reg_equiv_constant[REGNO (XEXP (X, 0))] == 0 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && INTVAL (XEXP (X, 1)) >= 1) \
+ { \
+ int fit = INTVAL (XEXP (X, 1)) <= (64 - GET_MODE_SIZE (MODE)); \
+ if (fit) \
+ { \
+ if (reg_equiv_address[REGNO (XEXP (X, 0))] != 0) \
+ { \
+ int regno = REGNO (XEXP (X, 0)); \
+ rtx mem = make_memloc (X, regno); \
+ push_reload (XEXP (mem,0), NULL, &XEXP (mem,0), NULL, \
+ POINTER_REGS, Pmode, VOIDmode, 0, 0, \
+ 1, ADDR_TYPE (TYPE)); \
+ push_reload (mem, NULL_RTX, &XEXP (X, 0), NULL, \
+ BASE_POINTER_REGS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ } \
+ else if (! (frame_pointer_needed && XEXP (X,0) == frame_pointer_rtx)) \
+ { \
+ push_reload (X, NULL_RTX, &X, NULL, \
+ POINTER_REGS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ } \
+} while(0)
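+
+/* Illustrative example (not in the original source): an HImode access
+ to (plus (reg:HI REG_Y) (const_int 100)) exceeds the ldd/std
+ displacement limit of 64 - GET_MODE_SIZE = 62 bytes, so the macro
+ above reloads the whole address into a base pointer register,
+ unless Y is busy as the frame pointer. */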
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+#define BRANCH_COST(speed_p, predictable_p) 0
+
+#define SLOW_BYTE_ACCESS 0
+
+#define NO_FUNCTION_CSE
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+#define DATA_SECTION_ASM_OP "\t.data"
+
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+ There are no shared libraries on this target, and these sections are
+ placed in the read-only program memory, so they are not writable. */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"a\",@progbits"
+
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"a\",@progbits"
+
+#define TARGET_ASM_CONSTRUCTOR avr_asm_out_ctor
+
+#define TARGET_ASM_DESTRUCTOR avr_asm_out_dtor
+
+#define SUPPORTS_INIT_PRIORITY 0
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+#define ASM_COMMENT_START " ; "
+
+#define ASM_APP_ON "/* #APP */\n"
+
+#define ASM_APP_OFF "/* #NOAPP */\n"
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+#define TARGET_ASM_INIT_SECTIONS avr_asm_init_sections
+
+#define ASM_OUTPUT_ASCII(FILE, P, SIZE) gas_output_ascii (FILE,P,SIZE)
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == '\n' || ((C) == '$'))
+
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ fputs ("\t.comm ", (STREAM)); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ",%lu,1\n", (unsigned long)(SIZE)); \
+} while (0)
+
+#define ASM_OUTPUT_BSS(FILE, DECL, NAME, SIZE, ROUNDED) \
+ asm_output_bss ((FILE), (DECL), (NAME), (SIZE), (ROUNDED))
+
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ fputs ("\t.lcomm ", (STREAM)); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ",%d\n", (int)(SIZE)); \
+} while (0)
+
+#undef TYPE_ASM_OP
+#undef SIZE_ASM_OP
+#undef WEAK_ASM_OP
+#define TYPE_ASM_OP "\t.type\t"
+#define SIZE_ASM_OP "\t.size\t"
+#define WEAK_ASM_OP "\t.weak\t"
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+avr_asm_declare_function_name ((FILE), (NAME), (DECL))
+
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } while (0)
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+do { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+} while (0)
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } while (0)
+
+
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+ ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
+ corresponds to a particular byte value [0..255]. For any
+ given byte value, if the value in the corresponding table
+ position is zero, the given character can be output directly.
+ If the table value is 1, the byte must be output as a \ooo
+ octal escape. If the table value is anything else, then the
+ byte value should be output as a \ followed by the value
+ in the table. Note that we can use standard UN*X escape
+ sequences for many control characters, but we don't use
+ \a to represent BEL because some svr4 assemblers (e.g. on
+ the i386) don't know about that. Also, we don't use \v
+ since some versions of gas, such as 2.2, did not accept it. */
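+
+/* For illustration (not in the original source): the entry for '\n'
+ (0x0a) above is 'n', so a newline is printed as "\n"; the entry for
+ vertical tab (0x0b) is 1, forcing the octal escape mentioned above;
+ printable ASCII entries are 0 and pass through unescaped. */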
+
+#define STRING_LIMIT ((unsigned) 64)
+#define STRING_ASM_OP "\t.string\t"
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero. */
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP ".global\t"
+
+#define SET_ASM_OP "\t.set\t"
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+
+#define SUPPORTS_WEAK 1
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+sprintf (STRING, "*.%s%lu", PREFIX, (unsigned long)(NUM))
+
+#define HAS_INIT_SECTION 1
+
+#define REGISTER_NAMES { \
+ "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r8","r9","r10","r11","r12","r13","r14","r15", \
+ "r16","r17","r18","r19","r20","r21","r22","r23", \
+ "r24","r25","r26","r27","r28","r29","r30","r31", \
+ "__SP_L__","__SP_H__","argL","argH"}
+
+#define FINAL_PRESCAN_INSN(insn, operand, nop) final_prescan_insn (insn, operand,nop)
+
+#define PRINT_OPERAND(STREAM, X, CODE) print_operand (STREAM, X, CODE)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '~' || (CODE) == '!')
+
+#define PRINT_OPERAND_ADDRESS(STREAM, X) print_operand_address(STREAM, X)
+
+#define USER_LABEL_PREFIX ""
+
+#define ASSEMBLER_DIALECT AVR_HAVE_MOVW
+
+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
+{ \
+ gcc_assert (REGNO < 32); \
+ fprintf (STREAM, "\tpush\tr%d", REGNO); \
+}
+
+#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
+{ \
+ gcc_assert (REGNO < 32); \
+ fprintf (STREAM, "\tpop\tr%d", REGNO); \
+}
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ avr_output_addr_vec_elt(STREAM, VALUE)
+
+#define ASM_OUTPUT_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \
+ (switch_to_section (progmem_section), \
+ (*targetm.asm_out.internal_label) (STREAM, PREFIX, NUM))
+
+#define ASM_OUTPUT_SKIP(STREAM, N) \
+fprintf (STREAM, "\t.skip %lu,0\n", (unsigned long)(N))
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do { \
+ if ((POWER) > 1) \
+ fprintf (STREAM, "\t.p2align\t%d\n", POWER); \
+ } while (0)
+
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+ default_elf_asm_output_external (FILE, DECL, NAME)
+
+#define CASE_VECTOR_MODE HImode
+
+#undef WORD_REGISTER_OPERATIONS
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define Pmode HImode
+
+#define FUNCTION_MODE HImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define TRAMPOLINE_SIZE 4
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) notice_update_cc(EXP, INSN)
+
+/* The add insns don't set overflow in a usable way. */
+#define CC_OVERFLOW_UNUSABLE 01000
+/* The mov,and,or,xor insns don't set carry. That's ok though as the
+ Z bit is all we need when doing unsigned comparisons on the result of
+ these insns (since they're always with 0). However, conditions.h has
+ CC_NO_OVERFLOW defined for this purpose. Rename it to something more
+ understandable. */
+#define CC_NO_CARRY CC_NO_OVERFLOW
+
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "/* profiler %d */", (LABELNO))
+
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) (LENGTH =\
+ adjust_insn_length (INSN, LENGTH))
+
+extern const char *avr_device_to_arch (int argc, const char **argv);
+extern const char *avr_device_to_data_start (int argc, const char **argv);
+extern const char *avr_device_to_startfiles (int argc, const char **argv);
+extern const char *avr_device_to_devicelib (int argc, const char **argv);
+
+#define EXTRA_SPEC_FUNCTIONS \
+ { "device_to_arch", avr_device_to_arch }, \
+ { "device_to_data_start", avr_device_to_data_start }, \
+ { "device_to_startfile", avr_device_to_startfiles }, \
+ { "device_to_devicelib", avr_device_to_devicelib },
+
+#define CPP_SPEC ""
+
+#define CC1_SPEC ""
+
+#define CC1PLUS_SPEC "%{!frtti:-fno-rtti} \
+ %{!fenforce-eh-specs:-fno-enforce-eh-specs} \
+ %{!fexceptions:-fno-exceptions}"
+/* A C string constant that tells the GCC driver program options to
+ pass to `cc1plus'. */
+
+#define ASM_SPEC "%{mmcu=avr25:-mmcu=avr2;mmcu=avr35:-mmcu=avr3;mmcu=avr31:-mmcu=avr3;mmcu=avr51:-mmcu=avr5;\
+mmcu=*:-mmcu=%*}"
+
+#define LINK_SPEC "\
+%{mrelax:--relax\
+ %{mpmem-wrap-around:%{mmcu=at90usb8*:--pmem-wrap-around=8k}\
+ %{mmcu=atmega16*:--pmem-wrap-around=16k}\
+ %{mmcu=atmega32*|\
+ mmcu=at90can32*:--pmem-wrap-around=32k}\
+ %{mmcu=atmega64*|\
+ mmcu=at90can64*|\
+ mmcu=at90usb64*:--pmem-wrap-around=64k}}}\
+%:device_to_arch(%{mmcu=*:%*})\
+%:device_to_data_start(%{mmcu=*:%*})"
+
+#define LIB_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lc }}}}}"
+
+#define LIBSTDCXX "gcc"
+/* No libstdc++ for now. Empty string doesn't work. */
+
+#define LIBGCC_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lgcc }}}}}"
+
+#define STARTFILE_SPEC "%:device_to_startfile(%{mmcu=*:%*})"
+
+#define ENDFILE_SPEC ""
+
+/* This is the default without any -mmcu=* option (AT90S*). */
+#define MULTILIB_DEFAULTS { "mmcu=avr2" }
+
+#define TEST_HARD_REG_CLASS(CLASS, REGNO) \
+ TEST_HARD_REG_BIT (reg_class_contents[ (int) (CLASS)], REGNO)
+
+/* Note that the other files fail to use these
+ in some of the places where they should. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+#define AS2(a,b,c) #a " " #b "," #c
+#define AS2C(b,c) " " #b "," #c
+#define AS3(a,b,c,d) #a " " #b "," #c "," #d
+#define AS1(a,b) #a " " #b
+#else
+#define AS1(a,b) "a b"
+#define AS2(a,b,c) "a b,c"
+#define AS2C(b,c) " b,c"
+#define AS3(a,b,c,d) "a b,c,d"
+#endif
+#define OUT_AS1(a,b) output_asm_insn (AS1(a,b), operands)
+#define OUT_AS2(a,b,c) output_asm_insn (AS2(a,b,c), operands)
+#define CR_TAB "\n\t"
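+
+/* For illustration (not in the original source): these macros simply
+ stringize and paste their arguments into assembler templates, e.g.
+
+     AS2 (mov,%A0,%2)  ==>  "mov %A0,%2"
+     AS1 (rjmp,%x3)    ==>  "rjmp %x3"
+
+ and CR_TAB joins several such templates into one multi-instruction
+ template. */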
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#define DWARF2_DEBUGGING_INFO 1
+
+#define DWARF2_ADDR_SIZE 4
+
+#define OBJECT_FORMAT_ELF
+
+#define INCOMING_RETURN_ADDR_RTX avr_incoming_return_addr_rtx ()
+#define INCOMING_FRAME_SP_OFFSET (AVR_3_BYTE_PC ? 3 : 2)
+
+/* The caller's stack pointer value immediately before the call
+ is one byte below the first argument. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) -1
+
+#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
+ avr_hard_regno_rename_ok (OLD_REG, NEW_REG)
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+struct GTY(()) machine_function
+{
+ /* 'true' - if current function is a naked function. */
+ int is_naked;
+
+ /* 'true' - if current function is an interrupt function
+ as specified by the "interrupt" attribute. */
+ int is_interrupt;
+
+ /* 'true' - if current function is a signal function
+ as specified by the "signal" attribute. */
+ int is_signal;
+
+ /* 'true' - if current function is a 'task' function
+ as specified by the "OS_task" attribute. */
+ int is_OS_task;
+
+ /* 'true' - if current function is a 'main' function
+ as specified by the "OS_main" attribute. */
+ int is_OS_main;
+
+ /* Current function stack size. */
+ int stack_usage;
+};
diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
new file mode 100644
index 000000000..1fc6fee57
--- /dev/null
+++ b/gcc/config/avr/avr.md
@@ -0,0 +1,3248 @@
+;; Machine description for GNU compiler,
+;; for ATMEL AVR micro controllers.
+;; Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008,
+;; 2009, 2010 Free Software Foundation, Inc.
+;; Contributed by Denis Chertykov (chertykov@gmail.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Special characters after '%':
+;; A No effect (add 0).
+;; B Add 1 to REG number, MEM address or CONST_INT.
+;; C Add 2.
+;; D Add 3.
+;; j Branch condition.
+;; k Reverse branch condition.
+;;..m..Constant Direct Data memory address.
+;; o Displacement for (mem (plus (reg) (const_int))) operands.
+;; p POST_INC or PRE_DEC address as a pointer (X, Y, Z)
+;; r POST_INC or PRE_DEC address as a register (r26, r28, r30)
+;;..x..Constant Direct Program memory address.
+;; ~ Output 'r' if not AVR_HAVE_JMP_CALL.
+;; ! Output 'e' if AVR_HAVE_EIJMP_EICALL.
+
+;; UNSPEC usage:
+;; 0 Length of a string, see "strlenhi".
+;; 1 Jump by register pair Z or by table addressed by Z, see "casesi".
+
+(define_constants
+ [(REG_X 26)
+ (REG_Y 28)
+ (REG_Z 30)
+ (REG_W 24)
+ (REG_SP 32)
+ (TMP_REGNO 0) ; temporary register r0
+ (ZERO_REGNO 1) ; zero register r1
+
+ (SREG_ADDR 0x5F)
+ (RAMPZ_ADDR 0x5B)
+
+ (UNSPEC_STRLEN 0)
+ (UNSPEC_INDEX_JMP 1)
+ (UNSPEC_SEI 2)
+ (UNSPEC_CLI 3)
+
+ (UNSPECV_PROLOGUE_SAVES 0)
+ (UNSPECV_EPILOGUE_RESTORES 1)
+ (UNSPECV_WRITE_SP_IRQ_ON 2)
+ (UNSPECV_WRITE_SP_IRQ_OFF 3)
+ (UNSPECV_GOTO_RECEIVER 4)])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Condition code settings.
+(define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
+ (const_string "none"))
+
+(define_attr "type" "branch,branch1,arith,xcall"
+ (const_string "arith"))
+
+(define_attr "mcu_have_movw" "yes,no"
+ (const (if_then_else (symbol_ref "AVR_HAVE_MOVW")
+ (const_string "yes")
+ (const_string "no"))))
+
+(define_attr "mcu_mega" "yes,no"
+ (const (if_then_else (symbol_ref "AVR_HAVE_JMP_CALL")
+ (const_string "yes")
+ (const_string "no"))))
+
+
+;; The size of instructions in bytes.
+;; XXX may depend on "cc"
+
+(define_attr "length" ""
+ (cond [(eq_attr "type" "branch")
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -63))
+ (le (minus (pc) (match_dup 0))
+ (const_int 62)))
+ (const_int 1)
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -2045))
+ (le (minus (pc) (match_dup 0))
+ (const_int 2045)))
+ (const_int 2)
+ (const_int 3)))
+ (eq_attr "type" "branch1")
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -62))
+ (le (minus (pc) (match_dup 0))
+ (const_int 61)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -2044))
+ (le (minus (pc) (match_dup 0))
+ (const_int 2043)))
+ (const_int 3)
+ (const_int 4)))
+ (eq_attr "type" "xcall")
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 1)
+ (const_int 2))]
+ (const_int 2)))
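+
+;; For illustration (not in the original source): a "branch" insn whose
+;; target lies within -63..62 words gets length 1 (a plain conditional
+;; branch); within +-2045 words it gets length 2 (a reversed branch
+;; around an rjmp); anything farther gets length 3 (a reversed branch
+;; around a two-word jmp).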
+
+;; Define mode iterator
+(define_mode_iterator QISI [(QI "") (HI "") (SI "")])
+(define_mode_iterator QIDI [(QI "") (HI "") (SI "") (DI "")])
+(define_mode_iterator HIDI [(HI "") (SI "") (DI "")])
+(define_mode_iterator HISI [(HI "") (SI "")])
+
+;;========================================================================
+;; The following is used by nonlocal_goto and setjmp.
+;; The receiver pattern will create no instructions since internally
+;; virtual_stack_vars = hard_frame_pointer + 1, so the RTL becomes R28=R28.
+;; This avoids creating add/sub offsets in the frame_pointer save/restore.
+;; The 'null' receiver also avoids problems with the optimiser not
+;; recognising the incoming jump and removing code that resets the
+;; frame_pointer.  The code is derived from builtins.c.
+
+(define_expand "nonlocal_goto_receiver"
+ [(set (reg:HI REG_Y)
+ (unspec_volatile:HI [(const_int 0)] UNSPECV_GOTO_RECEIVER))]
+ ""
+ {
+ emit_move_insn (virtual_stack_vars_rtx,
+ gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx,
+ gen_int_mode (STARTING_FRAME_OFFSET,
+ Pmode)));
+ /* This might change the hard frame pointer in ways that aren't
+ apparent to early optimization passes, so force a clobber. */
+ emit_clobber (hard_frame_pointer_rtx);
+ DONE;
+ })
+
+
+;; Defining nonlocal_goto_receiver means we must also define this,
+;; even though its function is identical to that in builtins.c.
+
+(define_expand "nonlocal_goto"
+ [
+ (use (match_operand 0 "general_operand"))
+ (use (match_operand 1 "general_operand"))
+ (use (match_operand 2 "general_operand"))
+ (use (match_operand 3 "general_operand"))
+ ]
+ ""
+{
+ rtx r_label = copy_to_reg (operands[1]);
+ rtx r_fp = operands[3];
+ rtx r_sp = operands[2];
+
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
+
+ emit_move_insn (hard_frame_pointer_rtx, r_fp);
+ emit_stack_restore (SAVE_NONLOCAL, r_sp);
+
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
+
+ emit_indirect_jump (r_label);
+
+ DONE;
+})
+
+
+(define_insn "*pushqi"
+ [(set (mem:QI (post_dec:HI (reg:HI REG_SP)))
+ (match_operand:QI 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %0
+ push __zero_reg__"
+ [(set_attr "length" "1,1")])
+
+(define_insn "*pushhi"
+ [(set (mem:HI (post_dec:HI (reg:HI REG_SP)))
+ (match_operand:HI 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %B0\;push %A0
+ push __zero_reg__\;push __zero_reg__"
+ [(set_attr "length" "2,2")])
+
+(define_insn "*pushsi"
+ [(set (mem:SI (post_dec:HI (reg:HI REG_SP)))
+ (match_operand:SI 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %D0\;push %C0\;push %B0\;push %A0
+ push __zero_reg__\;push __zero_reg__\;push __zero_reg__\;push __zero_reg__"
+ [(set_attr "length" "4,4")])
+
+(define_insn "*pushsf"
+ [(set (mem:SF (post_dec:HI (reg:HI REG_SP)))
+ (match_operand:SF 0 "register_operand" "r"))]
+ ""
+ "push %D0
+ push %C0
+ push %B0
+ push %A0"
+ [(set_attr "length" "4")])
+
+;;========================================================================
+;; move byte
+;; The last alternative (any immediate constant to any register) is
+;; very expensive. It should be optimized by peephole2 if a scratch
+;; register is available, but then that register could just as well be
+;; allocated for the variable we are loading. But, most of NO_LD_REGS
+;; are call-saved registers, and most of LD_REGS are call-used registers,
+;; so this may still be a win for registers live across function calls.
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "/* One of the ops has to be in a register. */
+ if (!register_operand(operand0, QImode)
+ && ! (register_operand(operand1, QImode) || const0_rtx == operand1))
+ operands[1] = copy_to_mode_reg(QImode, operand1);
+ ")
+
+(define_insn "*movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
+ (match_operand:QI 1 "general_operand" "rL,i,rL,Qm,r,q,i"))]
+ "(register_operand (operands[0],QImode)
+ || register_operand (operands[1], QImode) || const0_rtx == operands[1])"
+ "* return output_movqi (insn, operands, NULL);"
+ [(set_attr "length" "1,1,5,5,1,1,4")
+ (set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
+
+;; This is used in peephole2 to optimize loading immediate constants
+;; if a scratch register from LD_REGS happens to be available.
+
+(define_insn "*reload_inqi"
+ [(set (match_operand:QI 0 "register_operand" "=l")
+ (match_operand:QI 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ "ldi %2,lo8(%1)
+ mov %0,%2"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+(define_peephole2
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:QI 0 "l_register_operand" "")
+ (match_operand:QI 1 "immediate_operand" ""))]
+ "(operands[1] != const0_rtx
+ && operands[1] != const1_rtx
+ && operands[1] != constm1_rtx)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (match_dup 2))])]
+ "")
+
+;;============================================================================
+;; move word (16 bit)
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand(operand0, HImode)
+ && !(register_operand(operand1, HImode) || const0_rtx == operands[1]))
+ {
+ operands[1] = copy_to_mode_reg(HImode, operand1);
+ }
+}")
+
+(define_insn "*movhi_sp"
+ [(set (match_operand:HI 0 "register_operand" "=q,r")
+ (match_operand:HI 1 "register_operand" "r,q"))]
+ "((stack_register_operand(operands[0], HImode) && register_operand (operands[1], HImode))
+ || (register_operand (operands[0], HImode) && stack_register_operand(operands[1], HImode)))"
+ "* return output_movhi (insn, operands, NULL);"
+ [(set_attr "length" "5,2")
+ (set_attr "cc" "none,none")])
+
+(define_insn "movhi_sp_r_irq_off"
+ [(set (match_operand:HI 0 "stack_register_operand" "=q")
+ (unspec_volatile:HI [(match_operand:HI 1 "register_operand" "r")]
+ UNSPECV_WRITE_SP_IRQ_OFF))]
+ ""
+ "out __SP_H__, %B1
+ out __SP_L__, %A1"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+(define_insn "movhi_sp_r_irq_on"
+ [(set (match_operand:HI 0 "stack_register_operand" "=q")
+ (unspec_volatile:HI [(match_operand:HI 1 "register_operand" "r")]
+ UNSPECV_WRITE_SP_IRQ_ON))]
+ ""
+ "cli
+ out __SP_H__, %B1
+ sei
+ out __SP_L__, %A1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
+
+(define_peephole2
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:HI 0 "l_register_operand" "")
+ (match_operand:HI 1 "immediate_operand" ""))]
+ "(operands[1] != const0_rtx
+ && operands[1] != constm1_rtx)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (match_dup 2))])]
+ "")
+
+;; '*' because it is not used in rtl generation, only in the peephole above.
+(define_insn "*reload_inhi"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (match_operand:HI 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ "* return output_reload_inhi (insn, operands, NULL);"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
+
+(define_insn "*movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
+ (match_operand:HI 1 "general_operand" "rL,m,rL,i,i,r,q"))]
+ "(register_operand (operands[0],HImode)
+ || register_operand (operands[1],HImode) || const0_rtx == operands[1])"
+ "* return output_movhi (insn, operands, NULL);"
+ [(set_attr "length" "2,6,7,2,6,5,2")
+ (set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
+
+(define_peephole2 ; movw
+ [(set (match_operand:QI 0 "even_register_operand" "")
+ (match_operand:QI 1 "even_register_operand" ""))
+ (set (match_operand:QI 2 "odd_register_operand" "")
+ (match_operand:QI 3 "odd_register_operand" ""))]
+ "(AVR_HAVE_MOVW
+ && REGNO (operands[0]) == REGNO (operands[2]) - 1
+ && REGNO (operands[1]) == REGNO (operands[3]) - 1)"
+ [(set (match_dup 4) (match_dup 5))]
+ {
+ operands[4] = gen_rtx_REG (HImode, REGNO (operands[0]));
+ operands[5] = gen_rtx_REG (HImode, REGNO (operands[1]));
+ })
+
+(define_peephole2 ; movw_r
+ [(set (match_operand:QI 0 "odd_register_operand" "")
+ (match_operand:QI 1 "odd_register_operand" ""))
+ (set (match_operand:QI 2 "even_register_operand" "")
+ (match_operand:QI 3 "even_register_operand" ""))]
+ "(AVR_HAVE_MOVW
+ && REGNO (operands[2]) == REGNO (operands[0]) - 1
+ && REGNO (operands[3]) == REGNO (operands[1]) - 1)"
+ [(set (match_dup 4) (match_dup 5))]
+ {
+ operands[4] = gen_rtx_REG (HImode, REGNO (operands[2]));
+ operands[5] = gen_rtx_REG (HImode, REGNO (operands[3]));
+ })
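+
+;; For illustration (not in the original source): on MOVW devices the
+;; two peepholes above combine byte-pair copies such as
+;;
+;;     mov r24,r20
+;;     mov r25,r21
+;;
+;; into a single "movw r24,r20", whichever byte happens to be moved
+;; first.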
+
+;;==========================================================================
+;; move double word (32 bit)
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand0, SImode)
+ && !(register_operand (operand1, SImode) || const0_rtx == operand1))
+ {
+ operands[1] = copy_to_mode_reg (SImode, operand1);
+ }
+}")
+
+
+
+(define_peephole2 ; movsi_lreg_const
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:SI 0 "l_register_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))
+ (match_dup 2)]
+ "(operands[1] != const0_rtx
+ && operands[1] != constm1_rtx)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (match_dup 2))])]
+ "")
+
+;; '*' because it is not used in rtl generation.
+(define_insn "*reload_insi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ "* return output_reload_insisf (insn, operands, NULL);"
+ [(set_attr "length" "8")
+ (set_attr "cc" "none")])
+
+
+(define_insn "*movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
+ (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))]
+ "(register_operand (operands[0],SImode)
+ || register_operand (operands[1],SImode) || const0_rtx == operands[1])"
+ "* return output_movsisf (insn, operands, NULL);"
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
+
+;;=========================================================================
+;; move floating point numbers (32 bit)
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register. */
+ if (!register_operand (operand1, SFmode)
+ && !register_operand (operand0, SFmode))
+ {
+ operands[1] = copy_to_mode_reg (SFmode, operand1);
+ }
+}")
+
+(define_insn "*movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
+ (match_operand:SF 1 "general_operand" "r,G,Qm,r,F,F"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "* return output_movsisf (insn, operands, NULL);"
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
+
+;;=========================================================================
+;; move string (like memcpy)
+;; implement as RTL loop
+
+(define_expand "movmemhi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:HI 2 "const_int_operand" ""))
+ (use (match_operand:HI 3 "const_int_operand" ""))])]
+ ""
+ "{
+ int prob;
+ HOST_WIDE_INT count;
+ enum machine_mode mode;
+ rtx label = gen_label_rtx ();
+ rtx loop_reg;
+ rtx jump;
+
+ /* Copy pointers into new pseudos - they will be changed. */
+ rtx addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ rtx addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+
+ /* Create rtx for tmp register - we use this as scratch. */
+ rtx tmp_reg_rtx = gen_rtx_REG (QImode, TMP_REGNO);
+
+ if (GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ if (count <= 0)
+ FAIL;
+
+ /* Work out branch probability for later use. */
+ prob = REG_BR_PROB_BASE - REG_BR_PROB_BASE / count;
+
+ /* See if the count fits in 8 bits. */
+ mode = (count < 0x100) ? QImode : HImode;
+ /* Create loop counter register. */
+ loop_reg = copy_to_mode_reg (mode, gen_int_mode (count, mode));
+
+ /* Now create RTL code for move loop. */
+ /* Label at top of loop. */
+ emit_label (label);
+
+ /* Move one byte into scratch and inc pointer. */
+ emit_move_insn (tmp_reg_rtx, gen_rtx_MEM (QImode, addr1));
+ emit_move_insn (addr1, gen_rtx_PLUS (Pmode, addr1, const1_rtx));
+
+ /* Move to mem and inc pointer. */
+ emit_move_insn (gen_rtx_MEM (QImode, addr0), tmp_reg_rtx);
+ emit_move_insn (addr0, gen_rtx_PLUS (Pmode, addr0, const1_rtx));
+
+ /* Decrement count. */
+ emit_move_insn (loop_reg, gen_rtx_PLUS (mode, loop_reg, constm1_rtx));
+
+ /* Compare with zero and jump if not equal. */
+ emit_cmp_and_jump_insns (loop_reg, const0_rtx, NE, NULL_RTX, mode, 1,
+ label);
+ /* Set jump probability based on loop count. */
+ jump = get_last_insn ();
+ add_reg_note (jump, REG_BR_PROB, GEN_INT (prob));
+ DONE;
+}")
+
+;;=========================================================================
+;; memset (%0, %2, %1)
+
+(define_expand "setmemhi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:HI 1 "const_int_operand" ""))
+ (use (match_operand:HI 3 "const_int_operand" "n"))
+ (clobber (match_scratch:HI 4 ""))
+ (clobber (match_dup 5))])]
+ ""
+ "{
+ rtx addr0;
+ int cnt8;
+ enum machine_mode mode;
+
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (GET_CODE (operands[1]) != CONST_INT)
+ FAIL;
+
+ cnt8 = byte_immediate_operand (operands[1], GET_MODE (operands[1]));
+ mode = cnt8 ? QImode : HImode;
+ operands[5] = gen_rtx_SCRATCH (mode);
+ operands[1] = copy_to_mode_reg (mode,
+ gen_int_mode (INTVAL (operands[1]), mode));
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ operands[0] = gen_rtx_MEM (BLKmode, addr0);
+}")
+
+(define_insn "*clrmemqi"
+ [(set (mem:BLK (match_operand:HI 0 "register_operand" "e"))
+ (const_int 0))
+ (use (match_operand:QI 1 "register_operand" "r"))
+ (use (match_operand:QI 2 "const_int_operand" "n"))
+ (clobber (match_scratch:HI 3 "=0"))
+ (clobber (match_scratch:QI 4 "=&1"))]
+ ""
+ "st %a0+,__zero_reg__
+ dec %1
+ brne .-6"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*clrmemhi"
+ [(set (mem:BLK (match_operand:HI 0 "register_operand" "e,e"))
+ (const_int 0))
+ (use (match_operand:HI 1 "register_operand" "!w,d"))
+ (use (match_operand:HI 2 "const_int_operand" "n,n"))
+ (clobber (match_scratch:HI 3 "=0,0"))
+ (clobber (match_scratch:HI 4 "=&1,&1"))]
+ ""
+ "*{
+ if (which_alternative==0)
+ return (AS2 (st,%a0+,__zero_reg__) CR_TAB
+ AS2 (sbiw,%A1,1) CR_TAB
+ AS1 (brne,.-6));
+ else
+ return (AS2 (st,%a0+,__zero_reg__) CR_TAB
+ AS2 (subi,%A1,1) CR_TAB
+ AS2 (sbci,%B1,0) CR_TAB
+ AS1 (brne,.-8));
+}"
+ [(set_attr "length" "3,4")
+ (set_attr "cc" "clobber,clobber")])
+
+(define_expand "strlenhi"
+ [(set (match_dup 4)
+ (unspec:HI [(match_operand:BLK 1 "memory_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand:HI 3 "immediate_operand" "")]
+ UNSPEC_STRLEN))
+ (set (match_dup 4) (plus:HI (match_dup 4)
+ (const_int -1)))
+ (set (match_operand:HI 0 "register_operand" "")
+ (minus:HI (match_dup 4)
+ (match_dup 5)))]
+ ""
+ "{
+ rtx addr;
+ if (! (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0))
+ FAIL;
+ addr = copy_to_mode_reg (Pmode, XEXP (operands[1],0));
+ operands[1] = gen_rtx_MEM (BLKmode, addr);
+ operands[5] = addr;
+ operands[4] = gen_reg_rtx (HImode);
+}")
+
+(define_insn "*strlenhi"
+ [(set (match_operand:HI 0 "register_operand" "=e")
+ (unspec:HI [(mem:BLK (match_operand:HI 1 "register_operand" "%0"))
+ (const_int 0)
+ (match_operand:HI 2 "immediate_operand" "i")]
+ UNSPEC_STRLEN))]
+ ""
+ "ld __tmp_reg__,%a0+
+ tst __tmp_reg__
+ brne .-6"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+;+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+; add bytes
+
+(define_insn "addqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,d,r,r")
+ (plus:QI (match_operand:QI 1 "register_operand" "%0,0,0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i,P,N")))]
+ ""
+ "@
+ add %0,%2
+ subi %0,lo8(-(%2))
+ inc %0
+ dec %0"
+ [(set_attr "length" "1,1,1,1")
+ (set_attr "cc" "set_czn,set_czn,set_zn,set_zn")])
+
+
+(define_expand "addhi3"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (plus:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ short tmp = INTVAL (operands[2]);
+ operands[2] = GEN_INT(tmp);
+ }
+}")
+
+
+(define_insn "*addhi3_zero_extend"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (zero_extend:HI
+ (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1
+ adc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addhi3_zero_extend1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (match_operand:HI 1 "register_operand" "%0")
+ (zero_extend:HI
+ (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "add %A0,%2
+ adc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addhi3_sp_R_pc2"
+ [(set (match_operand:HI 1 "stack_register_operand" "=q")
+ (plus:HI (match_operand:HI 2 "stack_register_operand" "q")
+ (match_operand:HI 0 "avr_sp_immediate_operand" "R")))]
+ "AVR_2_BYTE_PC"
+ "*{
+ if (CONST_INT_P (operands[0]))
+ {
+ switch(INTVAL (operands[0]))
+ {
+ case -6:
+ return \"rcall .\" CR_TAB
+ \"rcall .\" CR_TAB
+ \"rcall .\";
+ case -5:
+ return \"rcall .\" CR_TAB
+ \"rcall .\" CR_TAB
+ \"push __tmp_reg__\";
+ case -4:
+ return \"rcall .\" CR_TAB
+ \"rcall .\";
+ case -3:
+ return \"rcall .\" CR_TAB
+ \"push __tmp_reg__\";
+ case -2:
+ return \"rcall .\";
+ case -1:
+ return \"push __tmp_reg__\";
+ case 0:
+ return \"\";
+ case 1:
+ return \"pop __tmp_reg__\";
+ case 2:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 3:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 4:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 5:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ }
+ }
+ return \"bug\";
+ }"
+ [(set (attr "length")
+ (cond [(eq (const_int -6) (symbol_ref "INTVAL (operands[0])")) (const_int 3)
+ (eq (const_int -5) (symbol_ref "INTVAL (operands[0])")) (const_int 3)
+ (eq (const_int -4) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int -3) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int -2) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int -1) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int 0) (symbol_ref "INTVAL (operands[0])")) (const_int 0)
+ (eq (const_int 1) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int 2) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int 3) (symbol_ref "INTVAL (operands[0])")) (const_int 3)
+ (eq (const_int 4) (symbol_ref "INTVAL (operands[0])")) (const_int 4)
+ (eq (const_int 5) (symbol_ref "INTVAL (operands[0])")) (const_int 5)]
+ (const_int 0)))])
+
+(define_insn "*addhi3_sp_R_pc3"
+ [(set (match_operand:HI 1 "stack_register_operand" "=q")
+ (plus:HI (match_operand:HI 2 "stack_register_operand" "q")
+ (match_operand:QI 0 "avr_sp_immediate_operand" "R")))]
+ "AVR_3_BYTE_PC"
+ "*{
+ if (CONST_INT_P (operands[0]))
+ {
+ switch(INTVAL (operands[0]))
+ {
+ case -6:
+ return \"rcall .\" CR_TAB
+ \"rcall .\";
+ case -5:
+ return \"rcall .\" CR_TAB
+ \"push __tmp_reg__\" CR_TAB
+ \"push __tmp_reg__\";
+ case -4:
+ return \"rcall .\" CR_TAB
+ \"push __tmp_reg__\";
+ case -3:
+ return \"rcall .\";
+ case -2:
+ return \"push __tmp_reg__\" CR_TAB
+ \"push __tmp_reg__\";
+ case -1:
+ return \"push __tmp_reg__\";
+ case 0:
+ return \"\";
+ case 1:
+ return \"pop __tmp_reg__\";
+ case 2:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 3:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 4:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ case 5:
+ return \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\" CR_TAB
+ \"pop __tmp_reg__\";
+ }
+ }
+ return \"bug\";
+ }"
+ [(set (attr "length")
+ (cond [(eq (const_int -6) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int -5) (symbol_ref "INTVAL (operands[0])")) (const_int 3)
+ (eq (const_int -4) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int -3) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int -2) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int -1) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int 0) (symbol_ref "INTVAL (operands[0])")) (const_int 0)
+ (eq (const_int 1) (symbol_ref "INTVAL (operands[0])")) (const_int 1)
+ (eq (const_int 2) (symbol_ref "INTVAL (operands[0])")) (const_int 2)
+ (eq (const_int 3) (symbol_ref "INTVAL (operands[0])")) (const_int 3)
+ (eq (const_int 4) (symbol_ref "INTVAL (operands[0])")) (const_int 4)
+ (eq (const_int 5) (symbol_ref "INTVAL (operands[0])")) (const_int 5)]
+ (const_int 0)))])
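+
+;; A note on the two patterns above (illustrative, not in the original
+;; source): "rcall ." pushes the return address (2 bytes with a 2-byte
+;; PC, 3 bytes with a 3-byte PC) and falls through, and each push/pop
+;; of __tmp_reg__ moves SP by one byte.  So e.g. SP -= 4 on a 2-byte-PC
+;; device is two rcalls.  Unlike an in/adiw/out sequence, these forms
+;; update SP atomically, so no interrupt locking is needed.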
+
+(define_insn "*addhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,!w,!w,d,r,r")
+ (plus:HI
+ (match_operand:HI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,I,J,i,P,N")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2
+ adiw %A0,%2
+ sbiw %A0,%n2
+ subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))
+ sec\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__
+ sec\;sbc %A0,__zero_reg__\;sbc %B0,__zero_reg__"
+ [(set_attr "length" "2,1,1,2,3,3")
+ (set_attr "cc" "set_n,set_czn,set_czn,set_czn,set_n,set_n")])
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,!w,!w,d,r,r")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,J,i,P,N")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
+ adiw %0,%2\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__
+ sbiw %0,%n2\;sbc %C0,__zero_reg__\;sbc %D0,__zero_reg__
+ subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))
+ sec\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__
+ sec\;sbc %A0,__zero_reg__\;sbc %B0,__zero_reg__\;sbc %C0,__zero_reg__\;sbc %D0,__zero_reg__"
+ [(set_attr "length" "4,3,3,4,5,5")
+ (set_attr "cc" "set_n,set_n,set_czn,set_czn,set_n,set_n")])
+
+(define_insn "*addsi3_zero_extend"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (zero_extend:SI
+ (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1
+ adc %B0,__zero_reg__
+ adc %C0,__zero_reg__
+ adc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+;-----------------------------------------------------------------------------
+; sub bytes
+(define_insn "subqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,d")
+ (minus:QI (match_operand:QI 1 "register_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2
+ subi %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,d")
+ (minus:HI (match_operand:HI 1 "register_operand" "0,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %A0,%A2\;sbc %B0,%B2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "*subhi3_zero_extend1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 1 "register_operand" "0")
+ (zero_extend:HI
+ (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2
+ sbc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "*subsi3_zero_extend"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (zero_extend:SI
+ (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2
+ sbc %B0,__zero_reg__
+ sbc %C0,__zero_reg__
+ sbc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+;******************************************************************************
+; mul
+
+(define_expand "mulqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (mult:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "register_operand" "")))]
+ ""
+ "{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mulqi3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mulqi3_enh"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2
+ mov %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulqi3_call"
+ [(set (reg:QI 24) (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 22) (match_operand:QI 2 "register_operand" ""))
+ (parallel [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))])
+ (set (match_operand:QI 0 "register_operand" "") (reg:QI 24))]
+ ""
+ "")
+
+(define_insn "*mulqi3_call"
+ [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))]
+ "!AVR_HAVE_MUL"
+ "%~call __mulqi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "mulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
+ (sign_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
+ "AVR_HAVE_MUL"
+ "muls %1,%2
+ movw %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "umulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2
+ movw %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulhi3"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "register_operand" "")))]
+ ""
+ "
+{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mulhi3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mulhi3_enh"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (match_operand:HI 1 "register_operand" "r")
+ (match_operand:HI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %A1,%A2
+ movw %0,r0
+ mul %A1,%B2
+ add %B0,r0
+ mul %B1,%A2
+ add %B0,r0
+ clr r1"
+ [(set_attr "length" "7")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulhi3_call"
+ [(set (reg:HI 24) (match_operand:HI 1 "register_operand" ""))
+ (set (reg:HI 22) (match_operand:HI 2 "register_operand" ""))
+ (parallel [(set (reg:HI 24) (mult:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 22))
+ (clobber (reg:QI 21))])
+ (set (match_operand:HI 0 "register_operand" "") (reg:HI 24))]
+ ""
+ "")
+
+(define_insn "*mulhi3_call"
+ [(set (reg:HI 24) (mult:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 22))
+ (clobber (reg:QI 21))]
+ "!AVR_HAVE_MUL"
+ "%~call __mulhi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; Operand 2 (reg:SI 18) not clobbered on the enhanced core.
+;; All call-used registers clobbered otherwise - normal library call.
+(define_expand "mulsi3"
+ [(set (reg:SI 22) (match_operand:SI 1 "register_operand" ""))
+ (set (reg:SI 18) (match_operand:SI 2 "register_operand" ""))
+ (parallel [(set (reg:SI 22) (mult:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_operand:SI 0 "register_operand" "") (reg:SI 22))]
+ "AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mulsi3_call"
+ [(set (reg:SI 22) (mult:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ "AVR_HAVE_MUL"
+ "%~call __mulsi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / %
+; divmod
+
+;; Generate libgcc.S calls ourselves, because:
+;; - we know exactly which registers are clobbered (for QI and HI
+;; modes, some of the call-used registers are preserved)
+;; - we get both the quotient and the remainder at no extra cost
+;; - we split the patterns only after the first CSE passes because
+;;   CSE has problems operating on hard regs.
+;;
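+;; Editor's note, illustrating the point above with a hypothetical snippet:
+;; for "q = a / b; r = a % b;" both results come out of one __divmodqi4
+;; call, since the call insn below sets the quotient and the remainder in a
+;; single parallel.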
+(define_insn_and_split "divmodqi4"
+ [(parallel [(set (match_operand:QI 0 "pseudo_register_operand" "")
+ (div:QI (match_operand:QI 1 "pseudo_register_operand" "")
+ (match_operand:QI 2 "pseudo_register_operand" "")))
+ (set (match_operand:QI 3 "pseudo_register_operand" "")
+ (mod:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))
+ (clobber (reg:QI 24))
+ (clobber (reg:QI 25))])]
+ ""
+ "this divmodqi4 pattern should have been splitted;"
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+ (parallel [(set (reg:QI 24) (div:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (mod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))])
+ (set (match_dup 0) (reg:QI 24))
+ (set (match_dup 3) (reg:QI 25))]
+ "")
+
+(define_insn "*divmodqi4_call"
+ [(set (reg:QI 24) (div:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (mod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __divmodqi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodqi4"
+ [(parallel [(set (match_operand:QI 0 "pseudo_register_operand" "")
+ (udiv:QI (match_operand:QI 1 "pseudo_register_operand" "")
+ (match_operand:QI 2 "pseudo_register_operand" "")))
+ (set (match_operand:QI 3 "pseudo_register_operand" "")
+ (umod:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))
+ (clobber (reg:QI 24))
+ (clobber (reg:QI 25))])]
+ ""
+ "this udivmodqi4 pattern should have been splitted;"
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+ (parallel [(set (reg:QI 24) (udiv:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (umod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 23))])
+ (set (match_dup 0) (reg:QI 24))
+ (set (match_dup 3) (reg:QI 25))]
+ "")
+
+(define_insn "*udivmodqi4_call"
+ [(set (reg:QI 24) (udiv:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (umod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __udivmodqi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "divmodhi4"
+ [(parallel [(set (match_operand:HI 0 "pseudo_register_operand" "")
+ (div:HI (match_operand:HI 1 "pseudo_register_operand" "")
+ (match_operand:HI 2 "pseudo_register_operand" "")))
+ (set (match_operand:HI 3 "pseudo_register_operand" "")
+ (mod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))])]
+ ""
+ "this should have been splitted;"
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+ (parallel [(set (reg:HI 22) (div:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (mod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))])
+ (set (match_dup 0) (reg:HI 22))
+ (set (match_dup 3) (reg:HI 24))]
+ "")
+
+(define_insn "*divmodhi4_call"
+ [(set (reg:HI 22) (div:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (mod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __divmodhi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodhi4"
+ [(parallel [(set (match_operand:HI 0 "pseudo_register_operand" "")
+ (udiv:HI (match_operand:HI 1 "pseudo_register_operand" "")
+ (match_operand:HI 2 "pseudo_register_operand" "")))
+ (set (match_operand:HI 3 "pseudo_register_operand" "")
+ (umod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))])]
+ ""
+ "this udivmodhi4 pattern should have been splitted.;"
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+ (parallel [(set (reg:HI 22) (udiv:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (umod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))])
+ (set (match_dup 0) (reg:HI 22))
+ (set (match_dup 3) (reg:HI 24))]
+ "")
+
+(define_insn "*udivmodhi4_call"
+ [(set (reg:HI 22) (udiv:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (umod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __udivmodhi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+ (div:SI (match_operand:SI 1 "pseudo_register_operand" "")
+ (match_operand:SI 2 "pseudo_register_operand" "")))
+ (set (match_operand:SI 3 "pseudo_register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI 18))
+ (clobber (reg:SI 22))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])]
+ ""
+ "this divmodsi4 pattern should have been splitted;"
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+ (parallel [(set (reg:SI 18) (div:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (mod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_dup 0) (reg:SI 18))
+ (set (match_dup 3) (reg:SI 22))]
+ "")
+
+(define_insn "*divmodsi4_call"
+ [(set (reg:SI 18) (div:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (mod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __divmodsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+ (udiv:SI (match_operand:SI 1 "pseudo_register_operand" "")
+ (match_operand:SI 2 "pseudo_register_operand" "")))
+ (set (match_operand:SI 3 "pseudo_register_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI 18))
+ (clobber (reg:SI 22))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])]
+ ""
+ "this udivmodsi4 pattern should have been splitted;"
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+ (parallel [(set (reg:SI 18) (udiv:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (umod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_dup 0) (reg:SI 18))
+ (set (match_dup 3) (reg:SI 22))]
+ "")
+
+(define_insn "*udivmodsi4_call"
+ [(set (reg:SI 18) (udiv:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (umod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __udivmodsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+; and
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,d")
+ (and:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ and %0,%2
+ andi %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_zn,set_zn")])
+
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,d,r")
+ (and:HI (match_operand:HI 1 "register_operand" "%0,0,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,i,M")))
+ (clobber (match_scratch:QI 3 "=X,X,&d"))]
+ ""
+{
+ if (which_alternative==0)
+ return ("and %A0,%A2" CR_TAB
+ "and %B0,%B2");
+ else if (which_alternative==1)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int mask = INTVAL (operands[2]);
+ if ((mask & 0xff) != 0xff)
+ output_asm_insn (AS2 (andi,%A0,lo8(%2)), operands);
+ if ((mask & 0xff00) != 0xff00)
+ output_asm_insn (AS2 (andi,%B0,hi8(%2)), operands);
+ return "";
+ }
+ return (AS2 (andi,%A0,lo8(%2)) CR_TAB
+ AS2 (andi,%B0,hi8(%2)));
+ }
+ return (AS2 (ldi,%3,lo8(%2)) CR_TAB
+ "and %A0,%3" CR_TAB
+ AS1 (clr,%B0));
+}
+ [(set_attr "length" "2,2,3")
+ (set_attr "cc" "set_n,clobber,set_n")])
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,i")))]
+ ""
+{
+ if (which_alternative==0)
+ return ("and %0,%2" CR_TAB
+ "and %B0,%B2" CR_TAB
+ "and %C0,%C2" CR_TAB
+ "and %D0,%D2");
+ else if (which_alternative==1)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = INTVAL (operands[2]);
+ if ((mask & 0xff) != 0xff)
+ output_asm_insn (AS2 (andi,%A0,lo8(%2)), operands);
+ if ((mask & 0xff00) != 0xff00)
+ output_asm_insn (AS2 (andi,%B0,hi8(%2)), operands);
+ if ((mask & 0xff0000L) != 0xff0000L)
+ output_asm_insn (AS2 (andi,%C0,hlo8(%2)), operands);
+ if ((mask & 0xff000000L) != 0xff000000L)
+ output_asm_insn (AS2 (andi,%D0,hhi8(%2)), operands);
+ return "";
+ }
+ return (AS2 (andi, %A0,lo8(%2)) CR_TAB
+ AS2 (andi, %B0,hi8(%2)) CR_TAB
+ AS2 (andi, %C0,hlo8(%2)) CR_TAB
+ AS2 (andi, %D0,hhi8(%2)));
+ }
+ return "bug";
+}
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_n,clobber")])
+
+(define_peephole2 ; andi
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:QI (match_dup 0)
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ {
+ operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[2]));
+ })
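+
+;; Editor's note (illustration): the peephole above folds two back-to-back
+;; constant masks on one d-register, e.g. a hypothetical "(x & 0xF0) & 0x3C",
+;; into a single "andi" with the intersection of the masks (here 0x30).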
+
+;;|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+;; ior
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,d")
+ (ior:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ or %0,%2
+ ori %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_zn,set_zn")])
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,d")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,i")))]
+ ""
+{
+ if (which_alternative==0)
+ return ("or %A0,%A2" CR_TAB
+ "or %B0,%B2");
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int mask = INTVAL (operands[2]);
+ if (mask & 0xff)
+ output_asm_insn (AS2 (ori,%A0,lo8(%2)), operands);
+ if (mask & 0xff00)
+ output_asm_insn (AS2 (ori,%B0,hi8(%2)), operands);
+ return "";
+ }
+ return (AS2 (ori,%0,lo8(%2)) CR_TAB
+ AS2 (ori,%B0,hi8(%2)));
+}
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_n,clobber")])
+
+(define_insn "*iorhi3_clobber"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "immediate_operand" "M,i")))
+ (clobber (match_scratch:QI 3 "=&d,&d"))]
+ ""
+ "@
+ ldi %3,lo8(%2)\;or %A0,%3
+ ldi %3,lo8(%2)\;or %A0,%3\;ldi %3,hi8(%2)\;or %B0,%3"
+ [(set_attr "length" "2,4")
+ (set_attr "cc" "clobber,set_n")])
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,i")))]
+ ""
+{
+ if (which_alternative==0)
+ return ("or %0,%2" CR_TAB
+ "or %B0,%B2" CR_TAB
+ "or %C0,%C2" CR_TAB
+ "or %D0,%D2");
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = INTVAL (operands[2]);
+ if (mask & 0xff)
+ output_asm_insn (AS2 (ori,%A0,lo8(%2)), operands);
+ if (mask & 0xff00)
+ output_asm_insn (AS2 (ori,%B0,hi8(%2)), operands);
+ if (mask & 0xff0000L)
+ output_asm_insn (AS2 (ori,%C0,hlo8(%2)), operands);
+ if (mask & 0xff000000L)
+ output_asm_insn (AS2 (ori,%D0,hhi8(%2)), operands);
+ return "";
+ }
+ return (AS2 (ori, %A0,lo8(%2)) CR_TAB
+ AS2 (ori, %B0,hi8(%2)) CR_TAB
+ AS2 (ori, %C0,hlo8(%2)) CR_TAB
+ AS2 (ori, %D0,hhi8(%2)));
+}
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_n,clobber")])
+
+(define_insn "*iorsi3_clobber"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "immediate_operand" "M,i")))
+ (clobber (match_scratch:QI 3 "=&d,&d"))]
+ ""
+ "@
+ ldi %3,lo8(%2)\;or %A0,%3
+ ldi %3,lo8(%2)\;or %A0,%3\;ldi %3,hi8(%2)\;or %B0,%3\;ldi %3,hlo8(%2)\;or %C0,%3\;ldi %3,hhi8(%2)\;or %D0,%3"
+ [(set_attr "length" "2,8")
+ (set_attr "cc" "clobber,set_n")])
+
+;;^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+;; xor
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (xor:QI (match_operand:QI 1 "register_operand" "%0")
+ (match_operand:QI 2 "register_operand" "r")))]
+ ""
+ "eor %0,%2"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_zn")])
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (xor:HI (match_operand:HI 1 "register_operand" "%0")
+ (match_operand:HI 2 "register_operand" "r")))]
+ ""
+ "eor %0,%2
+ eor %B0,%B2"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "eor %0,%2
+ eor %B0,%B2
+ eor %C0,%C2
+ eor %D0,%D2"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+;; swap swap swap swap swap swap swap swap swap swap swap swap swap swap swap
+;; swap
+
+(define_expand "rotlqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (rotate:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ "
+{
+ if (!CONST_INT_P (operands[2]) || (INTVAL (operands[2]) != 4))
+ FAIL;
+}")
+
+(define_insn "*rotlqi3_4"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (rotate:QI (match_operand:QI 1 "register_operand" "0")
+ (const_int 4)))]
+ ""
+ "swap %0"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; Split all rotates of HI, SI and DImode registers where the rotation is
+;; by a whole number of bytes. The split creates the appropriate moves and
+;; considers all overlap situations. DImode is split before reload.
+
+;; HImode does not need a scratch. Use an attribute for this constraint.
+;; Use a QI scratch for DImode, as it is often split into byte-sized operands.
+
+(define_mode_attr rotx [(DI "&r,&r,X") (SI "&r,&r,X") (HI "X,X,X")])
+(define_mode_attr rotsmode [(DI "QI") (SI "HI") (HI "QI")])
+
+(define_expand "rotl<mode>3"
+ [(parallel [(set (match_operand:HIDI 0 "register_operand" "")
+ (rotate:HIDI (match_operand:HIDI 1 "register_operand" "")
+ (match_operand:VOID 2 "const_int_operand" "")))
+ (clobber (match_operand 3 ""))])]
+ ""
+ {
+ if (CONST_INT_P (operands[2])
+ && 0 == INTVAL (operands[2]) % 8)
+ {
+ if (AVR_HAVE_MOVW && 0 == INTVAL (operands[2]) % 16)
+ operands[3] = gen_rtx_SCRATCH (<rotsmode>mode);
+ else
+ operands[3] = gen_rtx_SCRATCH (QImode);
+ }
+ else
+ FAIL;
+ })
+
+
+;; Overlapping non-HImode registers often (but not always) need a scratch.
+;; The best we can do is use the early-clobber alternative "#&r" so that
+;; completely non-overlapping operands don't get a scratch, while the "#"
+;; keeps register allocation from preferring the non-overlapping case.
+
+
+; Split word aligned rotates using scratch that is mode dependent.
+(define_insn_and_split "*rotw<mode>"
+ [(set (match_operand:HIDI 0 "register_operand" "=r,r,#&r")
+ (rotate:HIDI (match_operand:HIDI 1 "register_operand" "0,r,r")
+ (match_operand 2 "const_int_operand" "n,n,n")))
+ (clobber (match_scratch:<rotsmode> 3 "=<rotx>"))]
+ "AVR_HAVE_MOVW
+ && CONST_INT_P (operands[2])
+ && 0 == INTVAL (operands[2]) % 16"
+ "#"
+ "&& (reload_completed || <MODE>mode == DImode)"
+ [(const_int 0)]
+ {
+ avr_rotate_bytes (operands);
+ DONE;
+ })
+
+
+; Split byte aligned rotates using scratch that is always QI mode.
+(define_insn_and_split "*rotb<mode>"
+ [(set (match_operand:HIDI 0 "register_operand" "=r,r,#&r")
+ (rotate:HIDI (match_operand:HIDI 1 "register_operand" "0,r,r")
+ (match_operand 2 "const_int_operand" "n,n,n")))
+ (clobber (match_scratch:QI 3 "=<rotx>"))]
+ "CONST_INT_P (operands[2])
+ && (8 == INTVAL (operands[2]) % 16
+ || (!AVR_HAVE_MOVW
+ && 0 == INTVAL (operands[2]) % 16))"
+ "#"
+ "&& (reload_completed || <MODE>mode == DImode)"
+ [(const_int 0)]
+ {
+ avr_rotate_bytes (operands);
+ DONE;
+ })
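+
+;; Editor's note (assumed example): rotating a uint32_t by 16 bits is a
+;; whole-word rotation; on a MOVW device the "*rotw" splitter above hands it
+;; to avr_rotate_bytes, which can exchange the two 16-bit halves with
+;; register moves instead of looping over single-bit rotates.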
+
+
+;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
+;; arithmetic shift left
+
+(define_expand "ashlqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ashift:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "")
+
+(define_split ; ashlqi3_const4
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 4)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int -16)))]
+ "")
+
+(define_split ; ashlqi3_const5
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 5)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (ashift:QI (match_dup 0) (const_int 1)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int -32)))]
+ "")
+
+(define_split ; ashlqi3_const6
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 6)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (ashift:QI (match_dup 0) (const_int 2)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int -64)))]
+ "")
+
+(define_insn "*ashlqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (ashift:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
+ ""
+ "* return ashlqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+(define_insn "ashlhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashlhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "6,0,2,2,4,10,10")
+ (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashlsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "8,0,4,4,8,10,12")
+ (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2 ; ashlqi3_l_const4
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 4)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 1) (const_int -16))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2 ; ashlqi3_l_const5
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 5)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (ashift:QI (match_dup 0) (const_int 1)))
+ (set (match_dup 1) (const_int -32))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2 ; ashlqi3_l_const6
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (ashift:QI (match_dup 0)
+ (const_int 6)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (ashift:QI (match_dup 0) (const_int 2)))
+ (set (match_dup 1) (const_int -64))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:HI 0 "register_operand" "")
+ (ashift:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (ashift:HI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*ashlhi3_const"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+ (ashift:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ "* return ashlhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,2,2,4,10")
+ (set_attr "cc" "none,set_n,clobber,set_n,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*ashlsi3_const"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return ashlsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "cc" "none,set_n,clobber,clobber")])
+
+;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+;; arithmetic shift right
+
+(define_insn "ashrqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r,r")
+ (ashiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
+ ""
+ "* return ashrqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,5,9")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber")])
+
+(define_insn "ashrhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashrhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "6,0,2,4,4,10,10")
+ (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashrsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "8,0,4,6,8,10,12")
+ (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:HI 0 "register_operand" "")
+ (ashiftrt:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (ashiftrt:HI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*ashrhi3_const"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+ (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ "* return ashrhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,2,4,4,10")
+ (set_attr "cc" "none,clobber,set_n,clobber,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*ashrsi3_const"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return ashrsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "cc" "none,clobber,set_n,clobber")])
+
+;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+;; logical shift right
+
+(define_expand "lshrqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (lshiftrt:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "")
+
+(define_split ; lshrqi3_const4
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 4)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int 15)))]
+ "")
+
+(define_split ; lshrqi3_const5
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 5)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int 7)))]
+ "")
+
+(define_split ; lshrqi3_const6
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 6)))]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int 3)))]
+ "")
+
+(define_insn "*lshrqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (lshiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
+ ""
+ "* return lshrqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+(define_insn "lshrhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return lshrhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "6,0,2,2,4,10,10")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber,clobber")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return lshrsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "8,0,4,4,8,10,12")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2 ; lshrqi3_l_const4
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 4)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 1) (const_int 15))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2 ; lshrqi3_l_const5
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 5)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
+ (set (match_dup 1) (const_int 7))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2 ; lshrqi3_l_const6
+ [(set (match_operand:QI 0 "l_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 6)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
+ (set (match_dup 1) (const_int 3))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:HI 0 "register_operand" "")
+ (lshiftrt:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (lshiftrt:HI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*lshrhi3_const"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+ (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ "* return lshrhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,2,2,4,10")
+ (set_attr "cc" "none,clobber,clobber,clobber,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0) (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn "*lshrsi3_const"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return lshrsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "cc" "none,clobber,clobber,clobber")])
+
+;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
+;; abs
+
+(define_insn "absqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (abs:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "sbrc %0,7
+ neg %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=d,r")
+ (abs:SF (match_operand:SF 1 "register_operand" "0,0")))]
+ ""
+ "@
+ andi %D0,0x7f
+ clt\;bld %D0,7"
+ [(set_attr "length" "1,2")
+ (set_attr "cc" "set_n,clobber")])
+
+;; 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x
+;; neg
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (neg:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "neg %0"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_zn")])
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "register_operand" "=!d,r,&r")
+ (neg:HI (match_operand:HI 1 "register_operand" "0,0,r")))]
+ ""
+ "@
+ com %B0\;neg %A0\;sbci %B0,lo8(-1)
+ com %B0\;neg %A0\;sbc %B0,__zero_reg__\;inc %B0
+ clr %A0\;clr %B0\;sub %A0,%A1\;sbc %B0,%B1"
+ [(set_attr "length" "3,4,4")
+ (set_attr "cc" "set_czn,set_n,set_czn")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=!d,r,&r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0,0,r")))]
+ ""
+ "@
+ com %D0\;com %C0\;com %B0\;neg %A0\;sbci %B0,lo8(-1)\;sbci %C0,lo8(-1)\;sbci %D0,lo8(-1)
+ com %D0\;com %C0\;com %B0\;com %A0\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__
+ clr %A0\;clr %B0\;{clr %C0\;clr %D0|movw %C0,%A0}\;sub %A0,%A1\;sbc %B0,%B1\;sbc %C0,%C1\;sbc %D0,%D1"
+ [(set_attr_alternative "length"
+ [(const_int 7)
+ (const_int 8)
+ (if_then_else (eq_attr "mcu_have_movw" "yes")
+ (const_int 7)
+ (const_int 8))])
+ (set_attr "cc" "set_czn,set_n,set_czn")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d,r")
+ (neg:SF (match_operand:SF 1 "register_operand" "0,0")))]
+ ""
+ "@
+ subi %D0,0x80
+ bst %D0,7\;com %D0\;bld %D0,7\;com %D0"
+ [(set_attr "length" "1,4")
+ (set_attr "cc" "set_n,set_n")])
+
+;; !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+;; not
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (not:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "com %0"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (not:HI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "com %0
+ com %B0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "0")))]
+ ""
+ "com %0
+ com %B0
+ com %C0
+ com %D0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+;; xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x
+;; sign extend
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "0,*r")))]
+ ""
+ "@
+ clr %B0\;sbrc %0,7\;com %B0
+ mov %A0,%A1\;clr %B0\;sbrc %A0,7\;com %B0"
+ [(set_attr "length" "3,4")
+ (set_attr "cc" "set_n,set_n")])
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "0,*r")))]
+ ""
+ "@
+ clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0\;mov %D0,%B0
+ mov %A0,%A1\;clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0\;mov %D0,%B0"
+ [(set_attr "length" "5,6")
+ (set_attr "cc" "set_n,set_n")])
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,&r")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "0,*r")))]
+ ""
+ "@
+ clr %C0\;sbrc %B0,7\;com %C0\;mov %D0,%C0
+ {mov %A0,%A1\;mov %B0,%B1|movw %A0,%A1}\;clr %C0\;sbrc %B0,7\;com %C0\;mov %D0,%C0"
+ [(set_attr_alternative "length"
+ [(const_int 4)
+ (if_then_else (eq_attr "mcu_have_movw" "yes")
+ (const_int 5)
+ (const_int 6))])
+ (set_attr "cc" "set_n,set_n")])
+
+;; xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x
+;; zero extend
+
+(define_insn_and_split "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (QImode, HImode);
+ unsigned int high_off = subreg_highpart_offset (QImode, HImode);
+
+ operands[2] = simplify_gen_subreg (QImode, operands[0], HImode, low_off);
+ operands[3] = simplify_gen_subreg (QImode, operands[0], HImode, high_off);
+})
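+
+;; Editor's note (assumed example): after reload, a hypothetical
+;; "uint16_t y = (uint8_t) x;" is therefore split into two independent
+;; QImode moves, one copying the low byte and one loading 0 into the high
+;; byte, letting later passes handle the halves separately.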
+
+(define_insn_and_split "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:HI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (HImode, SImode);
+ unsigned int high_off = subreg_highpart_offset (HImode, SImode);
+
+ operands[2] = simplify_gen_subreg (HImode, operands[0], SImode, low_off);
+ operands[3] = simplify_gen_subreg (HImode, operands[0], SImode, high_off);
+})
+
+(define_insn_and_split "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (HImode, SImode);
+ unsigned int high_off = subreg_highpart_offset (HImode, SImode);
+
+ operands[2] = simplify_gen_subreg (HImode, operands[0], SImode, low_off);
+ operands[3] = simplify_gen_subreg (HImode, operands[0], SImode, high_off);
+})
+
+(define_insn_and_split "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+})
+
+(define_insn_and_split "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+})
+
+(define_insn_and_split "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+{
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+})
+
+;;<=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=>
+;; compare
+
+; Optimize negated tests into reverse compare if overflow is undefined.
+(define_insn "*negated_tstqi"
+ [(set (cc0)
+ (compare (neg:QI (match_operand:QI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
+ "cp __zero_reg__,%0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*reversed_tstqi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:QI 0 "register_operand" "r")))]
+ ""
+ "cp __zero_reg__,%0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+(define_insn "*negated_tsthi"
+ [(set (cc0)
+ (compare (neg:HI (match_operand:HI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+;; Leave here the clobber used by the cmphi pattern for simplicity, even
+;; though it is unused, because this pattern is synthesized by avr_reorg.
+(define_insn "*reversed_tsthi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:HI 0 "register_operand" "r")))
+ (clobber (match_scratch:QI 1 "=X"))]
+ ""
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+(define_insn "*negated_tstsi"
+ [(set (cc0)
+ (compare (neg:SI (match_operand:SI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0
+ cpc __zero_reg__,%C0
+ cpc __zero_reg__,%D0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4")])
+
+(define_insn "*reversed_tstsi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:SI 0 "register_operand" "r")))
+ (clobber (match_scratch:QI 1 "=X"))]
+ ""
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0
+ cpc __zero_reg__,%C0
+ cpc __zero_reg__,%D0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4")])
+
+
+(define_insn "*cmpqi"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "register_operand" "r,r,d")
+ (match_operand:QI 1 "nonmemory_operand" "L,r,i")))]
+ ""
+ "@
+ tst %0
+ cp %0,%1
+ cpi %0,lo8(%1)"
+ [(set_attr "cc" "compare,compare,compare")
+ (set_attr "length" "1,1,1")])
+
+(define_insn "*cmpqi_sign_extend"
+ [(set (cc0)
+ (compare (sign_extend:HI
+ (match_operand:QI 0 "register_operand" "d"))
+ (match_operand:HI 1 "const_int_operand" "n")))]
+ "INTVAL (operands[1]) >= -128 && INTVAL (operands[1]) <= 127"
+ "cpi %0,lo8(%1)"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmphi"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "register_operand" "!w,r,r,d,d,r,r")
+ (match_operand:HI 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
+ (clobber (match_scratch:QI 2 "=X,X,X,X,&d,&d,&d"))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0: case 1:
+ return out_tsthi (insn, operands[0], NULL);
+
+ case 2:
+ return (AS2 (cp,%A0,%A1) CR_TAB
+ AS2 (cpc,%B0,%B1));
+ case 3:
+ if (reg_unused_after (insn, operands[0])
+ && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 63
+ && test_hard_reg_class (ADDW_REGS, operands[0]))
+ return AS2 (sbiw,%0,%1);
+ else
+ return (AS2 (cpi,%0,%1) CR_TAB
+ AS2 (cpc,%B0,__zero_reg__));
+ case 4:
+ if (reg_unused_after (insn, operands[0]))
+ return (AS2 (subi,%0,lo8(%1)) CR_TAB
+ AS2 (sbci,%B0,hi8(%1)));
+ else
+ return (AS2 (ldi, %2,hi8(%1)) CR_TAB
+ AS2 (cpi, %A0,lo8(%1)) CR_TAB
+ AS2 (cpc, %B0,%2));
+ case 5:
+ return (AS2 (ldi, %2,lo8(%1)) CR_TAB
+ AS2 (cp, %A0,%2) CR_TAB
+ AS2 (cpc, %B0,__zero_reg__));
+
+ case 6:
+ return (AS2 (ldi, %2,lo8(%1)) CR_TAB
+ AS2 (cp, %A0,%2) CR_TAB
+ AS2 (ldi, %2,hi8(%1)) CR_TAB
+ AS2 (cpc, %B0,%2));
+ }
+ return \"bug\";
+}"
+ [(set_attr "cc" "compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "1,2,2,2,3,3,4")])
+
+
+(define_insn "*cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "register_operand" "r,r,d,d,r,r")
+ (match_operand:SI 1 "nonmemory_operand" "L,r,M,i,M,i")))
+ (clobber (match_scratch:QI 2 "=X,X,X,&d,&d,&d"))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ return out_tstsi (insn, operands[0], NULL);
+
+ case 1:
+ return (AS2 (cp,%A0,%A1) CR_TAB
+ AS2 (cpc,%B0,%B1) CR_TAB
+ AS2 (cpc,%C0,%C1) CR_TAB
+ AS2 (cpc,%D0,%D1));
+ case 2:
+ if (reg_unused_after (insn, operands[0])
+ && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 63
+ && test_hard_reg_class (ADDW_REGS, operands[0]))
+ return (AS2 (sbiw,%0,%1) CR_TAB
+ AS2 (cpc,%C0,__zero_reg__) CR_TAB
+ AS2 (cpc,%D0,__zero_reg__));
+ else
+ return (AS2 (cpi,%A0,lo8(%1)) CR_TAB
+ AS2 (cpc,%B0,__zero_reg__) CR_TAB
+ AS2 (cpc,%C0,__zero_reg__) CR_TAB
+ AS2 (cpc,%D0,__zero_reg__));
+ case 3:
+ if (reg_unused_after (insn, operands[0]))
+ return (AS2 (subi,%A0,lo8(%1)) CR_TAB
+ AS2 (sbci,%B0,hi8(%1)) CR_TAB
+ AS2 (sbci,%C0,hlo8(%1)) CR_TAB
+ AS2 (sbci,%D0,hhi8(%1)));
+ else
+ return (AS2 (cpi, %A0,lo8(%1)) CR_TAB
+ AS2 (ldi, %2,hi8(%1)) CR_TAB
+ AS2 (cpc, %B0,%2) CR_TAB
+ AS2 (ldi, %2,hlo8(%1)) CR_TAB
+ AS2 (cpc, %C0,%2) CR_TAB
+ AS2 (ldi, %2,hhi8(%1)) CR_TAB
+ AS2 (cpc, %D0,%2));
+ case 4:
+ return (AS2 (ldi,%2,lo8(%1)) CR_TAB
+ AS2 (cp,%A0,%2) CR_TAB
+ AS2 (cpc,%B0,__zero_reg__) CR_TAB
+ AS2 (cpc,%C0,__zero_reg__) CR_TAB
+ AS2 (cpc,%D0,__zero_reg__));
+ case 5:
+ return (AS2 (ldi, %2,lo8(%1)) CR_TAB
+ AS2 (cp, %A0,%2) CR_TAB
+ AS2 (ldi, %2,hi8(%1)) CR_TAB
+ AS2 (cpc, %B0,%2) CR_TAB
+ AS2 (ldi, %2,hlo8(%1)) CR_TAB
+ AS2 (cpc, %C0,%2) CR_TAB
+ AS2 (ldi, %2,hhi8(%1)) CR_TAB
+ AS2 (cpc, %D0,%2));
+ }
+ return \"bug\";
+}"
+ [(set_attr "cc" "compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,7,5,8")])
+
+
+;; ----------------------------------------------------------------------
+;; JUMP INSTRUCTIONS
+;; ----------------------------------------------------------------------
+;; Conditional jump instructions
+
+(define_expand "cbranchsi4"
+ [(parallel [(set (cc0)
+ (compare (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (match_scratch:QI 4 ""))])
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+(define_expand "cbranchhi4"
+ [(parallel [(set (cc0)
+ (compare (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "nonmemory_operand" "")))
+ (clobber (match_scratch:QI 4 ""))])
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+(define_expand "cbranchqi4"
+ [(set (cc0)
+ (compare (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+
+;; Test a single bit in a QI/HI/SImode register.
+;; Combine will create zero-extract patterns for single-bit tests.
+;; Permit any mode in the source pattern by using VOIDmode.
+
+(define_insn "*sbrx_branch<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:QIDI
+ (match_operand:VOID 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "* return avr_out_sbxx_branch (insn, operands);"
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Same test based on bitwise AND RTL. Keep this in case GCC changes
+;; patterns, or for old peepholes.
+;; FIXME: a bitwise mask will not work for DImode.
+
+(define_insn "*sbrx_and_branch<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(and:QISI
+ (match_operand:QISI 1 "register_operand" "r")
+ (match_operand:QISI 2 "single_one_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+{
+ HOST_WIDE_INT bitnumber;
+ bitnumber = exact_log2 (GET_MODE_MASK (<MODE>mode) & INTVAL (operands[2]));
+ operands[2] = GEN_INT (bitnumber);
+ return avr_out_sbxx_branch (insn, operands);
+}
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Convert sign tests to bit 7/15/31 tests that match the above insns.
+(define_peephole2
+ [(set (cc0) (compare (match_operand:QI 0 "register_operand" "")
+ (const_int 0)))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (zero_extract:HI (match_dup 0)
+ (const_int 1)
+ (const_int 7))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "")
+
+(define_peephole2
+ [(set (cc0) (compare (match_operand:QI 0 "register_operand" "")
+ (const_int 0)))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (zero_extract:HI (match_dup 0)
+ (const_int 1)
+ (const_int 7))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "")
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:HI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:HI 2 ""))])
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (and:HI (match_dup 0) (const_int -32768))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "")
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:HI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:HI 2 ""))])
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (and:HI (match_dup 0) (const_int -32768))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "")
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:SI 2 ""))])
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (and:SI (match_dup 0) (match_dup 2))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "operands[2] = GEN_INT (-2147483647 - 1);")
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:SI 2 ""))])
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (and:SI (match_dup 0) (match_dup 2))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "operands[2] = GEN_INT (-2147483647 - 1);")
+
+;; ************************************************************************
+;; Implementation of conditional jumps here.
+;; Compare with 0 (test) jumps
+;; ************************************************************************
+
+(define_insn "branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0],insn), 0);"
+ [(set_attr "type" "branch")
+ (set_attr "cc" "clobber")])
+
+;; ****************************************************************
+;; AVR does not have the following conditional jumps: LE, LEU, GT, GTU.
+;; Convert them all to proper jumps.
+;; ****************************************************************/
+
+(define_insn "difficult_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "difficult_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0],insn), 0);"
+ [(set_attr "type" "branch1")
+ (set_attr "cc" "clobber")])
+
+;; reverse branch
+
+(define_insn "rvbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);"
+ [(set_attr "type" "branch1")
+ (set_attr "cc" "clobber")])
+
+(define_insn "difficult_rvbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "difficult_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);"
+ [(set_attr "type" "branch")
+ (set_attr "cc" "clobber")])
+
+;; **************************************************************************
+;; Unconditional and other jump instructions.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*{
+ if (AVR_HAVE_JMP_CALL && get_attr_length (insn) != 1)
+ return AS1 (jmp,%x0);
+ return AS1 (rjmp,%x0);
+}"
+ [(set (attr "length")
+ (if_then_else (match_operand 0 "symbol_ref_operand" "")
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 1)
+ (const_int 2))
+ (if_then_else (and (ge (minus (pc) (match_dup 0)) (const_int -2047))
+ (le (minus (pc) (match_dup 0)) (const_int 2047)))
+ (const_int 1)
+ (const_int 2))))
+ (set_attr "cc" "none")])
+
+;; call
+
+(define_expand "call"
+ [(call (match_operand:HI 0 "call_insn_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ;; Operand 1 not used on the AVR.
+ ""
+ "")
+
+;; call value
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand" "")
+ (call (match_operand:HI 1 "call_insn_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ;; Operand 2 not used on the AVR.
+ ""
+ "")
+
+(define_insn "call_insn"
+ [(call (mem:HI (match_operand:HI 0 "nonmemory_operand" "!z,*r,s,n"))
+ (match_operand:HI 1 "general_operand" "X,X,X,X"))]
+;; We don't need to save the Z register: r30/r31 are call-used registers.
+ ;; Operand 1 not used on the AVR.
+ "(register_operand (operands[0], HImode) || CONSTANT_P (operands[0]))"
+ "*{
+ if (which_alternative==0)
+ return \"%!icall\";
+ else if (which_alternative==1)
+ {
+ if (AVR_HAVE_MOVW)
+ return (AS2 (movw, r30, %0) CR_TAB
+ \"%!icall\");
+ else
+ return (AS2 (mov, r30, %A0) CR_TAB
+ AS2 (mov, r31, %B0) CR_TAB
+ \"%!icall\");
+ }
+ else if (which_alternative==2)
+ return AS1(%~call,%x0);
+ return (AS2 (ldi,r30,lo8(%0)) CR_TAB
+ AS2 (ldi,r31,hi8(%0)) CR_TAB
+ \"%!icall\");
+}"
+ [(set_attr "cc" "clobber,clobber,clobber,clobber")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "mcu_have_movw" "yes")
+ (const_int 2)
+ (const_int 3))
+ (if_then_else (eq_attr "mcu_mega" "yes")
+ (const_int 2)
+ (const_int 1))
+ (const_int 3)])])
+
+(define_insn "call_value_insn"
+ [(set (match_operand 0 "register_operand" "=r,r,r,r")
+ (call (mem:HI (match_operand:HI 1 "nonmemory_operand" "!z,*r,s,n"))
+;; There is no need to save the Z register: r30/r31 are call-used registers.
+ (match_operand:HI 2 "general_operand" "X,X,X,X")))]
+ ;; Operand 2 not used on the AVR.
+ "(register_operand (operands[0], VOIDmode) || CONSTANT_P (operands[0]))"
+ "*{
+ if (which_alternative==0)
+ return \"%!icall\";
+ else if (which_alternative==1)
+ {
+ if (AVR_HAVE_MOVW)
+ return (AS2 (movw, r30, %1) CR_TAB
+ \"%!icall\");
+ else
+ return (AS2 (mov, r30, %A1) CR_TAB
+ AS2 (mov, r31, %B1) CR_TAB
+ \"%!icall\");
+ }
+ else if (which_alternative==2)
+ return AS1(%~call,%x1);
+ return (AS2 (ldi, r30, lo8(%1)) CR_TAB
+ AS2 (ldi, r31, hi8(%1)) CR_TAB
+ \"%!icall\");
+}"
+ [(set_attr "cc" "clobber,clobber,clobber,clobber")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "mcu_have_movw" "yes")
+ (const_int 2)
+ (const_int 3))
+ (if_then_else (eq_attr "mcu_mega" "yes")
+ (const_int 2)
+ (const_int 1))
+ (const_int 3)])])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+; indirect jump
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand:HI 0 "nonmemory_operand" ""))]
+ ""
+ " if ((!AVR_HAVE_JMP_CALL) && !register_operand(operand0, HImode))
+ {
+ operands[0] = copy_to_mode_reg(HImode, operand0);
+ }"
+)
+
+; indirect jump
+(define_insn "*jcindirect_jump"
+ [(set (pc) (match_operand:HI 0 "immediate_operand" "i"))]
+ ""
+ "@
+ %~jmp %x0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;;
+(define_insn "*njcindirect_jump"
+ [(set (pc) (match_operand:HI 0 "register_operand" "!z,*r"))]
+ "!AVR_HAVE_EIJMP_EICALL"
+ "@
+ ijmp
+ push %A0\;push %B0\;ret"
+ [(set_attr "length" "1,3")
+ (set_attr "cc" "none,none")])
+
+(define_insn "*indirect_jump_avr6"
+ [(set (pc) (match_operand:HI 0 "register_operand" "z"))]
+ "AVR_HAVE_EIJMP_EICALL"
+ "eijmp"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; table jump
+
+;; Table made from "rjmp" instructions for <=8K devices.
+(define_insn "*tablejump_rjmp"
+ [(set (pc) (unspec:HI [(match_operand:HI 0 "register_operand" "!z,*r")]
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+ "(!AVR_HAVE_JMP_CALL) && (!AVR_HAVE_EIJMP_EICALL)"
+ "@
+ ijmp
+ push %A0\;push %B0\;ret"
+ [(set_attr "length" "1,3")
+ (set_attr "cc" "none,none")])
+
+;; Not a prologue, but similar idea - move the common piece of code to libgcc.
+(define_insn "*tablejump_lib"
+ [(set (pc) (unspec:HI [(match_operand:HI 0 "register_operand" "z")]
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+ "AVR_HAVE_JMP_CALL && TARGET_CALL_PROLOGUES"
+ "%~jmp __tablejump2__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*tablejump_enh"
+ [(set (pc) (unspec:HI [(match_operand:HI 0 "register_operand" "z")]
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+ "AVR_HAVE_JMP_CALL && AVR_HAVE_LPMX"
+ "lsl r30
+ rol r31
+ lpm __tmp_reg__,Z+
+ lpm r31,Z
+ mov r30,__tmp_reg__
+ %!ijmp"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*tablejump"
+ [(set (pc) (unspec:HI [(match_operand:HI 0 "register_operand" "z")]
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+ "AVR_HAVE_JMP_CALL && !AVR_HAVE_EIJMP_EICALL"
+ "lsl r30
+ rol r31
+ lpm
+ inc r30
+ push r0
+ lpm
+ push r0
+ ret"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")])
+
+(define_expand "casesi"
+ [(set (match_dup 6)
+ (minus:HI (subreg:HI (match_operand:SI 0 "register_operand" "") 0)
+ (match_operand:HI 1 "register_operand" "")))
+ (parallel [(set (cc0)
+ (compare (match_dup 6)
+ (match_operand:HI 2 "register_operand" "")))
+ (clobber (match_scratch:QI 9 ""))])
+
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+
+ (set (match_dup 6)
+ (plus:HI (match_dup 6) (label_ref (match_operand:HI 3 "" ""))))
+
+ (parallel [(set (pc) (unspec:HI [(match_dup 6)] UNSPEC_INDEX_JMP))
+ (use (label_ref (match_dup 3)))
+ (clobber (match_dup 6))])]
+ ""
+ "
+{
+ operands[6] = gen_reg_rtx (HImode);
+}")
+
+
+;; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+;; This instruction sets the Z flag.
+
+(define_insn "sez"
+ [(set (cc0) (const_int 0))]
+ ""
+ "sez"
+ [(set_attr "length" "1")
+ (set_attr "cc" "compare")])
+
+;; Clear/set/test a single bit in I/O address space.
+
+(define_insn "*cbi"
+ [(set (mem:QI (match_operand 0 "low_io_address_operand" "n"))
+ (and:QI (mem:QI (match_dup 0))
+ (match_operand:QI 1 "single_zero_operand" "n")))]
+ "(optimize > 0)"
+{
+ operands[2] = GEN_INT (exact_log2 (~INTVAL (operands[1]) & 0xff));
+ return AS2 (cbi,%m0-0x20,%2);
+}
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+(define_insn "*sbi"
+ [(set (mem:QI (match_operand 0 "low_io_address_operand" "n"))
+ (ior:QI (mem:QI (match_dup 0))
+ (match_operand:QI 1 "single_one_operand" "n")))]
+ "(optimize > 0)"
+{
+ operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1]) & 0xff));
+ return AS2 (sbi,%m0-0x20,%2);
+}
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; Lower half of the I/O space - use sbic/sbis directly.
+(define_insn "*sbix_branch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:HI
+ (mem:QI (match_operand 1 "low_io_address_operand" "n"))
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(optimize > 0)"
+ "* return avr_out_sbxx_branch (insn, operands);"
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Tests of bit 7 are pessimized to sign tests, so we need this too...
+(define_insn "*sbix_branch_bit7"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "gelt_operator"
+ [(mem:QI (match_operand 1 "low_io_address_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "(optimize > 0)"
+{
+ operands[3] = operands[2];
+ operands[2] = GEN_INT (7);
+ return avr_out_sbxx_branch (insn, operands);
+}
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 2)) (const_int -2046))
+ (le (minus (pc) (match_dup 2)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Upper half of the I/O space - read port to __tmp_reg__ and use sbrc/sbrs.
+(define_insn "*sbix_branch_tmp"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:HI
+ (mem:QI (match_operand 1 "high_io_address_operand" "n"))
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(optimize > 0)"
+ "* return avr_out_sbxx_branch (insn, operands);"
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2045)))
+ (const_int 3)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 3)
+ (const_int 5))))
+ (set_attr "cc" "clobber")])
+
+(define_insn "*sbix_branch_tmp_bit7"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "gelt_operator"
+ [(mem:QI (match_operand 1 "high_io_address_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "(optimize > 0)"
+{
+ operands[3] = operands[2];
+ operands[2] = GEN_INT (7);
+ return avr_out_sbxx_branch (insn, operands);
+}
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 2)) (const_int -2046))
+ (le (minus (pc) (match_dup 2)) (const_int 2045)))
+ (const_int 3)
+ (if_then_else (eq_attr "mcu_mega" "no")
+ (const_int 3)
+ (const_int 5))))
+ (set_attr "cc" "clobber")])
+
+;; ************************* Peepholes ********************************
+
+(define_peephole
+ [(set (match_operand:SI 0 "d_register_operand" "")
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (parallel
+ [(set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ if (test_hard_reg_class (ADDW_REGS, operands[0]))
+ output_asm_insn (AS2 (sbiw,%0,1) CR_TAB
+ AS2 (sbc,%C0,__zero_reg__) CR_TAB
+ AS2 (sbc,%D0,__zero_reg__) \"\\n\", operands);
+ else
+ output_asm_insn (AS2 (subi,%A0,1) CR_TAB
+ AS2 (sbc,%B0,__zero_reg__) CR_TAB
+ AS2 (sbc,%C0,__zero_reg__) CR_TAB
+ AS2 (sbc,%D0,__zero_reg__) \"\\n\", operands);
+ switch (avr_jump_mode (operands[2],insn))
+ {
+ case 1:
+ return AS1 (brcc,%2);
+ case 2:
+ return (AS1 (brcs,.+2) CR_TAB
+ AS1 (rjmp,%2));
+ }
+ return (AS1 (brcs,.+4) CR_TAB
+ AS1 (jmp,%2));
+}")
+
+(define_peephole
+ [(set (match_operand:HI 0 "d_register_operand" "")
+ (plus:HI (match_dup 0)
+ (const_int -1)))
+ (parallel
+ [(set (cc0)
+ (compare (match_dup 0)
+ (const_int 65535)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ if (test_hard_reg_class (ADDW_REGS, operands[0]))
+ output_asm_insn (AS2 (sbiw,%0,1), operands);
+ else
+ output_asm_insn (AS2 (subi,%A0,1) CR_TAB
+ AS2 (sbc,%B0,__zero_reg__) \"\\n\", operands);
+ switch (avr_jump_mode (operands[2],insn))
+ {
+ case 1:
+ return AS1 (brcc,%2);
+ case 2:
+ return (AS1 (brcs,.+2) CR_TAB
+ AS1 (rjmp,%2));
+ }
+ return (AS1 (brcs,.+4) CR_TAB
+ AS1 (jmp,%2));
+}")
+
+(define_peephole
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (plus:QI (match_dup 0)
+ (const_int -1)))
+ (set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (set (pc)
+ (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ cc_status.value1 = operands[0];
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE;
+ output_asm_insn (AS2 (subi,%A0,1), operands);
+ switch (avr_jump_mode (operands[1],insn))
+ {
+ case 1:
+ return AS1 (brcc,%1);
+ case 2:
+ return (AS1 (brcs,.+2) CR_TAB
+ AS1 (rjmp,%1));
+ }
+ return (AS1 (brcs,.+4) CR_TAB
+ AS1 (jmp,%1));
+}")
+
+(define_peephole
+ [(set (cc0)
+ (compare (match_operand:QI 0 "register_operand" "")
+ (const_int 0)))
+ (set (pc)
+ (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "jump_over_one_insn_p (insn, operands[1])"
+ "cpse %0,__zero_reg__")
+
+(define_peephole
+ [(set (cc0)
+ (compare (match_operand:QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")))
+ (set (pc)
+ (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "jump_over_one_insn_p (insn, operands[2])"
+ "cpse %0,%1")
+
+;; ************************************************************************
+;; Prologue/epilogue support instructions.
+;; ************************************************************************
+
+(define_insn "popqi"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (mem:QI (pre_inc:HI (reg:HI REG_SP))))]
+ ""
+ "pop %0"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+;; Enable Interrupts
+(define_insn "enable_interrupt"
+ [(unspec [(const_int 0)] UNSPEC_SEI)]
+ ""
+ "sei"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")
+ ])
+
+;; Disable Interrupts
+(define_insn "disable_interrupt"
+ [(unspec [(const_int 0)] UNSPEC_CLI)]
+ ""
+ "cli"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")
+ ])
+
+;; Library prologue saves
+(define_insn "call_prologue_saves"
+ [(unspec_volatile:HI [(const_int 0)] UNSPECV_PROLOGUE_SAVES)
+ (match_operand:HI 0 "immediate_operand" "")
+ (set (reg:HI REG_SP) (minus:HI
+ (reg:HI REG_SP)
+ (match_operand:HI 1 "immediate_operand" "")))
+ (use (reg:HI REG_X))
+ (clobber (reg:HI REG_Z))]
+ ""
+ "ldi r30,lo8(gs(1f))
+ ldi r31,hi8(gs(1f))
+ %~jmp __prologue_saves__+((18 - %0) * 2)
+1:"
+ [(set_attr_alternative "length"
+ [(if_then_else (eq_attr "mcu_mega" "yes")
+ (const_int 6)
+ (const_int 5))])
+ (set_attr "cc" "clobber")
+ ])
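+
+;; Illustration: with operand 0 == 4 the jump above lands (18-4)*2 = 28
+;; bytes (14 one-word push insns) into __prologue_saves__, so only
+;; r16/r17 and the frame pointer pair r28/r29 are pushed; the routine
+;; then allocates the frame (size passed in X) and jumps back through Z,
+;; which holds the address of the local label 1:.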
+
+; epilogue restores using library
+(define_insn "epilogue_restores"
+ [(unspec_volatile:QI [(const_int 0)] UNSPECV_EPILOGUE_RESTORES)
+ (set (reg:HI REG_Y ) (plus:HI
+ (reg:HI REG_Y)
+ (match_operand:HI 0 "immediate_operand" "")))
+ (set (reg:HI REG_SP) (reg:HI REG_Y))
+ (clobber (reg:QI REG_Z))]
+ ""
+ "ldi r30, lo8(%0)
+ %~jmp __epilogue_restores__ + ((18 - %0) * 2)"
+ [(set_attr_alternative "length"
+ [(if_then_else (eq_attr "mcu_mega" "yes")
+ (const_int 3)
+ (const_int 2))])
+ (set_attr "cc" "clobber")
+ ])
+
+; return
+(define_insn "return"
+ [(return)]
+ "reload_completed && avr_simple_epilogue ()"
+ "ret"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_epilogue"
+ [(return)]
+ "(reload_completed
+ && cfun->machine
+ && !(cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !cfun->machine->is_naked)"
+ "ret"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_interrupt_epilogue"
+ [(return)]
+ "(reload_completed
+ && cfun->machine
+ && (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !cfun->machine->is_naked)"
+ "reti"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_naked_epilogue"
+ [(return)]
+ "(reload_completed
+ && cfun->machine
+ && cfun->machine->is_naked)"
+ ""
+ [(set_attr "cc" "none")
+ (set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ {
+ expand_prologue ();
+ DONE;
+ }")
+
+(define_expand "epilogue"
+ [(const_int 0)]
+ ""
+ "
+ {
+ expand_epilogue ();
+ DONE;
+ }")
diff --git a/gcc/config/avr/avr.opt b/gcc/config/avr/avr.opt
new file mode 100644
index 000000000..d9c3c0f27
--- /dev/null
+++ b/gcc/config/avr/avr.opt
@@ -0,0 +1,60 @@
+; Options for the ATMEL AVR port of the compiler.
+
+; Copyright (C) 2005, 2007, 2008, 2010 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mcall-prologues
+Target Report Mask(CALL_PROLOGUES)
+Use subroutines for function prologues and epilogues
+
+mmcu=
+Target RejectNegative Joined Var(avr_mcu_name) Init("avr2")
+-mmcu=MCU Select the target MCU
+
+mdeb
+Target Report Undocumented Mask(ALL_DEBUG)
+
+mint8
+Target Report Mask(INT8)
+Use an 8-bit 'int' type
+
+mno-interrupts
+Target Report RejectNegative Mask(NO_INTERRUPTS)
+Change the stack pointer without disabling interrupts
+
+morder1
+Target Report Undocumented Mask(ORDER_1)
+
+morder2
+Target Report Undocumented Mask(ORDER_2)
+
+mshort-calls
+Target Report Mask(SHORT_CALLS)
+Use rjmp/rcall (limited range) on >8K devices
+
+mtiny-stack
+Target Report Mask(TINY_STACK)
+Change only the low 8 bits of the stack pointer
+
+mrelax
+Target Report
+Relax branches
+
+mpmem-wrap-around
+Target Report
+Make the linker relaxation machine assume that a program counter wrap-around occurs.
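+
+; Illustrative usage (not part of this file's semantics): a typical
+; command line exercising the options above is
+;
+;     avr-gcc -mmcu=atmega168 -mcall-prologues -mtiny-stack -Os main.c
+;
+; Each Mask(NAME) entry defines a TARGET_NAME flag that the back end
+; tests, e.g. TARGET_CALL_PROLOGUES in the *tablejump_lib condition
+; of avr.md.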
diff --git a/gcc/config/avr/constraints.md b/gcc/config/avr/constraints.md
new file mode 100644
index 000000000..2ac8833bd
--- /dev/null
+++ b/gcc/config/avr/constraints.md
@@ -0,0 +1,109 @@
+;; Constraint definitions for ATMEL AVR micro controllers.
+;; Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "t" "R0_REG"
+ "Temporary register r0")
+
+(define_register_constraint "b" "BASE_POINTER_REGS"
+ "Base pointer registers (r28--r31)")
+
+(define_register_constraint "e" "POINTER_REGS"
+ "Pointer registers (r26--r31)")
+
+(define_register_constraint "w" "ADDW_REGS"
+ "Registers from r24 to r31. These registers
+ can be used with the @samp{adiw} instruction.")
+
+(define_register_constraint "d" "LD_REGS"
+ "Registers from r16 to r31.")
+
+(define_register_constraint "l" "NO_LD_REGS"
+ "Registers from r0 to r15.")
+
+(define_register_constraint "a" "SIMPLE_LD_REGS"
+ "Registers from r16 to r23.")
+
+(define_register_constraint "x" "POINTER_X_REGS"
+ "Register pair X (r27:r26).")
+
+(define_register_constraint "y" "POINTER_Y_REGS"
+ "Register pair Y (r29:r28).")
+
+(define_register_constraint "z" "POINTER_Z_REGS"
+ "Register pair Z (r31:r30).")
+
+(define_register_constraint "q" "STACK_REG"
+ "Stack pointer register (SPH:SPL).")
+
+(define_constraint "I"
+ "Integer constant in the range 0 @dots{} 63."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 63")))
+
+(define_constraint "J"
+ "Integer constant in the range -63 @dots{} 0."
+ (and (match_code "const_int")
+ (match_test "ival <= 0 && ival >= -63")))
+
+(define_constraint "K"
+ "Integer constant 2."
+ (and (match_code "const_int")
+ (match_test "ival == 2")))
+
+(define_constraint "L"
+ "Zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "M"
+ "Integer constant in the range 0 @dots{} 0xff."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 0xff")))
+
+(define_constraint "N"
+ "Constant integer @minus{}1."
+ (and (match_code "const_int")
+ (match_test "ival == -1")))
+
+(define_constraint "O"
+ "Constant integer 8, 16, or 24."
+ (and (match_code "const_int")
+ (match_test "ival == 8 || ival == 16 || ival == 24")))
+
+(define_constraint "P"
+ "Constant integer 1."
+ (and (match_code "const_int")
+ (match_test "ival == 1")))
+
+(define_constraint "G"
+ "Constant float 0."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (SFmode)")))
+
+(define_constraint "R"
+ "Integer constant in the range -6 @dots{} 5."
+ (and (match_code "const_int")
+ (match_test "ival >= -6 && ival <= 5")))
+
+(define_memory_constraint "Q"
+ "A memory address based on Y or Z pointer with displacement."
+ (and (match_code "mem")
+ (match_test "extra_constraint_Q (op)")))
diff --git a/gcc/config/avr/driver-avr.c b/gcc/config/avr/driver-avr.c
new file mode 100755
index 000000000..6ab0bb822
--- /dev/null
+++ b/gcc/config/avr/driver-avr.c
@@ -0,0 +1,114 @@
+/* Subroutines for the gcc driver.
+ Copyright (C) 2009, 2010 Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov <aesok@post.ru>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
+/* Current architecture. */
+const struct base_arch_s *avr_current_arch = NULL;
+
+/* Current device. */
+const struct mcu_type_s *avr_current_device = NULL;
+
+/* Initialize avr_current_arch and avr_current_device variables. */
+
+static void
+avr_set_current_device (const char *name)
+{
+ if (NULL != avr_current_arch)
+ return;
+
+ for (avr_current_device = avr_mcu_types; avr_current_device->name;
+ avr_current_device++)
+ {
+ if (strcmp (avr_current_device->name, name) == 0)
+ break;
+ }
+
+ avr_current_arch = &avr_arch_types[avr_current_device->arch];
+}
+
+/* Returns command line parameters that describe the device architecture. */
+
+const char *
+avr_device_to_arch (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("-m ", avr_current_arch->arch_name, NULL);
+}
+
+/* Returns command line parameters that describe the start of the data section. */
+
+const char *
+avr_device_to_data_start (int argc, const char **argv)
+{
+ unsigned long data_section_start;
+ char data_section_start_str[16];
+
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ if (avr_current_device->data_section_start
+ == avr_current_arch->default_data_section_start)
+ return NULL;
+
+ data_section_start = 0x800000 + avr_current_device->data_section_start;
+
+ snprintf (data_section_start_str, sizeof(data_section_start_str) - 1,
+ "0x%lX", data_section_start);
+
+ return concat ("-Tdata ", data_section_start_str, NULL);
+}
+
+/* Returns command line parameters that describe the device startfile. */
+
+const char *
+avr_device_to_startfiles (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("crt", avr_current_device->library_name, ".o%s", NULL);
+}
+
+/* Returns command line parameters that describe the device library. */
+
+const char *
+avr_device_to_devicelib (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("-l", avr_current_device->library_name, NULL);
+}
+
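+/* Illustration (assumed wiring, defined in avr.h rather than here):
+   these helpers are exposed as driver spec functions and invoked from
+   the link/startfile specs roughly as
+
+       %:device_to_arch(%{mmcu=*:%*})
+
+   so that e.g. "-mmcu=atmega128" yields "-m avr51" for the linker,
+   "crtm128.o" as the startfile and "-lm128" as the device library.  */
+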
diff --git a/gcc/config/avr/libgcc.S b/gcc/config/avr/libgcc.S
new file mode 100644
index 000000000..ac8e5cd94
--- /dev/null
+++ b/gcc/config/avr/libgcc.S
@@ -0,0 +1,901 @@
+/* -*- Mode: Asm -*- */
+/* Copyright (C) 1998, 1999, 2000, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Denis Chertykov <chertykov@gmail.com>
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define __zero_reg__ r1
+#define __tmp_reg__ r0
+#define __SREG__ 0x3f
+#define __SP_H__ 0x3e
+#define __SP_L__ 0x3d
+#define __RAMPZ__ 0x3B
+#define __EIND__ 0x3C
+
+/* Most of the functions here are called directly from avr.md
+ patterns, instead of using the standard libcall mechanisms.
+ This can make better code because GCC knows exactly which
+ of the call-used registers (not all of them) are clobbered. */
+
+ .section .text.libgcc, "ax", @progbits
+
+ .macro mov_l r_dest, r_src
+#if defined (__AVR_HAVE_MOVW__)
+ movw \r_dest, \r_src
+#else
+ mov \r_dest, \r_src
+#endif
+ .endm
+
+ .macro mov_h r_dest, r_src
+#if defined (__AVR_HAVE_MOVW__)
+ ; empty
+#else
+ mov \r_dest, \r_src
+#endif
+ .endm
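+
+/* Illustration: a 16-bit register-pair copy is written as
+
+       mov_l r22, r24
+       mov_h r23, r25
+
+   which assembles to a single "movw r22,r24" on cores with MOVW and to
+   two plain "mov" instructions otherwise.  */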
+
+/* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
+#if !defined (__AVR_HAVE_MUL__)
+/*******************************************************
+ Multiplication 8 x 8
+*******************************************************/
+#if defined (L_mulqi3)
+
+#define r_arg2 r22 /* multiplicand */
+#define r_arg1 r24 /* multiplier */
+#define r_res __tmp_reg__ /* result */
+
+ .global __mulqi3
+ .func __mulqi3
+__mulqi3:
+ clr r_res ; clear result
+__mulqi3_loop:
+ sbrc r_arg1,0
+ add r_res,r_arg2
+ add r_arg2,r_arg2 ; shift multiplicand
+ breq __mulqi3_exit ; while multiplicand != 0
+ lsr r_arg1 ; shift multiplier
+ brne __mulqi3_loop ; exit if multiplier = 0
+__mulqi3_exit:
+ mov r_arg1,r_res ; result to return register
+ ret
+
+#undef r_arg2
+#undef r_arg1
+#undef r_res
+
+.endfunc
+#endif /* defined (L_mulqi3) */
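+
+/* Rough C equivalent of the __mulqi3 loop above (illustrative only;
+   QImode multiply is modulo 256, so signedness does not matter):
+
+       unsigned char mulqi3 (unsigned char a, unsigned char b)
+       {
+         unsigned char res = 0;
+         while (a && b)
+           {
+             if (a & 1)
+               res += b;       // add multiplicand if multiplier bit set
+             b <<= 1;          // shift multiplicand
+             a >>= 1;          // consume one multiplier bit
+           }
+         return res;
+       }
+*/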
+
+#if defined (L_mulqihi3)
+ .global __mulqihi3
+ .func __mulqihi3
+__mulqihi3:
+ clr r25
+ sbrc r24, 7
+ dec r25
+ clr r23
+ sbrc r22, 7
+ dec r22
+ rjmp __mulhi3
+ .endfunc
+#endif /* defined (L_mulqihi3) */
+
+#if defined (L_umulqihi3)
+ .global __umulqihi3
+ .func __umulqihi3
+__umulqihi3:
+ clr r25
+ clr r23
+ rjmp __mulhi3
+ .endfunc
+#endif /* defined (L_umulqihi3) */
+
+/*******************************************************
+ Multiplication 16 x 16
+*******************************************************/
+#if defined (L_mulhi3)
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL __tmp_reg__ /* result Low */
+#define r_resH r21 /* result High */
+
+ .global __mulhi3
+ .func __mulhi3
+__mulhi3:
+ clr r_resH ; clear result
+ clr r_resL ; clear result
+__mulhi3_loop:
+ sbrs r_arg1L,0
+ rjmp __mulhi3_skip1
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+__mulhi3_skip1:
+ add r_arg2L,r_arg2L ; shift multiplicand
+ adc r_arg2H,r_arg2H
+
+ cp r_arg2L,__zero_reg__
+ cpc r_arg2H,__zero_reg__
+ breq __mulhi3_exit ; while multiplicand != 0
+
+ lsr r_arg1H ; gets LSB of multiplier
+ ror r_arg1L
+ sbiw r_arg1L,0
+ brne __mulhi3_loop ; exit if multiplier = 0
+__mulhi3_exit:
+ mov r_arg1H,r_resH ; result to return register
+ mov r_arg1L,r_resL
+ ret
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+.endfunc
+#endif /* defined (L_mulhi3) */
+#endif /* !defined (__AVR_HAVE_MUL__) */
+
+#if defined (L_mulhisi3)
+ .global __mulhisi3
+ .func __mulhisi3
+__mulhisi3:
+ mov_l r18, r24
+ mov_h r19, r25
+ clr r24
+ sbrc r23, 7
+ dec r24
+ mov r25, r24
+ clr r20
+ sbrc r19, 7
+ dec r20
+ mov r21, r20
+ rjmp __mulsi3
+ .endfunc
+#endif /* defined (L_mulhisi3) */
+
+#if defined (L_umulhisi3)
+ .global __umulhisi3
+ .func __umulhisi3
+__umulhisi3:
+ mov_l r18, r24
+ mov_h r19, r25
+ clr r24
+ clr r25
+ clr r20
+ clr r21
+ rjmp __mulsi3
+ .endfunc
+#endif /* defined (L_umulhisi3) */
+
+#if defined (L_mulsi3)
+/*******************************************************
+ Multiplication 32 x 32
+*******************************************************/
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23
+#define r_arg1HL r24
+#define r_arg1HH r25 /* multiplier High */
+
+
+#define r_arg2L r18 /* multiplicand Low */
+#define r_arg2H r19
+#define r_arg2HL r20
+#define r_arg2HH r21 /* multiplicand High */
+
+#define r_resL r26 /* result Low */
+#define r_resH r27
+#define r_resHL r30
+#define r_resHH r31 /* result High */
+
+
+ .global __mulsi3
+ .func __mulsi3
+__mulsi3:
+#if defined (__AVR_HAVE_MUL__)
+ mul r_arg1L, r_arg2L
+ movw r_resL, r0
+ mul r_arg1H, r_arg2H
+ movw r_resHL, r0
+ mul r_arg1HL, r_arg2L
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1L, r_arg2HL
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HH, r_arg2L
+ add r_resHH, r0
+ mul r_arg1HL, r_arg2H
+ add r_resHH, r0
+ mul r_arg1H, r_arg2HL
+ add r_resHH, r0
+ mul r_arg1L, r_arg2HH
+ add r_resHH, r0
+ clr r_arg1HH ; use instead of __zero_reg__ to add carry
+ mul r_arg1H, r_arg2L
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_arg1HH ; add carry
+ mul r_arg1L, r_arg2H
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_arg1HH ; add carry
+ movw r_arg1L, r_resL
+ movw r_arg1HL, r_resHL
+ clr r1 ; __zero_reg__ clobbered by "mul"
+ ret
+#else
+ clr r_resHH ; clear result
+ clr r_resHL ; clear result
+ clr r_resH ; clear result
+ clr r_resL ; clear result
+__mulsi3_loop:
+ sbrs r_arg1L,0
+ rjmp __mulsi3_skip1
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+ adc r_resHL,r_arg2HL
+ adc r_resHH,r_arg2HH
+__mulsi3_skip1:
+ add r_arg2L,r_arg2L ; shift multiplicand
+ adc r_arg2H,r_arg2H
+ adc r_arg2HL,r_arg2HL
+ adc r_arg2HH,r_arg2HH
+
+ lsr r_arg1HH ; gets LSB of multiplier
+ ror r_arg1HL
+ ror r_arg1H
+ ror r_arg1L
+ brne __mulsi3_loop
+ sbiw r_arg1HL,0
+ cpc r_arg1H,r_arg1L
+ brne __mulsi3_loop ; exit if multiplier = 0
+__mulsi3_exit:
+ mov_h r_arg1HH,r_resHH ; result to return register
+ mov_l r_arg1HL,r_resHL
+ mov_h r_arg1H,r_resH
+ mov_l r_arg1L,r_resL
+ ret
+#endif /* defined (__AVR_HAVE_MUL__) */
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg1HL
+#undef r_arg1HH
+
+
+#undef r_arg2L
+#undef r_arg2H
+#undef r_arg2HL
+#undef r_arg2HH
+
+#undef r_resL
+#undef r_resH
+#undef r_resHL
+#undef r_resHH
+
+.endfunc
+#endif /* defined (L_mulsi3) */
+
+/*******************************************************
+ Division 8 / 8 => (result + remainder)
+*******************************************************/
+#define r_rem r25 /* remainder */
+#define r_arg1 r24 /* dividend, quotient */
+#define r_arg2 r22 /* divisor */
+#define r_cnt r23 /* loop count */
+
+#if defined (L_udivmodqi4)
+ .global __udivmodqi4
+ .func __udivmodqi4
+__udivmodqi4:
+ sub r_rem,r_rem ; clear remainder and carry
+ ldi r_cnt,9 ; init loop counter
+ rjmp __udivmodqi4_ep ; jump to entry point
+__udivmodqi4_loop:
+ rol r_rem ; shift dividend into remainder
+ cp r_rem,r_arg2 ; compare remainder & divisor
+ brcs __udivmodqi4_ep ; remainder < divisor
+ sub r_rem,r_arg2 ; subtract divisor
+__udivmodqi4_ep:
+ rol r_arg1 ; shift dividend (with CARRY)
+ dec r_cnt ; decrement loop counter
+ brne __udivmodqi4_loop
+ com r_arg1 ; complement result
+ ; because C flag was complemented in loop
+ ret
+ .endfunc
+#endif /* defined (L_udivmodqi4) */
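+
+/* Rough C equivalent of the shift-subtract loop above (illustrative
+   only; the asm keeps the quotient bits in the dividend register,
+   rotated in through carry and fixed up by the final "com"):
+
+       unsigned char udivmodqi4 (unsigned char n, unsigned char d,
+                                 unsigned char *rem)
+       {
+         unsigned char q = 0, r = 0;
+         for (int i = 7; i >= 0; i--)
+           {
+             r = (r << 1) | ((n >> i) & 1); // shift dividend into remainder
+             q <<= 1;
+             if (r >= d)
+               {
+                 r -= d;                    // subtract divisor
+                 q |= 1;
+               }
+           }
+         *rem = r;
+         return q;
+       }
+*/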
+
+#if defined (L_divmodqi4)
+ .global __divmodqi4
+ .func __divmodqi4
+__divmodqi4:
+ bst r_arg1,7 ; store sign of dividend
+ mov __tmp_reg__,r_arg1
+ eor __tmp_reg__,r_arg2; r0.7 is sign of result
+ sbrc r_arg1,7
+ neg r_arg1 ; dividend negative : negate
+ sbrc r_arg2,7
+ neg r_arg2 ; divisor negative : negate
+ rcall __udivmodqi4 ; do the unsigned div/mod
+ brtc __divmodqi4_1
+ neg r_rem ; correct remainder sign
+__divmodqi4_1:
+ sbrc __tmp_reg__,7
+ neg r_arg1 ; correct result sign
+__divmodqi4_exit:
+ ret
+ .endfunc
+#endif /* defined (L_divmodqi4) */
+
+#undef r_rem
+#undef r_arg1
+#undef r_arg2
+#undef r_cnt
+
+
+/*******************************************************
+ Division 16 / 16 => (result + remainder)
+*******************************************************/
+#define r_remL r26 /* remainder Low */
+#define r_remH r27 /* remainder High */
+
+/* return: remainder */
+#define r_arg1L r24 /* dividend Low */
+#define r_arg1H r25 /* dividend High */
+
+/* return: quotient */
+#define r_arg2L r22 /* divisor Low */
+#define r_arg2H r23 /* divisor High */
+
+#define r_cnt r21 /* loop count */
+
+#if defined (L_udivmodhi4)
+ .global __udivmodhi4
+ .func __udivmodhi4
+__udivmodhi4:
+ sub r_remL,r_remL
+ sub r_remH,r_remH ; clear remainder and carry
+ ldi r_cnt,17 ; init loop counter
+ rjmp __udivmodhi4_ep ; jump to entry point
+__udivmodhi4_loop:
+ rol r_remL ; shift dividend into remainder
+ rol r_remH
+ cp r_remL,r_arg2L ; compare remainder & divisor
+ cpc r_remH,r_arg2H
+ brcs __udivmodhi4_ep ; remainder < divisor
+ sub r_remL,r_arg2L ; subtract divisor
+ sbc r_remH,r_arg2H
+__udivmodhi4_ep:
+ rol r_arg1L ; shift dividend (with CARRY)
+ rol r_arg1H
+ dec r_cnt ; decrement loop counter
+ brne __udivmodhi4_loop
+ com r_arg1L
+ com r_arg1H
+; div/mod results to return registers, as for the div() function
+ mov_l r_arg2L, r_arg1L ; quotient
+ mov_h r_arg2H, r_arg1H
+ mov_l r_arg1L, r_remL ; remainder
+ mov_h r_arg1H, r_remH
+ ret
+ .endfunc
+#endif /* defined (L_udivmodhi4) */
+
+#if defined (L_divmodhi4)
+ .global __divmodhi4
+ .func __divmodhi4
+__divmodhi4:
+ .global _div
+_div:
+ bst r_arg1H,7 ; store sign of dividend
+ mov __tmp_reg__,r_arg1H
+ eor __tmp_reg__,r_arg2H ; r0.7 is sign of result
+ rcall __divmodhi4_neg1 ; dividend negative : negate
+ sbrc r_arg2H,7
+ rcall __divmodhi4_neg2 ; divisor negative : negate
+ rcall __udivmodhi4 ; do the unsigned div/mod
+ rcall __divmodhi4_neg1 ; correct remainder sign
+ tst __tmp_reg__
+ brpl __divmodhi4_exit
+__divmodhi4_neg2:
+ com r_arg2H
+ neg r_arg2L ; correct divisor/result sign
+ sbci r_arg2H,0xff
+__divmodhi4_exit:
+ ret
+__divmodhi4_neg1:
+ brtc __divmodhi4_exit
+ com r_arg1H
+ neg r_arg1L ; correct dividend/remainder sign
+ sbci r_arg1H,0xff
+ ret
+ .endfunc
+#endif /* defined (L_divmodhi4) */
+
+#undef r_remH
+#undef r_remL
+
+#undef r_arg1H
+#undef r_arg1L
+
+#undef r_arg2H
+#undef r_arg2L
+
+#undef r_cnt
+
+/*******************************************************
+ Division 32 / 32 => (result + remainder)
+*******************************************************/
+#define r_remHH r31 /* remainder High */
+#define r_remHL r30
+#define r_remH r27
+#define r_remL r26 /* remainder Low */
+
+/* return: remainder */
+#define r_arg1HH r25 /* dividend High */
+#define r_arg1HL r24
+#define r_arg1H r23
+#define r_arg1L r22 /* dividend Low */
+
+/* return: quotient */
+#define r_arg2HH r21 /* divisor High */
+#define r_arg2HL r20
+#define r_arg2H r19
+#define r_arg2L r18 /* divisor Low */
+
+#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
+
+#if defined (L_udivmodsi4)
+ .global __udivmodsi4
+ .func __udivmodsi4
+__udivmodsi4:
+ ldi r_remL, 33 ; init loop counter
+ mov r_cnt, r_remL
+ sub r_remL,r_remL
+ sub r_remH,r_remH ; clear remainder and carry
+ mov_l r_remHL, r_remL
+ mov_h r_remHH, r_remH
+ rjmp __udivmodsi4_ep ; jump to entry point
+__udivmodsi4_loop:
+ rol r_remL ; shift dividend into remainder
+ rol r_remH
+ rol r_remHL
+ rol r_remHH
+ cp r_remL,r_arg2L ; compare remainder & divisor
+ cpc r_remH,r_arg2H
+ cpc r_remHL,r_arg2HL
+ cpc r_remHH,r_arg2HH
+ brcs __udivmodsi4_ep ; remainder < divisor
+ sub r_remL,r_arg2L ; subtract divisor
+ sbc r_remH,r_arg2H
+ sbc r_remHL,r_arg2HL
+ sbc r_remHH,r_arg2HH
+__udivmodsi4_ep:
+ rol r_arg1L ; shift dividend (with CARRY)
+ rol r_arg1H
+ rol r_arg1HL
+ rol r_arg1HH
+ dec r_cnt ; decrement loop counter
+ brne __udivmodsi4_loop
+ ; __zero_reg__ now restored (r_cnt == 0)
+ com r_arg1L
+ com r_arg1H
+ com r_arg1HL
+ com r_arg1HH
+; div/mod results to return registers, as for the ldiv() function
+ mov_l r_arg2L, r_arg1L ; quotient
+ mov_h r_arg2H, r_arg1H
+ mov_l r_arg2HL, r_arg1HL
+ mov_h r_arg2HH, r_arg1HH
+ mov_l r_arg1L, r_remL ; remainder
+ mov_h r_arg1H, r_remH
+ mov_l r_arg1HL, r_remHL
+ mov_h r_arg1HH, r_remHH
+ ret
+ .endfunc
+#endif /* defined (L_udivmodsi4) */
+
+#if defined (L_divmodsi4)
+ .global __divmodsi4
+ .func __divmodsi4
+__divmodsi4:
+ bst r_arg1HH,7 ; store sign of dividend
+ mov __tmp_reg__,r_arg1HH
+ eor __tmp_reg__,r_arg2HH ; r0.7 is sign of result
+ rcall __divmodsi4_neg1 ; dividend negative : negate
+ sbrc r_arg2HH,7
+ rcall __divmodsi4_neg2 ; divisor negative : negate
+ rcall __udivmodsi4 ; do the unsigned div/mod
+ rcall __divmodsi4_neg1 ; correct remainder sign
+ rol __tmp_reg__
+ brcc __divmodsi4_exit
+__divmodsi4_neg2:
+ com r_arg2HH
+ com r_arg2HL
+ com r_arg2H
+ neg r_arg2L ; correct divisor/quotient sign
+ sbci r_arg2H,0xff
+ sbci r_arg2HL,0xff
+ sbci r_arg2HH,0xff
+__divmodsi4_exit:
+ ret
+__divmodsi4_neg1:
+ brtc __divmodsi4_exit
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L ; correct dividend/remainder sign
+ sbci r_arg1H, 0xff
+ sbci r_arg1HL,0xff
+ sbci r_arg1HH,0xff
+ ret
+ .endfunc
+#endif /* defined (L_divmodsi4) */
+
+/**********************************
+ * This is a prologue subroutine
+ **********************************/
+#if defined (L_prologue)
+
+ .global __prologue_saves__
+ .func __prologue_saves__
+__prologue_saves__:
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r28
+ push r29
+ in r28,__SP_L__
+ in r29,__SP_H__
+ sub r28,r26
+ sbc r29,r27
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+.endfunc
+#endif /* defined (L_prologue) */
+
+/*
+ * This is an epilogue subroutine
+ */
+#if defined (L_epilogue)
+
+ .global __epilogue_restores__
+ .func __epilogue_restores__
+__epilogue_restores__:
+ ldd r2,Y+18
+ ldd r3,Y+17
+ ldd r4,Y+16
+ ldd r5,Y+15
+ ldd r6,Y+14
+ ldd r7,Y+13
+ ldd r8,Y+12
+ ldd r9,Y+11
+ ldd r10,Y+10
+ ldd r11,Y+9
+ ldd r12,Y+8
+ ldd r13,Y+7
+ ldd r14,Y+6
+ ldd r15,Y+5
+ ldd r16,Y+4
+ ldd r17,Y+3
+ ldd r26,Y+2
+ ldd r27,Y+1
+ add r28,r30
+ adc r29,__zero_reg__
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
+ mov_l r28, r26
+ mov_h r29, r27
+ ret
+.endfunc
+#endif /* defined (L_epilogue) */
+
+#ifdef L_exit
+ .section .fini9,"ax",@progbits
+ .global _exit
+ .func _exit
+_exit:
+ .weak exit
+exit:
+ .endfunc
+
+ /* Code from .fini8 ... .fini1 sections inserted by ld script. */
+
+ .section .fini0,"ax",@progbits
+ cli
+__stop_program:
+ rjmp __stop_program
+#endif /* defined (L_exit) */
+
+#ifdef L_cleanup
+ .weak _cleanup
+ .func _cleanup
+_cleanup:
+ ret
+.endfunc
+#endif /* defined (L_cleanup) */
+
+#ifdef L_tablejump
+ .global __tablejump2__
+ .func __tablejump2__
+__tablejump2__:
+ lsl r30
+ rol r31
+ .global __tablejump__
+__tablejump__:
+#if defined (__AVR_HAVE_LPMX__)
+ lpm __tmp_reg__, Z+
+ lpm r31, Z
+ mov r30, __tmp_reg__
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+#else
+ lpm
+ adiw r30, 1
+ push r0
+ lpm
+ push r0
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ in __tmp_reg__, __EIND__
+ push __tmp_reg__
+#endif
+ ret
+#endif
+ .endfunc
+#endif /* defined (L_tablejump) */
+
+#ifdef L_copy_data
+ .section .init4,"ax",@progbits
+ .global __do_copy_data
+__do_copy_data:
+#if defined(__AVR_HAVE_ELPMX__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ ldi r16, hh8(__data_load_start)
+ out __RAMPZ__, r16
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+ elpm r0, Z+
+ st X+, r0
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ ldi r16, hh8(__data_load_start - 0x10000)
+.L__do_copy_data_carry:
+ inc r16
+ out __RAMPZ__, r16
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+ elpm
+ st X+, r0
+ adiw r30, 1
+ brcs .L__do_copy_data_carry
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+#if defined (__AVR_HAVE_LPMX__)
+ lpm r0, Z+
+#else
+ lpm
+ adiw r30, 1
+#endif
+ st X+, r0
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
+#endif /* L_copy_data */
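+
+/* Rough C picture of __do_copy_data above (illustrative only):
+
+       char *src = __data_load_start;   // load address, in flash
+       char *dst = __data_start;        // run-time address, in RAM
+       while (dst != __data_end)
+         *dst++ = *src++;               // lpm/elpm + "st X+"
+
+   The three variants differ only in how flash is read: elpm Z+ with
+   RAMPZ, plain elpm with manual RAMPZ carry handling, or lpm on
+   devices without extended program memory.  */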
+
+/* __do_clear_bss is only necessary if there is anything in the .bss section. */
+
+#ifdef L_clear_bss
+ .section .init4,"ax",@progbits
+ .global __do_clear_bss
+__do_clear_bss:
+ ldi r17, hi8(__bss_end)
+ ldi r26, lo8(__bss_start)
+ ldi r27, hi8(__bss_start)
+ rjmp .do_clear_bss_start
+.do_clear_bss_loop:
+ st X+, __zero_reg__
+.do_clear_bss_start:
+ cpi r26, lo8(__bss_end)
+ cpc r27, r17
+ brne .do_clear_bss_loop
+#endif /* L_clear_bss */
+
+/* __do_global_ctors and __do_global_dtors are only necessary
+ if there are any constructors/destructors. */
+
+#if defined (__AVR_HAVE_JMP_CALL__)
+#define XCALL call
+#else
+#define XCALL rcall
+#endif
+
+#ifdef L_ctors
+ .section .init6,"ax",@progbits
+ .global __do_global_ctors
+#if defined(__AVR_HAVE_RAMPZ__)
+__do_global_ctors:
+ ldi r17, hi8(__ctors_start)
+ ldi r28, lo8(__ctors_end)
+ ldi r29, hi8(__ctors_end)
+ ldi r16, hh8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+.L__do_global_ctors_loop:
+ sbiw r28, 2
+ sbc r16, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+ out __RAMPZ__, r16
+ XCALL __tablejump_elpm__
+.L__do_global_ctors_start:
+ cpi r28, lo8(__ctors_start)
+ cpc r29, r17
+ ldi r24, hh8(__ctors_start)
+ cpc r16, r24
+ brne .L__do_global_ctors_loop
+#else
+__do_global_ctors:
+ ldi r17, hi8(__ctors_start)
+ ldi r28, lo8(__ctors_end)
+ ldi r29, hi8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+.L__do_global_ctors_loop:
+ sbiw r28, 2
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
+.L__do_global_ctors_start:
+ cpi r28, lo8(__ctors_start)
+ cpc r29, r17
+ brne .L__do_global_ctors_loop
+#endif /* defined(__AVR_HAVE_RAMPZ__) */
+#endif /* L_ctors */
+
+#ifdef L_dtors
+ .section .fini6,"ax",@progbits
+ .global __do_global_dtors
+#if defined(__AVR_HAVE_RAMPZ__)
+__do_global_dtors:
+ ldi r17, hi8(__dtors_end)
+ ldi r28, lo8(__dtors_start)
+ ldi r29, hi8(__dtors_start)
+ ldi r16, hh8(__dtors_start)
+ rjmp .L__do_global_dtors_start
+.L__do_global_dtors_loop:
+ sbiw r28, 2
+ sbc r16, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+ out __RAMPZ__, r16
+ XCALL __tablejump_elpm__
+.L__do_global_dtors_start:
+ cpi r28, lo8(__dtors_end)
+ cpc r29, r17
+ ldi r24, hh8(__dtors_end)
+ cpc r16, r24
+ brne .L__do_global_dtors_loop
+#else
+__do_global_dtors:
+ ldi r17, hi8(__dtors_end)
+ ldi r28, lo8(__dtors_start)
+ ldi r29, hi8(__dtors_start)
+ rjmp .L__do_global_dtors_start
+.L__do_global_dtors_loop:
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
+ adiw r28, 2
+.L__do_global_dtors_start:
+ cpi r28, lo8(__dtors_end)
+ cpc r29, r17
+ brne .L__do_global_dtors_loop
+#endif /* defined(__AVR_HAVE_RAMPZ__) */
+#endif /* L_dtors */
+
+#ifdef L_tablejump_elpm
+ .global __tablejump_elpm__
+ .func __tablejump_elpm__
+__tablejump_elpm__:
+#if defined (__AVR_HAVE_ELPM__)
+#if defined (__AVR_HAVE_LPMX__)
+ elpm __tmp_reg__, Z+
+ elpm r31, Z
+ mov r30, __tmp_reg__
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+#else
+ elpm
+ adiw r30, 1
+ push r0
+ elpm
+ push r0
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ in __tmp_reg__, __EIND__
+ push __tmp_reg__
+#endif
+ ret
+#endif
+#endif /* defined (__AVR_HAVE_ELPM__) */
+ .endfunc
+#endif /* defined (L_tablejump_elpm) */
+
diff --git a/gcc/config/avr/predicates.md b/gcc/config/avr/predicates.md
new file mode 100755
index 000000000..9a3473bf8
--- /dev/null
+++ b/gcc/config/avr/predicates.md
@@ -0,0 +1,140 @@
+;; Predicate definitions for ATMEL AVR micro controllers.
+;; Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Registers from r0 to r15.
+(define_predicate "l_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) <= 15")))
+
+;; Registers from r16 to r31.
+(define_predicate "d_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) >= 16 && REGNO (op) <= 31")))
+
+(define_predicate "even_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+ (match_test "(REGNO (op) & 1) == 0"))))
+
+(define_predicate "odd_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+ (match_test "(REGNO (op) & 1) != 0"))))
+
+;; SP register.
+(define_predicate "stack_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == REG_SP")))
+
+;; Return true if OP is a valid address for lower half of I/O space.
+(define_predicate "low_io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)")))
+
+;; Return true if OP is a valid address for high half of I/O space.
+(define_predicate "high_io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE((INTVAL (op)), 0x40, 0x5F)")))
+
+;; Return true if OP is a valid address of I/O space.
+(define_predicate "io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE((INTVAL (op)), 0x20, (0x60 - GET_MODE_SIZE(mode)))")))
+
+;; Return 1 if OP is the zero constant for MODE.
+(define_predicate "const0_operand"
+ (and (match_code "const_int,const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Returns true if OP is either the constant zero or a register.
+(define_predicate "reg_or_0_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const0_operand")))
+
+;; Returns 1 if OP is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (match_code "symbol_ref"))
+
+;; Return true if OP is a text segment reference.
+;; This is needed for program memory address expressions.
+(define_predicate "text_segment_operand"
+ (match_code "code_label,label_ref,symbol_ref,plus,const")
+{
+ switch (GET_CODE (op))
+ {
+ case CODE_LABEL:
+ return true;
+ case LABEL_REF :
+ return true;
+ case SYMBOL_REF :
+ return SYMBOL_REF_FUNCTION_P (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+ return text_segment_operand (XEXP (op, 0), VOIDmode);
+ default :
+ return false;
+ }
+})
+
+;; Return true if OP is a constant that contains only one 1 in its
+;; binary representation.
+(define_predicate "single_one_operand"
+ (and (match_code "const_int")
+ (match_test "exact_log2(INTVAL (op) & GET_MODE_MASK (mode)) >= 0")))
+
+;; Return true if OP is a constant that contains only one 0 in its
+;; binary representation.
+(define_predicate "single_zero_operand"
+ (and (match_code "const_int")
+ (match_test "exact_log2(~INTVAL (op) & GET_MODE_MASK (mode)) >= 0")))
+
+;;
+(define_predicate "avr_sp_immediate_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -6 && INTVAL (op) <= 5")))
+
+;; True for EQ & NE
+(define_predicate "eqne_operator"
+ (match_code "eq,ne"))
+
+;; True for GE & LT
+(define_predicate "gelt_operator"
+ (match_code "ge,lt"))
+
+;; True for GT, GTU, LE & LEU
+(define_predicate "difficult_comparison_operator"
+ (match_code "gt,gtu,le,leu"))
+
+;; False for GT, GTU, LE & LEU
+(define_predicate "simple_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (not (match_code "gt,gtu,le,leu"))))
+
+;; Return true if OP is a valid call operand.
+(define_predicate "call_insn_operand"
+ (and (match_code "mem")
+ (ior (match_test "register_operand (XEXP (op, 0), mode)")
+ (match_test "CONSTANT_ADDRESS_P (XEXP (op, 0))"))))
+
+;; True for a register that is a pseudo register.
+(define_predicate "pseudo_register_operand"
+ (and (match_code "reg")
+ (match_test "!HARD_REGISTER_P (op)")))
diff --git a/gcc/config/avr/rtems.h b/gcc/config/avr/rtems.h
new file mode 100644
index 000000000..efd8aface
--- /dev/null
+++ b/gcc/config/avr/rtems.h
@@ -0,0 +1,28 @@
+/* Definitions for RTEMS targeting an AVR using ELF.
+ Copyright (C) 2004, 2007 Free Software Foundation, Inc.
+ Contributed by Ralf Corsepius (ralf.corsepius@rtems.org).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Specify predefined symbols in preprocessor. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+do { \
+ builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_assert ("system=rtems"); \
+} while (0)
diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
new file mode 100644
index 000000000..18769ebb2
--- /dev/null
+++ b/gcc/config/avr/t-avr
@@ -0,0 +1,225 @@
+# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
+# 2009, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+driver-avr.o: $(srcdir)/config/avr/driver-avr.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+avr-devices.o: $(srcdir)/config/avr/avr-devices.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+
+avr-c.o: $(srcdir)/config/avr/avr-c.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(C_COMMON_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+
+
+LIB1ASMSRC = avr/libgcc.S
+LIB1ASMFUNCS = \
+ _mulqi3 \
+ _mulhi3 \
+ _mulsi3 \
+ _udivmodqi4 \
+ _divmodqi4 \
+ _udivmodhi4 \
+ _divmodhi4 \
+ _udivmodsi4 \
+ _divmodsi4 \
+ _prologue \
+ _epilogue \
+ _exit \
+ _cleanup \
+ _tablejump \
+ _tablejump_elpm \
+ _copy_data \
+ _clear_bss \
+ _ctors \
+ _dtors
+
+# We do not have the DF type.
+# Most of the C functions in libgcc2 use almost all registers,
+# so use -mcall-prologues for smaller code size.
+TARGET_LIBGCC2_CFLAGS = -DDF=SF -Dinhibit_libc -mcall-prologues -Os
+
+fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/avr/t-avr
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define FLOAT_ONLY' >> fp-bit.c
+ echo '#define CMPtype QItype' >> fp-bit.c
+ echo '#define DF SF' >> fp-bit.c
+ echo '#define DI SI' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#define SMALL_MACHINE' >> fp-bit.c
+ echo 'typedef int QItype __attribute__ ((mode (QI)));' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+FPBIT = fp-bit.c
+
+MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6
+MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6
+
+# The many avr2 matches are not listed here - this is the default.
+MULTILIB_MATCHES = \
+ mmcu?avr25=mmcu?ata6289 \
+ mmcu?avr25=mmcu?attiny13 \
+ mmcu?avr25=mmcu?attiny13a \
+ mmcu?avr25=mmcu?attiny2313 \
+ mmcu?avr25=mmcu?attiny2313a \
+ mmcu?avr25=mmcu?attiny4313 \
+ mmcu?avr25=mmcu?attiny24 \
+ mmcu?avr25=mmcu?attiny24a \
+ mmcu?avr25=mmcu?attiny44 \
+ mmcu?avr25=mmcu?attiny44a \
+ mmcu?avr25=mmcu?attiny84 \
+ mmcu?avr25=mmcu?attiny84a \
+ mmcu?avr25=mmcu?attiny25 \
+ mmcu?avr25=mmcu?attiny45 \
+ mmcu?avr25=mmcu?attiny85 \
+ mmcu?avr25=mmcu?attiny261 \
+ mmcu?avr25=mmcu?attiny261a \
+ mmcu?avr25=mmcu?attiny461 \
+ mmcu?avr25=mmcu?attiny461a \
+ mmcu?avr25=mmcu?attiny861 \
+ mmcu?avr25=mmcu?attiny861a \
+ mmcu?avr25=mmcu?attiny43u \
+ mmcu?avr25=mmcu?attiny87 \
+ mmcu?avr25=mmcu?attiny48 \
+ mmcu?avr25=mmcu?attiny88 \
+ mmcu?avr25=mmcu?at86rf401 \
+ mmcu?avr3=mmcu?at43usb355 \
+ mmcu?avr3=mmcu?at76c711 \
+ mmcu?avr31=mmcu?atmega103 \
+ mmcu?avr31=mmcu?at43usb320 \
+ mmcu?avr35=mmcu?at90usb82 \
+ mmcu?avr35=mmcu?at90usb162 \
+ mmcu?avr35=mmcu?atmega8u2 \
+ mmcu?avr35=mmcu?atmega16u2 \
+ mmcu?avr35=mmcu?atmega32u2 \
+ mmcu?avr35=mmcu?attiny167 \
+ mmcu?avr4=mmcu?atmega48 \
+ mmcu?avr4=mmcu?atmega48a \
+ mmcu?avr4=mmcu?atmega48p \
+ mmcu?avr4=mmcu?atmega8 \
+ mmcu?avr4=mmcu?atmega8515 \
+ mmcu?avr4=mmcu?atmega8535 \
+ mmcu?avr4=mmcu?atmega88 \
+ mmcu?avr4=mmcu?atmega88a \
+ mmcu?avr4=mmcu?atmega88p \
+ mmcu?avr4=mmcu?atmega88pa \
+ mmcu?avr4=mmcu?atmega8hva \
+ mmcu?avr4=mmcu?at90pwm1 \
+ mmcu?avr4=mmcu?at90pwm2 \
+ mmcu?avr4=mmcu?at90pwm2b \
+ mmcu?avr4=mmcu?at90pwm3 \
+ mmcu?avr4=mmcu?at90pwm3b \
+ mmcu?avr4=mmcu?at90pwm81 \
+ mmcu?avr5=mmcu?atmega16 \
+ mmcu?avr5=mmcu?atmega16a \
+ mmcu?avr5=mmcu?atmega161 \
+ mmcu?avr5=mmcu?atmega162 \
+ mmcu?avr5=mmcu?atmega163 \
+ mmcu?avr5=mmcu?atmega164a \
+ mmcu?avr5=mmcu?atmega164p \
+ mmcu?avr5=mmcu?atmega165 \
+ mmcu?avr5=mmcu?atmega165a \
+ mmcu?avr5=mmcu?atmega165p \
+ mmcu?avr5=mmcu?atmega168 \
+ mmcu?avr5=mmcu?atmega168a \
+ mmcu?avr5=mmcu?atmega168p \
+ mmcu?avr5=mmcu?atmega169 \
+ mmcu?avr5=mmcu?atmega169a \
+ mmcu?avr5=mmcu?atmega169p \
+ mmcu?avr5=mmcu?atmega169pa \
+ mmcu?avr5=mmcu?atmega32 \
+ mmcu?avr5=mmcu?atmega323 \
+ mmcu?avr5=mmcu?atmega324a \
+ mmcu?avr5=mmcu?atmega324p \
+ mmcu?avr5=mmcu?atmega324pa \
+ mmcu?avr5=mmcu?atmega325 \
+ mmcu?avr5=mmcu?atmega325a \
+ mmcu?avr5=mmcu?atmega325p \
+ mmcu?avr5=mmcu?atmega3250 \
+ mmcu?avr5=mmcu?atmega3250a \
+ mmcu?avr5=mmcu?atmega3250p \
+ mmcu?avr5=mmcu?atmega328 \
+ mmcu?avr5=mmcu?atmega328p \
+ mmcu?avr5=mmcu?atmega329 \
+ mmcu?avr5=mmcu?atmega329a \
+ mmcu?avr5=mmcu?atmega329p \
+ mmcu?avr5=mmcu?atmega329pa \
+ mmcu?avr5=mmcu?atmega3290 \
+ mmcu?avr5=mmcu?atmega3290a \
+ mmcu?avr5=mmcu?atmega3290p \
+ mmcu?avr5=mmcu?atmega406 \
+ mmcu?avr5=mmcu?atmega64 \
+ mmcu?avr5=mmcu?atmega640 \
+ mmcu?avr5=mmcu?atmega644 \
+ mmcu?avr5=mmcu?atmega644a \
+ mmcu?avr5=mmcu?atmega644p \
+ mmcu?avr5=mmcu?atmega644pa \
+ mmcu?avr5=mmcu?atmega645 \
+ mmcu?avr5=mmcu?atmega645a \
+ mmcu?avr5=mmcu?atmega645p \
+ mmcu?avr5=mmcu?atmega6450 \
+ mmcu?avr5=mmcu?atmega6450a \
+ mmcu?avr5=mmcu?atmega6450p \
+ mmcu?avr5=mmcu?atmega649 \
+ mmcu?avr5=mmcu?atmega649a \
+ mmcu?avr5=mmcu?atmega649p \
+ mmcu?avr5=mmcu?atmega6490 \
+ mmcu?avr5=mmcu?atmega6490a \
+ mmcu?avr5=mmcu?atmega6490p \
+ mmcu?avr5=mmcu?atmega16hva \
+ mmcu?avr5=mmcu?atmega16hva2 \
+ mmcu?avr5=mmcu?atmega16hvb \
+ mmcu?avr5=mmcu?atmega32hvb \
+ mmcu?avr5=mmcu?atmega64hve \
+ mmcu?avr5=mmcu?at90can32 \
+ mmcu?avr5=mmcu?at90can64 \
+ mmcu?avr5=mmcu?at90pwm216 \
+ mmcu?avr5=mmcu?at90pwm316 \
+ mmcu?avr5=mmcu?atmega32c1 \
+ mmcu?avr5=mmcu?atmega64c1 \
+ mmcu?avr5=mmcu?atmega16m1 \
+ mmcu?avr5=mmcu?atmega32m1 \
+ mmcu?avr5=mmcu?atmega64m1 \
+ mmcu?avr5=mmcu?atmega16u4 \
+ mmcu?avr5=mmcu?atmega32u4 \
+ mmcu?avr5=mmcu?atmega32u6 \
+ mmcu?avr5=mmcu?at90scr100 \
+ mmcu?avr5=mmcu?at90usb646 \
+ mmcu?avr5=mmcu?at90usb647 \
+ mmcu?avr5=mmcu?at94k \
+ mmcu?avr5=mmcu?m3000 \
+ mmcu?avr51=mmcu?atmega128 \
+ mmcu?avr51=mmcu?atmega1280 \
+ mmcu?avr51=mmcu?atmega1281 \
+ mmcu?avr51=mmcu?atmega1284p \
+ mmcu?avr51=mmcu?atmega128rfa1 \
+ mmcu?avr51=mmcu?at90can128 \
+ mmcu?avr51=mmcu?at90usb1286 \
+ mmcu?avr51=mmcu?at90usb1287 \
+ mmcu?avr6=mmcu?atmega2560 \
+ mmcu?avr6=mmcu?atmega2561
+
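+# Illustrative effect of the tables above (not a build rule): compiling
+# with -mmcu=atmega328p matches "mmcu?avr5=mmcu?atmega328p", so the
+# driver links against the avr5 multilib variant of libgcc, which was
+# built with -mmcu=avr5 and installed under the avr5/ directory.
+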
+MULTILIB_EXCEPTIONS =
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/avr/t-rtems b/gcc/config/avr/t-rtems
new file mode 100644
index 000000000..a3ef8bd80
--- /dev/null
+++ b/gcc/config/avr/t-rtems
@@ -0,0 +1,3 @@
+# Multilibs for avr RTEMS targets.
+
+# At the moment, this is just a stub.